def execute(self) -> StageResult:
        """
        Registers the history frame.
        """

        history_frames = self._history_frames.history_frames

        transformation_matrices = self._transformation_matrices.transformation_matrices

        self.shifted_history_frames = [
            Frame(
                cv2.warpPerspective(
                    history_frame.get_frame(),
                    M,
                    (
                        history_frame.get_frame().shape[1],
                        history_frame.get_frame().shape[0],
                    ),
                ).astype(np.uint8),
                history_frame.get_frame_number(),
            ) for history_frame, M in zip(history_frames,
                                          transformation_matrices)
        ]

        return StageResult(True, True)
    def execute(self) -> StageResult:
        """
        Cleans up the moving-foreground mask: morphological opening,
        an agreement check against the original mask, then a closing pass.
        """
        foreground = self._moving_foreground.moving_foreground.get_frame()

        # Morphological opening: erode then dilate, each with its own kernel.
        erode_element = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (self._erode_kernel_size, self._erode_kernel_size)
        )
        dilate_element = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (self._dilate_kernel_size, self._dilate_kernel_size)
        )
        opened_mask = cv2.dilate(cv2.erode(foreground, erode_element), dilate_element)

        # Keep pixels where the opened mask agrees with the original mask,
        # then force anything that was background to stay background.
        combined_mask = np.zeros(opened_mask.shape).astype(np.uint8)
        combined_mask[opened_mask == foreground] = 255
        combined_mask[foreground == 0] = 0

        # Morphological closing (dilate then erode) with a shared kernel.
        combine_element = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (self._combine_kernel_size, self._combine_kernel_size)
        )
        closed = cv2.erode(cv2.dilate(combined_mask, combine_element), combine_element)

        self.moving_foreground = Frame(
            closed,
            self._moving_foreground.moving_foreground.get_frame_number(),
        )

        return StageResult(True, True)
    def _detect_and_compute(self, frame: Frame):
        """
        Returns (keypoints, descriptors) for the given frame, computing
        them with ORB on a cache miss and memoizing per frame.
        """
        cached = self._feature_hash.get(frame)
        if cached is None:
            # detectAndCompute returns the (keypoints, descriptors) pair.
            cached = self._orb.detectAndCompute(frame.get_frame(), None)
            self._feature_hash[frame] = cached

        return cached
# Exemplo n.º 4
    def execute(self) -> StageResult:
        """
        Detect and returns locations of blobs from foreground mask
        Returns list of coordinates
        """

        mask = self._moving_foregrouned.moving_foreground.get_frame()

        contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        # Convert each bounding box from (x, y, w, h) to (x1, y1, x2, y2).
        boxes = []
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            boxes.append((x, y, x + w, y + h))

        # Draw the boxes on a color copy of the mask for debugging.
        debug_image = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        for x1, y1, x2, y2 in boxes:
            debug_image = cv2.rectangle(
                debug_image, (x1, y1), (x2, y2), (0, 255, 0), 2
            )

        self.blob_image = Frame(
            debug_image,
            self._moving_foregrouned.moving_foreground.get_frame_number(),
        )

        self.baboons = [Baboon(box) for box in boxes]
        return StageResult(True, True)
# Exemplo n.º 5
    def execute(self) -> StageResult:
        """
        Debounces the moving-foreground mask: a pixel turns on only after
        being observed in motion for _required_motion_observations
        consecutive frames, and turns off only after the same number of
        consecutive motionless observations.
        """
        current = self._moving_foreground.moving_foreground.get_frame()

        # Lazily allocate the persistent per-pixel state on the first call.
        if self._result is None:
            self._result = np.zeros_like(current)
            self._motion_observations = np.zeros_like(current)
            self._no_motion_observations = np.zeros_like(current)

        is_moving = current == 255
        is_still = current == 0

        # Count consecutive observations of each state per pixel,
        # resetting a counter whenever the opposite state is seen.
        self._motion_observations[is_still] = 0
        self._motion_observations[is_moving] += 1

        self._no_motion_observations[is_moving] = 0
        self._no_motion_observations[is_still] += 1

        # Flip pixels whose counters just reached the required threshold.
        threshold = self._required_motion_observations
        self._result[self._motion_observations == threshold] = 255
        self._result[self._no_motion_observations == threshold] = 0

        self.moving_foreground = Frame(
            self._result,
            self._moving_foreground.moving_foreground.get_frame_number())

        return StageResult(True, True)
# Exemplo n.º 6
 def _quantize_frame(self, frame: Frame):
     """
     Normalize pixel values from 0-255 to values from 0-self._scale_factor.
     Returns the quantized frame as an int32 array.
     """
     # Keep the original multiply-then-divide order so float32 rounding
     # (and therefore the floor result) is unchanged.
     scaled = frame.get_frame().astype(np.float32) * self._scale_factor / 255.0
     return np.floor(scaled).astype(np.int32)
    def _detect_and_compute(self, frame: Frame):
        """
        Returns (keypoints, descriptors) for the given frame, memoized.

        On a cache miss: detects FAST keypoints, thins them with ssc
        (adaptive non-maximal suppression), then computes ORB descriptors
        for the surviving keypoints.
        """
        if frame in self._feature_hash:
            return self._feature_hash[frame]

        image = frame.get_frame()
        width, height = image.shape[1], image.shape[0]

        keypoints = self._fast.detect(image, None)
        keypoints = ssc(keypoints, 10000, 0.1, width, height)

        # orb.compute returns the (keypoints, descriptors) pair.
        keypoints, descriptors = self._orb.compute(image, keypoints)

        self._feature_hash[frame] = (keypoints, descriptors)
        return self._feature_hash[frame]
# Exemplo n.º 8
    def execute(self) -> StageResult:
        """
        Denoises the preprocessed frame with non-local means filtering.
        """
        source = self._preprocessed_frame.processed_frame

        denoised = cv2.fastNlMeansDenoising(source.get_frame(), h=5)
        self.processed_frame = Frame(denoised, source.get_frame_number())

        return StageResult(True, True)
# Exemplo n.º 9
    def execute(self) -> StageResult:
        """
        Get a video frame from a video file.
        """

        # read() returns (success, image); image is None when the capture
        # is exhausted, which the pipeline detects via the StageResult.
        success, image = self._capture.read()

        self.frame = Frame(image, self._frame_number)
        self._frame_number = self._frame_number + 1

        return StageResult(success, success)
    def execute(self) -> StageResult:
        """
        Converts a color image to a gray-scale image.
        """
        source = self._frame_mixin.frame

        gray = cv2.cvtColor(source.get_frame(), cv2.COLOR_BGR2GRAY)
        self.processed_frame = Frame(gray, source.get_frame_number())

        return StageResult(True, True)
# Exemplo n.º 11
    def execute(self) -> StageResult:
        """
        Applies every shifted mask to the moving foreground to clean up
        the edges introduced by image registration.
        """
        # BUG FIX: the previous version re-read the unmasked foreground on
        # every iteration, so only the LAST mask had any effect. Apply the
        # masks cumulatively instead.
        moving_foreground = self._moving_foreground.moving_foreground.get_frame()

        for mask in self._shifted_masks.shifted_masks:
            moving_foreground = np.multiply(moving_foreground, mask)

        self.moving_foreground = Frame(
            moving_foreground,
            self._frame.frame.get_frame_number(),
        )

        return StageResult(True, True)
# Exemplo n.º 12
    def execute(self) -> StageResult:
        """
        Computes the moving-foreground mask from the frame weights, the
        foreground, and the history of dissimilarity.
        """
        mask = self._get_moving_foreground(
            self._weights.weights,
            self._foreground.foreground,
            self._history_of_dissimilarity.history_of_dissimilarity,
        )

        self.moving_foreground = Frame(mask, self._frame.frame.get_frame_number())

        return StageResult(True, True)
# Exemplo n.º 13
    def execute(self) -> StageResult:
        """
        Blurs a gray frame using a Gaussian blur.
        """
        source = self._preprocessed_frame.processed_frame

        # 5x5 kernel; sigma of 0 lets OpenCV derive it from the kernel size.
        blurred = cv2.GaussianBlur(source.get_frame(), (5, 5), 0)
        self.processed_frame = Frame(blurred, source.get_frame_number())

        return StageResult(True, True)
# Exemplo n.º 14
    def execute(self) -> StageResult:
        """
        Filters the moving-foreground mask by pixel groups, writing the
        result of the _execute helper into a fresh zero mask.
        """
        source = self._moving_foreground.moving_foreground

        filtered = np.zeros_like(source.get_frame())
        height, width = filtered.shape

        # _execute fills `filtered` in place from the current mask.
        _execute(filtered, source.get_frame(), self._group_size, height, width)

        self.moving_foreground = Frame(filtered, source.get_frame_number())

        return StageResult(True, True)
# Exemplo n.º 15
    def execute(self) -> StageResult:
        """
        Draws each baboon's bounding box, and its id when known, onto a
        copy of the current frame.
        """
        annotated = self._frame.frame.get_frame().copy()

        for baboon in self._baboons.baboons:
            x1, y1, x2, y2 = baboon.rectangle
            annotated = cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 2)

            if baboon.id_str is not None:
                # Label sits just above the box's top-left corner.
                cv2.putText(
                    annotated,
                    baboon.id_str,
                    (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.9,
                    (36, 255, 12),
                    2,
                )

        self.region_frame = Frame(annotated, self._frame.frame.get_frame_number())

        return StageResult(True, True)
# Exemplo n.º 16
    def execute(self) -> StageResult:
        """
        Detect and returns locations of blobs from foreground mask
        Returns list of coordinates
        """

        foreground = self._moving_foregrouned.moving_foreground.get_frame()

        keypoints = self._detector.detect(foreground)

        # Debug image: rich keypoints drawn on a color copy of the mask.
        annotated = cv2.drawKeypoints(
            cv2.cvtColor(foreground, cv2.COLOR_GRAY2BGR),
            keypoints,
            np.array([]),
            (0, 255, 0),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
        )

        self.blob_image = Frame(
            annotated,
            self._moving_foregrouned.moving_foreground.get_frame_number(),
        )

        self.baboons = [
            Baboon((point.pt[0], point.pt[1]), point.size) for point in keypoints
        ]
        return StageResult(True, True)