コード例 #1
0
 def compute_diff(frames):
     # Compute two difference masks from a (no_blur, blur) frame pair.
     #
     # NOTE(review): `bg`, `max_distance` and `self` are not defined in this
     # snippet — they must come from an enclosing scope; the use of
     # `self.blurframe` suggests this was extracted from a method. Confirm
     # against the original class before reusing.
     no_blur, blur = frames
     # Pixels whose max-distance from the sharp background exceeds 30.
     diff1 = get_diff([bg, no_blur], max_distance,
                      lambda diff: diff > 30)
     # Same comparison, but blurred background vs. the blurred frame.
     diff2 = get_diff([self.blurframe(bg), blur], max_distance,
                      lambda diff: diff > 30)
     return diff1, diff2
コード例 #2
0
    def _pipeline_adaptive(self, debug=False):
        """Build a pipeline with an adaptive (running-average) background model.

        Every frame is blended into the stored background with
        ``cv2.accumulateWeighted`` (weight ``self.alpha``); the thresholded
        difference is then cleaned up with median/morphological operations and
        finally passed to ``self.check_contours``.

        Args:
            debug: forwarded to ``Pipeline`` and ``check_contours``.

        Returns:
            The configured ``Pipeline`` instance.
        """
        pipeline = Pipeline(debug)

        def get_background(frame):
            # Running-average background: blend the current frame into the
            # stored model with weight self.alpha.
            bg = pipeline.load("bg")
            if bg is None:
                bg = frame  # first frame bootstraps the model
            bg = bg.astype(float)
            fg = frame.astype(float)
            bg = cv2.accumulateWeighted(fg, bg, self.alpha)
            pipeline.store("bg", bg)
            # Return background and current frame, which are then consumed by
            # the "Difference" step as get_diff([bg, fg]).
            return bg.astype(np.uint8), frame

        pipeline.add_operation("Input", lambda frame: self.get_preprocessing()(frame))

        pipeline.add_operation(["Background", "Current frame"],
                               get_background, hide=False)

        pipeline.add_operation("Difference",
                               lambda t: get_diff(list(t), self.distance,
                                                  lambda diff: diff > self.threshold))

        def remove_noise(frame, median_filter_size):
            # Median filter removes salt-and-pepper noise; the opening then
            # drops the remaining small isolated blobs.
            median = cv2.medianBlur(frame, median_filter_size)
            open_ = cv2.morphologyEx(median, cv2.MORPH_OPEN,
                                     cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2)),
                                     iterations=1)
            return median, open_

        pipeline.add_operation(["Median filter", "Median filter + Opening"],
                               partial(remove_noise, median_filter_size=3))

        # frame[1] selects the opened mask from the (median, open_) pair above.
        pipeline.add_operation("Dilate", lambda frame: cv2.morphologyEx(frame[1], cv2.MORPH_DILATE,
                                                                        cv2.getStructuringElement(cv2.MORPH_CROSS,
                                                                                                  (25, 25)),
                                                                        iterations=1))

        pipeline.add_operation("Closing", lambda frame: cv2.morphologyEx(frame, cv2.MORPH_CLOSE,
                                                                         cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                                                                   (5, 5)),
                                                                         iterations=1))
        pipeline.add_operation("Erode", lambda frame: cv2.morphologyEx(frame, cv2.MORPH_ERODE,
                                                                       cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                                                                 (3, 3)), iterations=1))

        pipeline.add_operation("Closing", lambda frame: cv2.morphologyEx(frame, cv2.MORPH_CLOSE,
                                                                         cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                                                                   (20, 20)),
                                                                         iterations=1))

        pipeline.add_operation("Output", lambda mask: self.check_contours(pipeline.input, mask, debug))

        return pipeline
コード例 #3
0
def generate_mask_distances(video,
                            threshold_range,
                            num_frames=2,
                            preprocessing=None):
    """Plot a grid of difference masks: one column per distance function,
    one row per threshold value.

    Args:
        video: source object providing ``get_frames``.
        threshold_range: iterable of threshold values (one row each).
        num_frames: how many frames to fetch and compare.
        preprocessing: optional callable applied to the frames.
    """
    # Materialize the thresholds so we know the grid height up front.
    threshold_list = list(threshold_range)
    distances = [l1, l2, max_distance]
    fig, axes = plt.subplots(len(threshold_list),
                             len(distances),
                             figsize=(10, 20))
    # BUG FIX: `preprocessing` was accepted but never forwarded; pass it to
    # get_frames as generate_mask_examples already does.
    frames = video.get_frames(num_frames, preprocessing)
    for c, d in enumerate(distances):
        for r, t in enumerate(threshold_list):
            mask = get_diff(frames,
                            distance=d,
                            threshold=lambda diff: diff > t)
            # Moving objects are white in the raw diff; invert so they
            # render black on the plot.
            mask = np.logical_not(mask).astype(np.uint8)
            axes[r, c].imshow(mask, cmap='gray')
            axes[r, c].set_title(f"distance={d.__name__} threshold={t}")

    plt.show()
コード例 #4
0
def generate_mask_examples(video,
                           frames_range,
                           threshold_range,
                           distance,
                           preprocessing=None):
    """Plot a grid of masks: rows vary the number of frames used,
    columns vary the threshold.

    Args:
        video: source object providing ``get_frames``.
        frames_range: range of frame counts (one row each).
        threshold_range: range of threshold values (one column each).
        distance: distance function passed to ``get_diff``.
        preprocessing: optional callable applied to the frames.
    """
    # Materialize both ranges so we know the grid dimensions up front.
    frames_list = list(frames_range)
    threshold_list = list(threshold_range)

    fig, axes = plt.subplots(len(frames_list),
                             len(threshold_list),
                             figsize=(10, 20))

    # BUG FIX: iterate the concrete values directly. The old code rebuilt
    # them as `n + frames_range.start` and `(t + 1) * threshold_range.step`,
    # which is wrong whenever threshold_range.start != threshold_range.step
    # (or frames_range.step != 1).
    for n, real_n in enumerate(frames_list):
        frames = video.get_frames(real_n, preprocessing)

        for t, real_t in enumerate(threshold_list):
            # Moving objects are white in the raw diff ...
            mask = get_diff(frames,
                            distance=distance,
                            threshold=lambda diff: diff > real_t)
            # ... invert so they render black on the plot.
            mask = np.logical_not(mask)
            axes[n, t].imshow(mask.astype(np.uint8) * 255,
                              cmap='gray',
                              vmin=0,
                              vmax=255)
            axes[n, t].set_title(f"frames={real_n} threshold={real_t}")
    plt.tight_layout()
    plt.suptitle(f"using {distance.__name__} distance" + (
        f" and {preprocessing.__name__}" if preprocessing is not None else ""))
    plt.show()
コード例 #5
0
    def _pipeline_static_first(self, debug=False):
        """Build a pipeline with a fixed background model.

        Depending on ``self.kind`` the background is either one static frame
        ("static") or the per-pixel median of the first 100 frames ("first").
        For any other kind no background is stored here, so ``get_background``
        will hand a ``None`` background to the "Difference" step — same as the
        original behavior.

        Args:
            debug: forwarded to ``Pipeline`` and ``check_contours``.

        Returns:
            The configured ``Pipeline`` instance.
        """
        pipeline = Pipeline(debug=debug)

        bg = None
        if self.kind == "static":
            # Grab one frame assumed to contain only background.
            # NOTE(review): 254 looks like a hand-picked frame index for a
            # specific video — confirm, or parameterize, before reusing.
            self.video.current_frame_position = 254
            bg = self.video.get_frame(preprocessing=self.get_preprocessing())
            self.video.current_frame_position = 0  # rewind for processing
        elif self.kind == "first":
            # Background interpolated as the median of the first 100 frames.
            bg = self.video.get_background(np.median, 100,
                                           preprocessing=self.get_preprocessing(),
                                           start=0)

        if bg is not None:
            pipeline.store("bg", bg.copy())
            del bg  # drop the local frame; the pipeline keeps its own copy

        pipeline.add_operation("Input", lambda frame: self.get_preprocessing()(frame))

        def get_background(frame):
            # Pair the precomputed background with the current frame for the
            # "Difference" step.
            bg = pipeline.load("bg")
            return bg, frame

        pipeline.add_operation(["Background", "Current frame"],
                               get_background, hide=False)

        pipeline.add_operation("Difference",
                               lambda t: get_diff(list(t), self.distance,
                                                  lambda diff: diff > self.threshold))

        def remove_noise(frame, size):
            # Median filter removes salt-and-pepper noise; the opening then
            # drops the remaining small isolated blobs.
            median = cv2.medianBlur(frame, size)
            open_ = cv2.morphologyEx(median, cv2.MORPH_OPEN,
                                     cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)),
                                     iterations=1)
            return median, open_

        pipeline.add_operation(["Median filter", "Median filter + Opening"], partial(remove_noise, size=7))

        # frame[-1] selects the opened mask from the (median, open_) pair above.
        pipeline.add_operation("Connect foreground vertically",
                               lambda frame: cv2.morphologyEx(frame[-1], cv2.MORPH_CLOSE,
                                                              cv2.getStructuringElement(cv2.MORPH_RECT, (5, 30)),
                                                              iterations=1))

        pipeline.add_operation("Connect foreground horizontally",
                               lambda frame: cv2.morphologyEx(frame, cv2.MORPH_CLOSE,
                                                              cv2.getStructuringElement(cv2.MORPH_RECT, (30, 5)),
                                                              iterations=1))

        pipeline.add_operation("Dilate for filling void",
                               lambda frame: cv2.morphologyEx(frame, cv2.MORPH_DILATE,
                                                              cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10)),
                                                              iterations=1))
        pipeline.add_operation("Close for filling holes",
                               lambda frame: cv2.morphologyEx(frame, cv2.MORPH_CLOSE,
                                                              cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30, 30)),
                                                              iterations=1))

        pipeline.add_operation("Output", lambda frame: self.check_contours(pipeline.input, frame, debug))

        return pipeline