Example #1
 def set_event_params(self,
                      min_event_len=2,
                      time_pre_event="1.5s",
                      time_post_event="2s"):
     # type: (...) -> None
     """ Sets motion event parameters. """
     assert self._video_fps is not None
     self._min_event_len = FrameTimecode(min_event_len, self._video_fps)
     self._pre_event_len = FrameTimecode(time_pre_event, self._video_fps)
     self._post_event_len = FrameTimecode(time_post_event, self._video_fps)
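
A minimal usage sketch of the method above, assuming scanner is an already-initialized ScanContext whose input video has been loaded (so _video_fps is set and the assertion passes); the variable name and parameter values are illustrative:

    # Hypothetical usage: min_event_len can be a frame count or a duration
    # string, and the pre/post windows accept duration strings such as "1.5s".
    scanner.set_event_params(min_event_len=4,
                             time_pre_event="1.5s",
                             time_post_event="2.0s")
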
Example #2
 def set_video_time(self, start_time=None, end_time=None, duration=None):
     # type: (str, str, str) -> None
     """ Used to select a sub-set of the video in time for processing. """
     assert self._video_fps is not None
     if start_time is not None:
         self._start_time = FrameTimecode(start_time, self._video_fps)
     if duration is not None:
         duration = FrameTimecode(duration, self._video_fps)
         if self._start_time is not None:
             self._end_time = FrameTimecode(
                 self._start_time.frame_num + duration.frame_num,
                 self._video_fps)
         else:
             self._end_time = duration
     elif end_time is not None:
         self._end_time = FrameTimecode(end_time, self._video_fps)
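
A usage sketch for the time-selection method above (again assuming an initialized scanner). Because the duration branch is checked before end_time, duration takes precedence when both are supplied:

    # Hypothetical usage: scan a 45-second window starting 90 seconds in.
    # Passing end_time as well would have no effect here, since duration wins.
    scanner.set_video_time(start_time="90s", duration="45s")
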
Example #3
    def __init__(self, args):
        """ Initializes the ScanContext with the supplied arguments. """
        if not args.quiet_mode:
            print("[DVR-Scan] Initializing scan context...")

        self.initialized = False

        self.event_list = []

        self.suppress_output = args.quiet_mode
        self.frames_read = -1
        self.frames_processed = -1
        self.frames_total = -1
        self._cap = None
        self._cap_path = None

        self.video_resolution = None
        self.video_fps = None
        self.video_paths = [input_file.name for input_file in args.input]
        # We close the open file handles, as only the paths are required.
        for input_file in args.input:
            input_file.close()
        if len(args.fourcc_str) != 4:
            print(
                "[DVR-Scan] Error: Specified codec (-c/--codec) must be exactly 4 characters."
            )
            return
        if args.kernel_size == -1:
            self.kernel = None
        elif (args.kernel_size % 2) == 0:
            print(
                "[DVR-Scan] Error: Kernel size must be an odd, positive integer (e.g. 3, 5, 7)."
            )
            return
        else:
            self.kernel = np.ones((args.kernel_size, args.kernel_size),
                                  np.uint8)
        self.fourcc = cv2.VideoWriter_fourcc(*args.fourcc_str.upper())
        self.comp_file = None
        self.scan_only_mode = args.scan_only_mode
        if args.output:
            self.comp_file = args.output.name
            args.output.close()
        # Check the input video(s) and obtain the framerate/resolution.
        if self._load_input_videos():
            # Motion detection and output related arguments
            self.threshold = args.threshold
            if self.kernel is None:
                if self.video_resolution[0] >= 1920:
                    self.kernel = np.ones((7, 7), np.uint8)
                elif self.video_resolution[0] >= 1280:
                    self.kernel = np.ones((5, 5), np.uint8)
                else:
                    self.kernel = np.ones((3, 3), np.uint8)
            # Event detection window properties
            self.min_event_len = FrameTimecode(self.video_fps,
                                               args.min_event_len)
            self.pre_event_len = FrameTimecode(self.video_fps,
                                               args.time_pre_event)
            self.post_event_len = FrameTimecode(self.video_fps,
                                                args.time_post_event)
            # Start time, end time, and duration
            self.start_time, self.end_time = None, None
            if args.start_time is not None:
                self.start_time = FrameTimecode(self.video_fps,
                                                args.start_time)
            if args.duration is not None:
                duration = FrameTimecode(self.video_fps, args.duration)
                if isinstance(self.start_time, FrameTimecode):
                    self.end_time = FrameTimecode(
                        self.video_fps,
                        self.start_time.frame_num + duration.frame_num)
                else:
                    self.end_time = duration
            elif args.end_time is not None:
                self.end_time = FrameTimecode(self.video_fps, args.end_time)
            # Video processing related arguments
            self.frame_skip = args.frame_skip
            #self.downscale_factor = args.downscale_factor

            self.initialized = True
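
When no kernel size is given (args.kernel_size == -1), the fallback above scales the morphological noise-filtering kernel with the frame width. A standalone sketch of that rule (the helper name is hypothetical):

    import numpy as np

    def default_kernel(frame_width):
        # Mirrors the resolution-based fallback above: 7x7 for frames at least
        # 1920 px wide, 5x5 for at least 1280 px, and 3x3 otherwise.
        if frame_width >= 1920:
            size = 7
        elif frame_width >= 1280:
            size = 5
        else:
            size = 3
        return np.ones((size, size), np.uint8)
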
Example #4
    def scan_motion(self):
        """ Performs motion analysis on the ScanContext's input video(s). """
        if self.initialized is not True:
            print(
                "[DVR-Scan] Error: Scan context uninitialized, no analysis performed."
            )
            return
        print("[DVR-Scan] Scanning %s for motion events..." %
              ("%d input videos" % len(self.video_paths)
               if len(self.video_paths) > 1 else "input video"))

        bg_subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
        buffered_frames = []
        event_window = []
        self.event_list = []
        num_frames_post_event = 0
        event_start = None

        video_writer = None
        output_prefix = ''
        if self.comp_file:
            video_writer = cv2.VideoWriter(self.comp_file, self.fourcc,
                                           self.video_fps,
                                           self.video_resolution)
        elif len(self.video_paths[0]) > 0:
            output_prefix = os.path.basename(self.video_paths[0])
            dot_index = output_prefix.rfind('.')
            if dot_index > 0:
                output_prefix = output_prefix[:dot_index]

        curr_pos = FrameTimecode(self.video_fps, 0)
        #curr_state = 'no_event'     # 'no_event', 'in_event', or 'post_event'
        in_motion_event = False
        num_frames_read = 0
        num_frames_processed = 0
        processing_start = time.time()

        tqdm = dvr_scan.platform.get_tqdm()
        progress_bar = None
        self.frames_total = int(self.frames_total)
        if tqdm is not None and self.frames_total > 0 and not self.suppress_output:
            if self.end_time and self.end_time.frame_num < self.frames_total:
                self.frames_total = self.end_time.frame_num
            if self.start_time:
                self.frames_total -= self.start_time.frame_num
            if self.frames_total < 0:
                self.frames_total = 0
            progress_bar = tqdm.tqdm(total=self.frames_total,
                                     unit=' frames',
                                     desc="[DVR-Scan] Processed")

        # Seek to starting position if required.
        if self.start_time is not None:
            while curr_pos.frame_num < self.start_time.frame_num:
                if self._get_next_frame(False) is None:
                    break
                num_frames_read += 1
                curr_pos.frame_num += 1

        # Motion event scanning/detection loop.
        while True:
            if self.end_time is not None and curr_pos.frame_num >= self.end_time.frame_num:
                break
            if self.frame_skip > 0:
                for _ in range(self.frame_skip):
                    if self._get_next_frame(False) is None:
                        break
                    curr_pos.frame_num += 1
                    num_frames_read += 1
                    if progress_bar:
                        progress_bar.update(1)
            frame_rgb = self._get_next_frame()
            if frame_rgb is None:
                break

            frame_gray = cv2.cvtColor(frame_rgb, cv2.COLOR_BGR2GRAY)
            frame_mask = bg_subtractor.apply(frame_gray)
            frame_filt = cv2.morphologyEx(frame_mask, cv2.MORPH_OPEN,
                                          self.kernel)
            frame_score = np.sum(frame_filt) / float(
                frame_filt.shape[0] * frame_filt.shape[1])
            event_window.append(frame_score)
            event_window = event_window[-self.min_event_len.frame_num:]

            if in_motion_event:
                # in event or post event, write all queued frames to file,
                # and write current frame to file.
                # if the current frame doesn't meet the threshold, increment
                # the current scene's post-event counter.
                if not self.scan_only_mode:
                    video_writer.write(frame_rgb)
                if frame_score >= self.threshold:
                    num_frames_post_event = 0
                else:
                    num_frames_post_event += 1
                    if num_frames_post_event >= self.post_event_len.frame_num:
                        in_motion_event = False
                        event_end = FrameTimecode(self.video_fps,
                                                  curr_pos.frame_num)
                        event_duration = FrameTimecode(
                            self.video_fps,
                            curr_pos.frame_num - event_start.frame_num)
                        self.event_list.append(
                            (event_start, event_end, event_duration))
                        if not self.comp_file and not self.scan_only_mode:
                            video_writer.release()
            else:
                if not self.scan_only_mode:
                    buffered_frames.append(frame_rgb)
                    buffered_frames = buffered_frames[-self.pre_event_len.
                                                      frame_num:]
                if len(event_window) >= self.min_event_len.frame_num and all(
                        score >= self.threshold for score in event_window):
                    in_motion_event = True
                    event_window = []
                    num_frames_post_event = 0
                    event_start = FrameTimecode(self.video_fps,
                                                curr_pos.frame_num)
                    # Open new VideoWriter if needed, write buffered_frames to file.
                    if not self.scan_only_mode:
                        if not self.comp_file:
                            output_path = '%s.DSME_%04d.avi' % (
                                output_prefix, len(self.event_list))
                            video_writer = cv2.VideoWriter(
                                output_path, self.fourcc, self.video_fps,
                                self.video_resolution)
                        for frame in buffered_frames:
                            video_writer.write(frame)
                        buffered_frames = []

            curr_pos.frame_num += 1
            num_frames_read += 1
            num_frames_processed += 1
            if progress_bar:
                progress_bar.update(1)

        # If we're still in a motion event, we still need to compute the duration
        # and ending timecode and add it to the event list.
        if in_motion_event:
            curr_pos.frame_num -= 1  # Correct for the increment at the end of the loop
            event_end = FrameTimecode(self.video_fps, curr_pos.frame_num)
            event_duration = FrameTimecode(
                self.video_fps, curr_pos.frame_num - event_start.frame_num)
            self.event_list.append((event_start, event_end, event_duration))

        if video_writer is not None:
            video_writer.release()
        if progress_bar is not None:
            progress_bar.close()
        elif not self.suppress_output:
            processing_time = time.time() - processing_start
            processing_rate = float(num_frames_read) / processing_time
            print(
                "[DVR-Scan] Processed %d / %d frames read in %3.1f secs (avg %3.1f FPS)."
                % (num_frames_processed, num_frames_read, processing_time,
                   processing_rate))
        if not self.event_list:
            print("[DVR-Scan] No motion events detected in input.")
            return

        print("[DVR-Scan] Detected %d motion events in input." %
              len(self.event_list))
        print("[DVR-Scan] Scan-only mode specified, list of motion events:")
        print("-------------------------------------------------------------")
        print("|   Event #    |  Start Time  |   Duration   |   End Time   |")
        print("-------------------------------------------------------------")
        for event_num, (event_start, event_end,
                        event_duration) in enumerate(self.event_list):
            print("|  Event %4d  |  %s  |  %s  |  %s  |" %
                  (event_num + 1, event_start.get_timecode(precision=1),
                   event_duration.get_timecode(precision=1),
                   event_end.get_timecode(precision=1)))
        print("-------------------------------------------------------------")

        if self.scan_only_mode:
            print("[DVR-Scan] Comma-separated timecode values:")
            timecode_list = []
            for event_start, event_end, event_duration in self.event_list:
                timecode_list.append(event_start.get_timecode())
                timecode_list.append(event_end.get_timecode())
            print(','.join(timecode_list))
        else:
            print("[DVR-Scan] Motion events written to disk.")
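
The per-frame motion score computed in the loop above is the mean value of the foreground mask after morphological opening. A self-contained sketch of that computation (function and variable names are illustrative):

    import cv2
    import numpy as np

    def motion_score(frame_bgr, bg_subtractor, kernel):
        # Grayscale -> background subtraction -> morphological opening -> mean,
        # matching the frame_score calculation in scan_motion().
        gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
        mask = bg_subtractor.apply(gray)
        filtered = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        return np.sum(filtered) / float(filtered.shape[0] * filtered.shape[1])

    # An event begins once min_event_len consecutive scores all reach the
    # threshold, and ends after post_event_len consecutive scores fall below it.
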
Example #5
    def scan_motion(self):
        # type: () -> None
        """ Performs motion analysis on the ScanContext's input video(s). """
        self._logger.info(
            "Scanning %s for motion events...",
            "%d input videos" % len(self._video_paths)
            if len(self._video_paths) > 1 else "input video")
        bg_subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
        buffered_frames = []
        event_window = []
        self.event_list = []
        num_frames_post_event = 0
        event_start = None

        video_writer = None
        output_prefix = ''
        if not self._comp_file and len(self._video_paths[0]) > 0:
            output_prefix = os.path.basename(self._video_paths[0])
            dot_index = output_prefix.rfind('.')
            if dot_index > 0:
                output_prefix = output_prefix[:dot_index]

        curr_pos = FrameTimecode(0, self._video_fps)
        in_motion_event = False
        self._frames_processed = 0
        processing_start = time.time()

        # Seek to starting position if required.
        if self._start_time is not None:
            while curr_pos.frame_num < self._start_time.frame_num:
                if self._get_next_frame(False) is None:
                    break
                self._frames_processed += 1
                curr_pos.frame_num += 1

        # Show ROI selection window if required.
        if not self._select_roi(curr_time=curr_pos,
                                add_timecode=self._draw_timecode):
            return

        # TQDM-based progress bar, or a stub if in quiet mode (or no TQDM).
        progress_bar = self._create_progress_bar(
            show_progress=self._show_progress, num_frames=self._frames_total)

        # Motion event scanning/detection loop.
        while self.running:
            if self._end_time is not None and curr_pos.frame_num >= self._end_time.frame_num:
                break
            if self._frame_skip > 0:
                for _ in range(self._frame_skip):
                    if self._get_next_frame(False) is None:
                        break
                    curr_pos.frame_num += 1
                    self._frames_processed += 1
                    progress_bar.update(1)
            frame_rgb = self._get_next_frame()
            if frame_rgb is None:
                break
            frame_rgb_origin = frame_rgb
            # Cut frame to selected sub-set if ROI area provided.
            if self._roi:
                frame_rgb = frame_rgb[int(self._roi[1]):int(self._roi[1] +
                                                            self._roi[3]),
                                      int(self._roi[0]):int(self._roi[0] +
                                                            self._roi[2])]
            # Apply downscaling factor if provided.
            if self._downscale_factor > 1:
                frame_rgb = frame_rgb[::self._downscale_factor, ::self.
                                      _downscale_factor, :]

            frame_gray = cv2.cvtColor(frame_rgb, cv2.COLOR_BGR2GRAY)
            frame_mask = bg_subtractor.apply(frame_gray)
            if self._kernel is not None:
                frame_filt = cv2.morphologyEx(frame_mask, cv2.MORPH_OPEN,
                                              self._kernel)
            else:
                frame_filt = frame_mask
            frame_score = np.sum(frame_filt) / float(
                frame_filt.shape[0] * frame_filt.shape[1])
            event_window.append(frame_score)
            event_window = event_window[-self._min_event_len.frame_num:]

            if in_motion_event:
                # in event or post event, write all queued frames to file,
                # and write current frame to file.
                # if the current frame doesn't meet the threshold, increment
                # the current scene's post-event counter.
                if not self._scan_only:
                    if self._draw_timecode:
                        self._stamp_text(frame_rgb_origin,
                                         curr_pos.get_timecode())
                    video_writer.write(frame_rgb_origin)
                if frame_score >= self._threshold:
                    num_frames_post_event = 0
                else:
                    num_frames_post_event += 1
                    if num_frames_post_event >= self._post_event_len.frame_num:
                        in_motion_event = False
                        event_end = FrameTimecode(curr_pos.frame_num,
                                                  self._video_fps)
                        event_duration = FrameTimecode(
                            curr_pos.frame_num - event_start.frame_num,
                            self._video_fps)
                        self.event_list.append(
                            (event_start, event_end, event_duration))
                        if not self._comp_file and not self._scan_only:
                            video_writer.release()
            else:
                if not self._scan_only:
                    buffered_frames.append(frame_rgb_origin)
                    buffered_frames = buffered_frames[-self._pre_event_len.
                                                      frame_num:]
                if len(event_window) >= self._min_event_len.frame_num and all(
                        score >= self._threshold for score in event_window):
                    in_motion_event = True
                    event_window = []
                    num_frames_post_event = 0
                    event_start = FrameTimecode(curr_pos.frame_num,
                                                self._video_fps)
                    # Open new VideoWriter if needed, write buffered_frames to file.
                    if not self._scan_only:
                        if not self._comp_file or video_writer is None:
                            output_path = (
                                self._comp_file
                                if self._comp_file else '%s.DSME_%04d.avi' %
                                (output_prefix, len(self.event_list)))
                            video_writer = cv2.VideoWriter(
                                output_path, self._fourcc, self._video_fps,
                                self._video_resolution)
                        for frame in buffered_frames:
                            if self._draw_timecode:
                                self._stamp_text(frame,
                                                 curr_pos.get_timecode())
                            video_writer.write(frame)
                        buffered_frames = []

            curr_pos.frame_num += 1
            self._frames_processed += 1
            progress_bar.update(1)

        # If we're still in a motion event, we still need to compute the duration
        # and ending timecode and add it to the event list.
        if in_motion_event:
            curr_pos.frame_num -= 1  # Correct for the increment at the end of the loop
            event_end = FrameTimecode(curr_pos.frame_num, self._video_fps)
            event_duration = FrameTimecode(
                curr_pos.frame_num - event_start.frame_num, self._video_fps)
            self.event_list.append((event_start, event_end, event_duration))

        if video_writer is not None:
            video_writer.release()
        if progress_bar is not None:
            progress_bar.close()

        self._post_scan_motion(processing_start=processing_start)

        return self.event_list
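
A sketch of how the returned list could be consumed (scanner is illustrative); each entry is a (start, end, duration) tuple of FrameTimecode objects:

    events = scanner.scan_motion()
    for i, (start, end, duration) in enumerate(events, start=1):
        # get_timecode() is the same accessor used for the table output above.
        print("Event %d: %s -> %s (%s)" % (i, start.get_timecode(),
                                           end.get_timecode(),
                                           duration.get_timecode()))
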