예제 #1
0
    def process_frames(self, clip, raw_frames):
        """
        Run the frame pipeline over raw_frames for clip.

        Builds the whole-clip background model and statistics from the
        frames NOT affected by FFC (flat-field correction) events, then
        feeds every frame (FFC-affected or not) through per-frame
        processing, advancing clip.frame_on as it goes.

        :param clip: the clip being processed; its background, stats and
            frame_on counter are updated in place.
        :param raw_frames: sequence of frames, each exposing a `pix` array.
        :return: False when every frame is FFC affected (nothing usable);
            otherwise None.
        """
        # For now the background/stats are always recalculated, since the
        # stats are needed downstream.
        non_ffc_frames = [
            frame.pix for frame in raw_frames if not is_affected_by_ffc(frame)
        ]
        if not non_ffc_frames:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning("Clip only has ffc affected frames")
            return False
        clip.background_from_whole_clip(non_ffc_frames)
        # NOTE(review): unclear whether FFC frames should contribute to the
        # whole-clip stats; currently they are excluded.
        self._whole_clip_stats(clip, non_ffc_frames)
        if clip.background_is_preview:
            if clip.preview_frames > 0:
                clip.background_from_frames(raw_frames)
            else:
                logging.info(
                    "No preview secs defined for CPTV file - using statistical background measurement"
                )

        # Process each frame, tracking the clip's frame counter.
        for frame in raw_frames:
            ffc_affected = is_affected_by_ffc(frame)
            self._process_frame(clip, frame.pix, ffc_affected)
            clip.frame_on += 1
    def parse_clip(self, clip, process_background=False):
        """
        Load a CPTV file and run track extraction over its frames.

        Two passes are made over the file: the first reads header
        metadata and builds the background model, the second feeds each
        frame through per-frame processing.

        :param clip: the clip to populate (resolution, model, stats, ...).
        :param process_background: when False, frames flagged as
            background frames are skipped in the second pass.
        :return: True on completion; tracking_time records elapsed seconds.
        """
        self.tracking_time = None
        start_time = time.time()
        clip.set_frame_buffer(
            self.high_quality_optical_flow,
            self.cache_to_disk,
            self.use_opt_flow,
            self.keep_frames,
        )

        # First pass: header metadata + background calculation.
        with open(clip.source_file, "rb") as stream:
            header_reader = CPTVReader(stream)
            clip.set_res(header_reader.x_resolution, header_reader.y_resolution)
            if clip.from_metadata:
                for track in clip.tracks:
                    track.crop_regions()
            clip.set_model(
                header_reader.model.decode() if header_reader.model else None
            )

            # if we have the triggered motion threshold should use that
            # maybe even override dynamic threshold with this value
            if header_reader.motion_config:
                motion = yaml.safe_load(header_reader.motion_config)
                triggered = motion.get("triggeredthresh")
                if triggered:
                    clip.temp_thresh = triggered

            clip.set_video_stats(
                header_reader.timestamp.astimezone(Clip.local_tz)
            )
            clip.calculate_background(header_reader)

        # Second pass: run every (non-background) frame through tracking.
        with open(clip.source_file, "rb") as stream:
            for frame in CPTVReader(stream):
                if not process_background and frame.background_frame:
                    continue
                self.process_frame(clip, frame.pix, is_affected_by_ffc(frame))

        if not clip.from_metadata:
            self.apply_track_filtering(clip)

        if self.calc_stats:
            clip.stats.completed(clip.current_frame, clip.res_y, clip.res_x)
        self.tracking_time = time.time() - start_time
        return True
예제 #3
0
    def parse_clip(self, clip):
        """
        Loads a cptv file, and prepares for track extraction.

        Reads resolution, start time and preview length from the CPTV
        header, then either processes frames one-by-one when a preview
        section defines the background, or loads every frame and derives
        the background statistically.

        :param clip: the clip to populate (resolution, stats, tracks).
        :return: True on success, False when the clip is unusable (all
            frames FFC affected).
        """

        clip.set_frame_buffer(
            self.config.high_quality_optical_flow,
            self.cache_to_disk,
            self.use_opt_flow,
            self.keep_frames,
        )

        with open(clip.source_file, "rb") as f:
            reader = CPTVReader(f)
            clip.set_res(reader.x_resolution, reader.y_resolution)
            video_start_time = reader.timestamp.astimezone(Clip.local_tz)
            # Frames covered by the recorder's preview period, minus the
            # configured number of frames to ignore.
            clip.num_preview_frames = (
                reader.preview_secs * clip.frames_per_second -
                self.config.ignore_frames)
            clip.set_video_stats(video_start_time)
            # we need to load the entire video so we can analyse the background.

            if clip.background_is_preview and clip.num_preview_frames > 0:
                for frame in reader:
                    self.process_frame(clip, frame.pix,
                                       is_affected_by_ffc(frame))

                if clip.on_preview():
                    # logging.warn is a deprecated alias of logging.warning.
                    logging.warning("Clip is all preview frames")
                    if clip.background is None:
                        logging.warning("Clip only has ffc affected frames")
                        return False

                    clip._set_from_background()
                    self._process_preview_frames(clip)
            else:
                clip.background_is_preview = False
                # Materialise all frames for statistical background analysis.
                self.process_frames(clip, list(reader))

        if not clip.from_metadata:
            self.apply_track_filtering(clip)

        if self.calc_stats:
            clip.stats.completed(clip.frame_on, clip.res_y, clip.res_x)

        return True
예제 #4
0
    def calculate_background(self, frame_reader):
        """
        Calculate background by reading whole clip and grouping into sets of
        9 frames. Take the average of these 9 frames and use the minimum
        over the sets as the initial background.
        Also check for animals in the background by checking for connected
        components in the initial_diff frame - this is the maximum change
        between the first average frame and all other average frames in
        the clip.

        :param frame_reader: iterable of frames exposing `pix` and
            `background_frame`, plus a `background_frames` count.
        """
        frames = []
        # If the recorder wrote dedicated background frames at the start of
        # the stream, average those and use the result directly.
        if frame_reader.background_frames > 0:
            for frame in frame_reader:
                if frame.background_frame:
                    frames.append(frame.pix)
                else:
                    break
            frame_average = np.average(frames, axis=0)
            self.update_background(frame_average)
            self._background_calculated()
            return

        initial_frames = None
        initial_diff = None
        first_frame = None
        for frame in frame_reader:
            if first_frame is None:
                first_frame = frame.pix
            # FFC events distort thermal values, so those frames are
            # excluded from the background estimate.
            if is_affected_by_ffc(frame):
                continue
            frames.append(frame.pix)
            if len(frames) == 9:
                frame_average = np.average(frames, axis=0)
                self.update_background(frame_average)
                initial_diff = self.calculate_initial_diff(
                    frame_average, initial_frames, initial_diff)
                if initial_frames is None:
                    initial_frames = frame_average

                frames = []

        # Handle the final, possibly-partial group of frames.
        if len(frames) > 0:
            frame_average = np.average(frames, axis=0)
            # NOTE(review): here initial_frames is set BEFORE
            # calculate_initial_diff, whereas the loop above sets it after
            # - confirm which ordering is intended.
            if initial_frames is None:
                initial_frames = frame_average
            self.update_background(frame_average)
            initial_diff = self.calculate_initial_diff(frame_average,
                                                       initial_frames,
                                                       initial_diff)
        frames = []
        if initial_diff is None:
            if first_frame is not None:
                # Fall back if the whole clip is FFC affected.
                # Bug fix: previously used the loop variable `frame` (the
                # last frame read) instead of the captured `first_frame`.
                self.update_background(first_frame)
                self._background_calculated()
            return
        np.clip(initial_diff, 0, None, out=initial_diff)
        initial_frames = self.remove_background_animals(
            initial_frames, initial_diff)

        self.update_background(initial_frames)
        self._background_calculated()