示例#1
0
    def test_tracking_speed(self):
        """Track both sample CPTV files and assert the time spent per frame
        stays below ``TestTrackingSpeed.MAX_FRAME_MS``.

        The same timing check runs twice: once for a clip without a
        pre-recorded background frame and once for a clip with one.
        """
        config = Config.get_defaults()
        dir_name = os.path.dirname(os.path.realpath(__file__))
        track_extractor = ClipTrackExtractor(
            config.tracking,
            config.use_opt_flow
            or config.classify.preview == Previewer.PREVIEW_TRACKING,
            cache_to_disk=False,
            verbose=config.verbose,
        )
        cases = (
            ("no background", TestTrackingSpeed.CPTV_FILE_NO_BACKGROUND),
            ("background", TestTrackingSpeed.CPTV_FILE_BACKGROUND),
        )
        for description, cptv_file in cases:
            file_name = os.path.join(dir_name, cptv_file)
            print("Tracking cptv with {} ".format(description), file_name)
            start = time.time()
            clip = Clip(config.tracking, file_name)
            track_extractor.parse_clip(clip)
            # max(1, ...) guards against division by zero on an empty clip
            ms_per_frame = ((time.time() - start) * 1000 /
                            max(1, len(clip.frame_buffer.frames)))
            print("Took {:.1f}ms per frame".format(ms_per_frame))
            assert ms_per_frame < TestTrackingSpeed.MAX_FRAME_MS
示例#2
0
    def new_clip(self):
        """Start a fresh streaming clip and replay the buffered preview
        frames through the track extractor."""
        clip = Clip(self.config.tracking, "stream")
        clip.video_start_time = datetime.now()
        clip.num_preview_frames = self.preview_frames
        clip.set_res(self.res_x, self.res_y)
        clip.set_frame_buffer(
            self.config.classify_tracking.high_quality_optical_flow,
            self.config.classify.cache_to_disk,
            self.config.use_opt_flow,
            True,
        )
        self.clip = clip

        # Feed through the frames captured before recording was triggered.
        for buffered_frame in self.motion_detector.thermal_window.get_frames():
            self.track_extractor.process_frame(self.clip, buffered_frame.copy())
示例#3
0
    def extract_tracks(self, filename):
        """Run track extraction over a CPTV file and return the parsed clip.

        :param filename: path of the CPTV file to process
        :return: the Clip with tracks populated by the track extractor
        :raises FileNotFoundError: if ``filename`` does not exist
            (subclass of Exception, so existing callers are unaffected)
        """
        if not os.path.exists(filename):
            raise FileNotFoundError("File {} not found.".format(filename))
        logging.info("Processing file '{}'".format(filename))

        clip = Clip(self.config.tracking, filename)
        self.track_extractor.parse_clip(clip)
        return clip
示例#4
0
    def process_file(self, filename, **kwargs):
        """
        Process a file extracting tracks and identifying them.
        :param filename: filename to process
        :param enable_preview: if true an MPEG preview file is created.
        """

        if not os.path.exists(filename):
            raise Exception("File {} not found.".format(filename))

        logging.info("Processing file '{}'".format(filename))

        start = time.time()
        clip = Clip(self.tracker_config, filename)
        self.track_extractor.parse_clip(clip)

        classify_name = self.get_classify_filename(filename)
        destination_folder = os.path.dirname(classify_name)
        if not os.path.exists(destination_folder):
            logging.info("Creating folder {}".format(destination_folder))
            os.makedirs(destination_folder)

        mpeg_filename = classify_name + ".mp4"
        meta_filename = classify_name + ".txt"

        logging.info(os.path.basename(filename) + ":")

        # Classify every extracted track and log its best description.
        track_count = len(clip.tracks)
        for index, track in enumerate(clip.tracks, start=1):
            prediction = self.identify_track(clip, track)
            description = prediction.description(self.classifier.labels)
            logging.info(" - [{}/{}] prediction: {}".format(
                index, track_count, description))

        if self.previewer:
            logging.info("Exporting preview to '{}'".format(mpeg_filename))
            self.previewer.export_clip_preview(mpeg_filename, clip,
                                               self.predictions)
        logging.info("saving meta data")
        self.save_metadata(filename, meta_filename, clip)
        self.predictions.clear_predictions()

        if self.tracker_config.verbose:
            # max(1, ...) avoids division by zero on an empty clip
            elapsed_ms = (time.time() - start) * 1000
            frame_count = max(1, len(clip.frame_buffer.frames))
            ms_per_frame = elapsed_ms / frame_count
            logging.info("Took {:.1f}ms per frame".format(ms_per_frame))
示例#5
0
    def classify_file(self, filename):
        """Extract tracks from ``filename``, classify each one, and return
        the clip together with its per-track predictions."""
        if not os.path.exists(filename):
            raise Exception("File {} not found.".format(filename))
        logging.info("Processing file '{}'".format(filename))

        # prediction record for each track
        predictions = Predictions(self.classifier.labels)

        start = time.time()
        clip = Clip(self.tracker_config, filename)
        self.track_extractor.parse_clip(clip)

        total_tracks = len(clip.tracks)
        for index, track in enumerate(clip.tracks, start=1):
            prediction = self.identify_track(clip, track)
            predictions.prediction_per_track[track.get_id()] = prediction
            description = prediction.description(self.classifier.labels)
            logging.info(" - [{}/{}] prediction: {}".format(
                index, total_tracks, description))

        if self.tracker_config.verbose:
            # max(1, ...) avoids division by zero on an empty clip
            frame_total = max(1, len(clip.frame_buffer.frames))
            ms_per_frame = (time.time() - start) * 1000 / frame_total
            logging.info("Took {:.1f}ms per frame".format(ms_per_frame))

        return clip, predictions
示例#6
0
class PiClassifier(Processor):
    """ Classifies frames from leptond """

    PROCESS_FRAME = 3
    # classify at most this many tracks on any one frame
    NUM_CONCURRENT_TRACKS = 1
    # log performance stats every this many frames
    DEBUG_EVERY = 100
    MAX_CONSEC = 3
    # after every MAX_CONSEC frames skip this many frames
    # this gives the cpu a break
    SKIP_FRAMES = 7

    def __init__(self, config, thermal_config, classifier):
        """Build the tracking pipeline, motion detector and recorder, then
        warm up the classifier model.

        :param config: application config (tracking / classify settings)
        :param thermal_config: camera-side config (recorder, motion)
        :param classifier: loaded model exposing ``labels`` and
            ``classify_frame_with_novelty``
        """
        self.frame_num = 0
        self.clip = None
        self.tracking = False
        self.enable_per_track_information = False
        self.rolling_track_classify = {}
        self.skip_classifying = 0
        self.classified_consec = 0
        self.config = config
        self.classifier = classifier
        self.num_labels = len(classifier.labels)
        self._res_x = self.config.res_x
        self._res_y = self.config.res_y
        self.predictions = Predictions(classifier.labels)
        self.preview_frames = (thermal_config.recorder.preview_secs *
                               thermal_config.recorder.frame_rate)
        # working area excludes a border of edge_pixels on every side
        edge = self.config.tracking.edge_pixels
        self.crop_rectangle = tools.Rectangle(edge, edge,
                                              self.res_x - 2 * edge,
                                              self.res_y - 2 * edge)

        try:
            self.fp_index = self.classifier.labels.index("false-positive")
        except ValueError:
            # model has no false-positive label; the FP down-weighting in
            # identify_last_frame is skipped
            self.fp_index = None

        self.track_extractor = ClipTrackExtractor(
            self.config.tracking,
            self.config.use_opt_flow,
            self.config.classify.cache_to_disk,
            keep_frames=False,
            calc_stats=False,
        )
        self.motion_config = thermal_config.motion
        self.min_frames = (thermal_config.recorder.min_secs *
                           thermal_config.recorder.frame_rate)
        self.max_frames = (thermal_config.recorder.max_secs *
                           thermal_config.recorder.frame_rate)
        self.motion_detector = MotionDetector(
            self.res_x,
            self.res_y,
            thermal_config,
            self.config.tracking.dynamic_thresh,
            CPTVRecorder(thermal_config),
        )
        # force the model into memory before the first real frame arrives
        self.startup_classifier()

        self._output_dir = thermal_config.recorder.output_dir
        self.meta_dir = os.path.join(thermal_config.recorder.output_dir,
                                     "metadata")
        if not os.path.exists(self.meta_dir):
            os.makedirs(self.meta_dir)

    def new_clip(self):
        """Start a new streaming clip and feed the buffered preview frames
        through the track extractor."""
        self.clip = Clip(self.config.tracking, "stream")
        self.clip.video_start_time = datetime.now()
        self.clip.num_preview_frames = self.preview_frames

        self.clip.set_res(self.res_x, self.res_y)
        self.clip.set_frame_buffer(
            self.config.classify_tracking.high_quality_optical_flow,
            self.config.classify.cache_to_disk,
            self.config.use_opt_flow,
            True,
        )

        # process preview_frames
        frames = self.motion_detector.thermal_window.get_frames()
        for frame in frames:
            self.track_extractor.process_frame(self.clip, frame.copy())

    def startup_classifier(self):
        # classifies an empty frame to force loading of the model into memory

        p_frame = np.zeros((5, 48, 48), np.float32)
        self.classifier.classify_frame_with_novelty(p_frame, None)

    def get_active_tracks(self):
        """
        Gets current clips active_tracks and returns the top NUM_CONCURRENT_TRACKS order by priority
        """
        active_tracks = self.clip.active_tracks
        # cheap exit: nothing to rank when already within the limit
        if len(active_tracks) <= PiClassifier.NUM_CONCURRENT_TRACKS:
            return active_tracks
        active_predictions = []
        for track in active_tracks:
            prediction = self.predictions.get_or_create_prediction(
                track, keep_all=False)
            active_predictions.append(prediction)

        # highest priority first
        top_priority = sorted(
            active_predictions,
            key=lambda i: i.get_priority(self.clip.frame_on),
            reverse=True,
        )

        # NOTE: elements here are prediction objects, so the id comes from
        # prediction.track_id rather than track.get_id()
        top_priority = [
            track.track_id
            for track in top_priority[:PiClassifier.NUM_CONCURRENT_TRACKS]
        ]
        classify_tracks = [
            track for track in active_tracks if track.get_id() in top_priority
        ]
        return classify_tracks

    def identify_last_frame(self):
        """
        Runs through track identifying segments, and then returns it's prediction of what kind of animal this is.
        One prediction will be made for every active_track of the last frame.
        :return: TrackPrediction object
        """

        # exponential smoothing factor blending the new prediction into the
        # track's running prediction
        prediction_smooth = 0.1

        smooth_prediction = None
        smooth_novelty = None

        prediction = 0.0
        novelty = 0.0
        active_tracks = self.get_active_tracks()
        frame = self.clip.frame_buffer.get_last_frame()
        if frame is None:
            return
        thermal_reference = np.median(frame.thermal)

        for i, track in enumerate(active_tracks):
            track_prediction = self.predictions.get_or_create_prediction(
                track, keep_all=False)
            region = track.bounds_history[-1]
            # only classify a track whose latest region is on this frame
            if region.frame_number != frame.frame_number:
                logging.warning(
                    "frame doesn't match last frame {} and {}".format(
                        region.frame_number, frame.frame_number))
            else:
                track_data = track.crop_by_region(frame, region)
                # we use a tighter cropping here so we disable the default 2 pixel inset
                frames = Preprocessor.apply([track_data], [thermal_reference],
                                            default_inset=0)
                if frames is None:
                    logging.warning(
                        "Frame {} of track could not be classified.".format(
                            region.frame_number))
                    continue
                p_frame = frames[0]
                (
                    prediction,
                    novelty,
                    state,
                ) = self.classifier.classify_frame_with_novelty(
                    p_frame, track_prediction.state)
                track_prediction.state = state

                # down-weight the false-positive class when the model has one
                if self.fp_index is not None:
                    prediction[self.fp_index] *= 0.8
                # decay the recurrent state slightly between frames
                state *= 0.98
                mass = region.mass
                # low-mass regions get less weight; sqrt softens the scaling
                mass_weight = np.clip(mass / 20, 0.02, 1.0)**0.5
                # cropped regions are less reliable, so reduce their weight
                cropped_weight = 0.7 if region.was_cropped else 1.0

                prediction *= mass_weight * cropped_weight

                if len(track_prediction.predictions) == 0:
                    # first classification of this track: seed the smoothed
                    # values instead of blending with history
                    if track_prediction.uniform_prior:
                        smooth_prediction = np.ones([self.num_labels
                                                     ]) * (1 / self.num_labels)
                    else:
                        smooth_prediction = prediction
                    smooth_novelty = 0.5
                else:
                    smooth_prediction = track_prediction.predictions[-1]
                    smooth_novelty = track_prediction.novelties[-1]
                    smooth_prediction = (
                        1 - prediction_smooth
                    ) * smooth_prediction + prediction_smooth * prediction
                    smooth_novelty = (
                        1 - prediction_smooth
                    ) * smooth_novelty + prediction_smooth * novelty
                track_prediction.classified_frame(self.clip.frame_on,
                                                  smooth_prediction,
                                                  smooth_novelty)

    def get_recent_frame(self):
        """Return the most recent frame seen by the motion detector."""
        return self.motion_detector.get_recent_frame()

    def disconnected(self):
        """Finish any open clip and tell the motion detector the camera
        connection dropped."""
        self.end_clip()
        self.motion_detector.disconnected()

    def skip_frame(self):
        """Consume one frame without classifying it."""
        self.skip_classifying -= 1

        if self.clip:
            self.clip.frame_on += 1

    def process_frame(self, lepton_frame):
        """Feed one camera frame through motion detection, tracking and
        (when due) classification; log throughput stats periodically.

        :param lepton_frame: raw camera frame exposing ``pix``
        """
        start = time.time()
        self.motion_detector.process_frame(lepton_frame)
        if self.motion_detector.recorder.recording:
            if self.clip is None:
                self.new_clip()
            self.track_extractor.process_frame(
                self.clip, lepton_frame.pix, self.motion_detector.ffc_affected)
            # during an FFC event or the preview period classification is
            # unreliable, so back off
            if self.motion_detector.ffc_affected or self.clip.on_preview():
                self.skip_classifying = PiClassifier.SKIP_FRAMES
                self.classified_consec = 0
            elif (self.motion_detector.ffc_affected is False
                  and self.clip.active_tracks and self.skip_classifying <= 0
                  and not self.clip.on_preview()):
                self.identify_last_frame()
                self.classified_consec += 1
                # after MAX_CONSEC classified frames in a row, rest the CPU
                if self.classified_consec == PiClassifier.MAX_CONSEC:
                    self.skip_classifying = PiClassifier.SKIP_FRAMES
                    self.classified_consec = 0

        elif self.clip is not None:
            self.end_clip()

        # NOTE(review): skip_classifying is also decremented in skip_frame(),
        # so it can fall faster than one per processed frame — confirm intended
        self.skip_classifying -= 1
        self.frame_num += 1
        end = time.time()
        timetaken = end - start
        if (self.motion_detector.can_record()
                and self.frame_num % PiClassifier.DEBUG_EVERY == 0):
            logging.info(
                "fps {}/sec time to process {}ms cpu % {} memory % {}".format(
                    round(1 / timetaken, 2),
                    round(timetaken * 1000, 2),
                    psutil.cpu_percent(),
                    psutil.virtual_memory()[2],
                ))

    def create_mp4(self):
        """Export an MPEG preview of the current clip with its predictions."""
        previewer = Previewer(self.config, "classified")
        previewer.export_clip_preview(self.clip.get_id() + ".mp4", self.clip,
                                      self.predictions)

    def end_clip(self):
        """Close out the current clip: log results, write metadata, and reset
        per-clip state."""
        if self.clip:
            for _, prediction in self.predictions.prediction_per_track.items():
                if prediction.max_score:
                    logging.info("Clip {} {}".format(
                        self.clip.get_id(),
                        prediction.description(self.predictions.labels),
                    ))
            self.save_metadata()
            self.predictions.clear_predictions()
            self.clip = None
            self.tracking = False

    def save_metadata(self):
        """Write a timestamped JSON metadata file for the current clip into
        ``self.meta_dir``, covering clip-level info and per-track results."""
        filename = datetime.now().strftime("%Y%m%d.%H%M%S.%f.meta")

        # record results in text file.
        save_file = {}
        start, end = self.clip.start_and_end_time_absolute()
        save_file["start_time"] = start.isoformat()
        save_file["end_time"] = end.isoformat()
        save_file["temp_thresh"] = self.clip.temp_thresh
        save_file["algorithm"] = {}
        save_file["algorithm"]["model"] = self.config.classify.model
        save_file["algorithm"]["tracker_version"] = self.clip.VERSION
        save_file["tracks"] = []
        for track in self.clip.tracks:
            track_info = {}
            prediction = self.predictions.prediction_for(track.get_id())
            start_s, end_s = self.clip.start_and_end_in_secs(track)
            save_file["tracks"].append(track_info)
            track_info["start_s"] = round(start_s, 2)
            track_info["end_s"] = round(end_s, 2)
            track_info["num_frames"] = track.frames
            track_info["frame_start"] = track.start_frame
            track_info["frame_end"] = track.end_frame
            # prediction details are only written when the track was classified
            if prediction and prediction.best_label_index is not None:
                track_info["label"] = self.classifier.labels[
                    prediction.best_label_index]
                track_info["confidence"] = round(prediction.score(), 2)
                track_info["clarity"] = round(prediction.clarity, 3)
                track_info["average_novelty"] = round(
                    prediction.average_novelty, 2)
                track_info["max_novelty"] = round(prediction.max_novelty, 2)
                track_info["all_class_confidences"] = {}
                for i, value in enumerate(prediction.class_best_score):
                    label = self.classifier.labels[i]
                    track_info["all_class_confidences"][label] = round(
                        float(value), 3)

            # region objects are serialized by tools.CustomJSONEncoder below
            positions = []
            for region in track.bounds_history:
                track_time = round(
                    region.frame_number / self.clip.frames_per_second, 2)
                positions.append([track_time, region])
            track_info["positions"] = positions

        with open(os.path.join(self.meta_dir, filename), "w") as f:
            json.dump(save_file, f, indent=4, cls=tools.CustomJSONEncoder)

    @property
    def res_x(self):
        # horizontal camera resolution in pixels
        return self._res_x

    @property
    def res_y(self):
        # vertical camera resolution in pixels
        return self._res_y

    @property
    def output_dir(self):
        # directory recordings are written to
        return self._output_dir
示例#7
0
    def process_file(self, filename, cache=None, reuse_frames=None):
        """
        Process a file extracting tracks and identifying them.
        :param filename: filename to process; a sibling "<name>.txt" metadata
            file must exist next to it
        :param cache: when not None, overrides config.classify.cache_to_disk
        :param reuse_frames: passed through to classify_clip
        :return: the updated metadata produced by save_metadata
        :raises Exception: if the CPTV file or its metadata file is missing
        """

        base_filename = os.path.splitext(os.path.basename(filename))[0]
        meta_file = os.path.join(os.path.dirname(filename), base_filename + ".txt")
        if not os.path.exists(filename):
            raise Exception("File {} not found.".format(filename))
        if not os.path.exists(meta_file):
            raise Exception("File {} not found.".format(meta_file))
        meta_data = tools.load_clip_metadata(meta_file)
        logging.info("Processing file '{}'".format(filename))
        # an explicit cache argument wins over the configured default
        cache_to_disk = (
            cache if cache is not None else self.config.classify.cache_to_disk
        )
        clip = Clip(self.config.tracking, filename)
        clip.set_frame_buffer(
            self.high_quality_optical_flow,
            cache_to_disk,
            self.config.use_opt_flow,
            True,
        )
        clip.load_metadata(
            meta_data,
            self.config.load.tag_precedence,
        )
        with open(clip.source_file, "rb") as f:
            reader = CPTVReader(f)
            clip.set_res(reader.x_resolution, reader.y_resolution)
            clip.calculate_background(reader)
            # rewind so the main pass below sees every frame again
            f.seek(0)
            for frame in reader:
                if frame.background_frame:
                    continue
                clip.add_frame(
                    frame.pix,
                    frame.pix - clip.background,
                    ffc_affected=is_affected_by_ffc(frame),
                )
        # classify with the single configured model, or all models
        predictions_per_model = {}
        if self.model:
            prediction = self.classify_clip(
                clip, self.model, meta_data, reuse_frames=reuse_frames
            )
            predictions_per_model[self.model.id] = prediction
        else:
            for model in self.config.classify.models:
                prediction = self.classify_clip(
                    clip, model, meta_data, reuse_frames=reuse_frames
                )
                predictions_per_model[model.id] = prediction

        if self.previewer:
            mpeg_filename = os.path.join(
                os.path.dirname(filename), base_filename + ".mp4"
            )
            logging.info("Exporting preview to '{}'".format(mpeg_filename))

            # the preview only renders predictions from the first model
            self.previewer.export_clip_preview(
                mpeg_filename, clip, list(predictions_per_model.values())[0]
            )
        logging.info("saving meta data %s", meta_file)
        models = [self.model] if self.model else self.config.classify.models
        meta_data = self.save_metadata(
            meta_data,
            meta_file,
            clip,
            predictions_per_model,
            models,
        )
        if cache_to_disk:
            clip.frame_buffer.remove_cache()
        return meta_data