    # These methods assume the enclosing module already imports numpy as np,
    # PIL.Image and PIL.ImageDraw, logging, and the project helpers they call
    # (MPEGCreator, convert_heat_to_img, TrackExtractor, Region, Clip,
    # Previewer).
    def export_clip_preview(self, filename, tracker: TrackExtractor):
        """
        Exports a clip showing the tracking and predictions for objects within the clip.
        """

        # increase the resolution of the video file;
        # videos look much better scaled up
        FRAME_SCALE = 4.0

        NORMALISATION_SMOOTH = 0.95
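        # the normalisation range is smoothed from frame to frame (an
        # exponential moving average of each frame's min/max) so the preview
        # brightness changes gradually instead of jumping between frames.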

        auto_min = np.min(tracker.frames[0])
        auto_max = np.max(tracker.frames[0])

        # setting quality to 30 gives files approximately the same size as the original CPTV MPEG previews
        # (but they look quite compressed)
        mpeg = MPEGCreator(filename)

        for frame_number, thermal in enumerate(tracker.frames):
            auto_min = NORMALISATION_SMOOTH * auto_min + (
                1 - NORMALISATION_SMOOTH) * np.min(thermal)
            auto_max = NORMALISATION_SMOOTH * auto_max + (
                1 - NORMALISATION_SMOOTH) * np.max(thermal)

            thermal_image = convert_heat_to_img(thermal, self.colormap,
                                                auto_min, auto_max)
            thermal_image = thermal_image.resize(
                (int(thermal_image.width * FRAME_SCALE),
                 int(thermal_image.height * FRAME_SCALE)), Image.BILINEAR)

            if tracker.filtered_frames:
                if self.enable_side_by_side:
                    # put thermal & tracking images side by side
                    tracking_image = self.export_tracking_frame(
                        tracker, frame_number, FRAME_SCALE)
                    side_by_side_image = Image.new(
                        'RGB',
                        (tracking_image.width * 2, tracking_image.height))
                    side_by_side_image.paste(thermal_image, (0, 0))
                    side_by_side_image.paste(tracking_image,
                                             (tracking_image.width, 0))
                    mpeg.next_frame(np.asarray(side_by_side_image))
                else:
                    # overlay track rectangles on the original thermal image
                    thermal_image = self.draw_track_rectangles(
                        tracker, frame_number, FRAME_SCALE, thermal_image)
                    mpeg.next_frame(np.asarray(thermal_image))

            else:
                # no filtered frames available (clip too hot or
                # background moving?) so just output the original
                # frame without the tracking frame.
                mpeg.next_frame(np.asarray(thermal_image))

            # we store the entire video in memory so we need to cap the frame count at some point.
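            # 9 fps * 60 s * 10 min: stop after roughly ten minutes of footage.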
            if frame_number > 9 * 60 * 10:
                break

        mpeg.close()
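
    # Illustrative usage only: "previewer" below stands for an instance of the
    # class that owns this method (not shown in this snippet) and "tracker"
    # for a TrackExtractor that has already processed a clip.
    #
    #   previewer.export_clip_preview("clip-preview.mp4", tracker)
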
    def export_clip_preview(self,
                            filename,
                            tracker: TrackExtractor,
                            track_predictions=None):
        """
        Exports a clip showing the tracking and predictions for objects within the clip.
        """

        # the tracker's stats provide the temperature range used to normalise
        # the frames; without them no preview can be produced.
        if tracker.stats:
            self.auto_max = tracker.stats['max_temp']
            self.auto_min = tracker.stats['min_temp']
        else:
            print("No temperature statistics available; skipping preview.")
            return

        if track_predictions and self.preview_type == self.PREVIEW_CLASSIFIED:
            self.create_track_descriptions(tracker, track_predictions)

        if self.preview_type == self.PREVIEW_TRACKING and not tracker.frame_buffer.flow:
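            # the tracking preview needs optical flow, so compute it before
            # rendering any frames.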
            tracker.generate_optical_flow()

        mpeg = MPEGCreator(filename)

        for frame_number, thermal in enumerate(tracker.frame_buffer.thermal):
            if self.preview_type == self.PREVIEW_RAW:
                image = self.convert_and_resize(thermal)

            elif self.preview_type == self.PREVIEW_TRACKING:
                image = self.create_four_tracking_image(
                    tracker.frame_buffer, frame_number)
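                # nearest-neighbour resampling avoids blurring the composite
                # tracking panels when they are scaled up.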
                image = self.convert_and_resize(image, 3.0, mode=Image.NEAREST)
                draw = ImageDraw.Draw(image)
                regions = tracker.region_history[frame_number]
                self.add_regions(draw, regions)
                self.add_regions(draw, regions, v_offset=120)
                self.add_tracks(draw, tracker.tracks, frame_number)

            elif self.preview_type == self.PREVIEW_BOXES:
                image = self.convert_and_resize(thermal, 4.0)
                draw = ImageDraw.Draw(image)
                screen_bounds = Region(0, 0, image.width, image.height)
                self.add_tracks(draw, tracker.tracks, frame_number)

            elif self.preview_type == self.PREVIEW_CLASSIFIED:
                image = self.convert_and_resize(thermal, 4.0)
                draw = ImageDraw.Draw(image)
                screen_bounds = Region(0, 0, image.width, image.height)
                self.add_tracks(draw, tracker.tracks, frame_number,
                                track_predictions, screen_bounds)

            mpeg.next_frame(np.asarray(image))

            # we store the entire video in memory so we need to cap the frame count at some point.
            if frame_number > 9 * 60 * 10:
                break
        mpeg.close()

    def export_track_preview(self, filename, track):
        """
        Exports a clip showing tracking of one specific track with point in time predictions.
        """

        preview_scale = 4.0
        predictions = self.track_prediction[track].prediction_history
        mpeg = MPEGCreator(filename)

        for i in range(track.frames):
            # export a MPEG preview of the track
            frame = track.get_frame(i)
            draw_frame = np.float16(frame[:, :, 1])
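            # normalise against a fixed 0-300 range rather than the frame's
            # own min/max, so brightness stays consistent across the track.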
            img = convert_heat_to_img(draw_frame, self.colormap, 0, 300)
            img = img.resize((int(
                img.width * preview_scale), int(img.height * preview_scale)),
                             Image.NEAREST)

            # just in case we don't have as many predictions as frames.
            if i >= len(predictions):
                continue

            # draw predictions
            prediction = predictions[i]

            best_labels = np.argsort(-prediction)[:3]
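            # best_labels holds the indices of the three highest-scoring
            # classes, best first.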

            width, height = img.width, img.height

            draw = ImageDraw.Draw(img)
            for rank, label in enumerate(best_labels):
                score = prediction[label]
                x = 10
                y = height - 100 + 10 + rank * 30
                # a dark background bar, then a bright bar whose length is
                # proportional to the class score
                draw.rectangle([x, y + 16, width - 10, y + 26],
                               outline=(0, 0, 0),
                               fill=(0, 64, 0, 64))
                draw.rectangle([x, y + 16, 10 + score * (width - 20), y + 26],
                               outline=(0, 0, 0),
                               fill=(64, 255, 0, 250))
                draw.text([x, y],
                          self.classifier.classes[label],
                          font=self.font)

            mpeg.next_frame(np.asarray(img))

        mpeg.close()

    def export_clip_preview(self, filename, clip: Clip, predictions=None):
        """
        Exports a clip showing the tracking and predictions for objects within the clip.
        """

        logging.info("creating clip preview %s", filename)

        # the clip stats provide the temperature range used to normalise the
        # frames; without them no preview can be produced.
        if not clip.stats:
            logging.error("No temperature statistics available; skipping preview.")
            return

        if self.debug:
            footer = Previewer.stats_footer(clip.stats)
        if predictions and self.preview_type in (self.PREVIEW_CLASSIFIED,
                                                 self.PREVIEW_TRACKING):
            self.create_track_descriptions(clip, predictions)

        if clip.stats.min_temp is None or clip.stats.max_temp is None:
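            # fall back to scanning the raw frames when the stats are missing
            # a temperature range.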
            thermals = [frame.thermal for frame in clip.frame_buffer.frames]
            clip.stats.min_temp = np.amin(thermals)
            clip.stats.max_temp = np.amax(thermals)
        mpeg = MPEGCreator(filename)
        frame_scale = 4.0
        for frame_number, frame in enumerate(clip.frame_buffer):
            if self.preview_type == self.PREVIEW_RAW:
                image = self.convert_and_resize(frame.thermal,
                                                clip.stats.min_temp,
                                                clip.stats.max_temp)
                draw = ImageDraw.Draw(image)
            elif self.preview_type == self.PREVIEW_TRACKING:
                image = self.create_four_tracking_image(
                    frame,
                    clip.stats.min_temp,
                    clip.stats.max_temp,
                )
                draw = ImageDraw.Draw(image)
                self.add_tracks(
                    draw,
                    clip.tracks,
                    frame_number,
                    predictions,
                    scale=frame_scale,
                )

            elif self.preview_type == self.PREVIEW_BOXES:
                image = self.convert_and_resize(
                    frame.thermal,
                    clip.stats.min_temp,
                    clip.stats.max_temp,
                    frame_scale=frame_scale,
                )
                draw = ImageDraw.Draw(image)
                screen_bounds = Region(0, 0, image.width, image.height)
                self.add_tracks(
                    draw,
                    clip.tracks,
                    frame_number,
                    colours=[(128, 255, 255)],
                    scale=frame_scale,
                )

            elif self.preview_type == self.PREVIEW_CLASSIFIED:
                image = self.convert_and_resize(
                    frame.thermal,
                    clip.stats.min_temp,
                    clip.stats.max_temp,
                    frame_scale=frame_scale,
                )
                draw = ImageDraw.Draw(image)
                screen_bounds = Region(0, 0, image.width, image.height)
                self.add_tracks(
                    draw,
                    clip.tracks,
                    frame_number,
                    predictions,
                    screen_bounds,
                    scale=frame_scale,
                )
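            # frames flagged as FFC-affected were captured while the camera was
            # running its flat-field correction, so overlay a notice.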
            if frame.ffc_affected:
                self.add_header(draw, image.width, image.height,
                                "Calibrating ...")
            if self.debug and draw:
                self.add_footer(draw, image.width, image.height, footer,
                                frame.ffc_affected)
            mpeg.next_frame(np.asarray(image))

            # we store the entire video in memory so we need to cap the frame count at some point.
            if frame_number > clip.frames_per_second * 60 * 10:
                break
        clip.frame_buffer.close_cache()
        mpeg.close()