Code Example #1
class FrameFetcher:
    __slots__ = ("source", "current_frame")

    def __init__(self, video_path):
        self.source = File_Source(SimpleNamespace(),
                                  source_path=video_path,
                                  timing=None,
                                  fill_gaps=True)
        if not self.source.initialised:
            raise FileNotFoundError(video_path)
        self.current_frame = self.source.get_frame()

    def closest_frame_to_ts(self, ts):
        closest_idx = pm.find_closest(self.source.timestamps, ts)
        return self.frame_for_idx(closest_idx)

    def frame_for_idx(self, requested_frame_idx):
        if requested_frame_idx != self.current_frame.index:
            if requested_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame,
                # it's faster to just read one and throw it away.
                self.source.get_frame()
            if requested_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(int(requested_frame_idx))

            try:
                self.current_frame = self.source.get_frame()
            except EndofVideoError:
                logger.info("End of video {}.".format(self.source.source_path))
        return self.current_frame
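
Usage note: a minimal sketch of how FrameFetcher might be driven, assuming the pupil imports used above (File_Source, player_methods as pm) are in scope; the recording path and timestamp offset are hypothetical.

# Hypothetical usage of FrameFetcher; the video path is a placeholder.
fetcher = FrameFetcher("/path/to/recording/world.mp4")
# Fetch the frame closest to a point 1.5 s after the first timestamp.
frame = fetcher.closest_frame_to_ts(fetcher.source.timestamps[0] + 1.5)
print(frame.index, frame.timestamp)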
Code Example #2
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )

    #  NOTE: The start time of the exported recording will be synced with the world
    #  recording export! This means that if the recording to export started later
    #  than the world video, the first frame of the exported recording will not be
    #  at timestamp 0 in the recording, but later. Some video players (e.g. VLC on
    #  Windows) might display the video oddly in this case, but we prefer
    #  synchronization between the exported videos!
    start_time = export_window[0]
    writer = MPEG_Writer(output_file, start_time)

    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index
            )
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Code Example #3
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )
    writer = AV_Writer(
        output_file, fps=input_source.frame_rate, audio_dir=None, use_timestamps=True
    )
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index
            )
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Code Example #4
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    yield "Export video", 0.0
    input_source = File_Source(EmptyGPool(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps,
                                    (export_start, export_stop - 1))
    (export_from_index,
     export_to_index) = pm.find_closest(input_source.timestamps, export_window)
    writer = AV_Writer(output_file,
                       fps=input_source.frame_rate,
                       audio_dir=None,
                       use_timestamps=True)
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index)
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Code Example #5
class Eye_Wrapper(object):
    def __init__(self, g_pool, eyeid, pos, hdpi_fac=1., hflip=False, vflip=False):
        super().__init__()
        self.g_pool = g_pool
        self.eyeid = eyeid
        self.pos = pos
        self.hflip = hflip
        self.vflip = vflip
        self.source = None
        self.eye_world_frame_map = None
        self.current_eye_frame = None
        self.drag_offset = None
        self.menu = None
        self.hdpi_fac = hdpi_fac

    def initliaze_video(self, rec_dir, world_timestamps):
        eye_loc = os.path.join(rec_dir, 'eye{}.*'.format(self.eyeid))
        try:
            self.source = File_Source(Empty(), source_path=glob(eye_loc)[0])
            self.current_eye_frame = self.source.get_frame()
        except (FileNotFoundError, IndexError, FileCaptureError):
            logger.warning('Video for eye{} was not found or could not be opened.'.format(self.eyeid))
        else:
            self.eye_world_frame_map = correlate_eye_world(self.source.timestamps, world_timestamps)
            if self.menu is not None:
                self.menu.read_only = False

    def add_eye_menu(self, parent):
        self.menu = ui.Growing_Menu('Eye {}'.format(self.eyeid))
        parent.append(self.menu)
        self.menu.append(ui.Switch('hflip', self, label='Horizontal flip'))
        self.menu.append(ui.Switch('vflip', self, label='Vertical flip'))
        self.menu.read_only = not self.initialized

    def remove_eye_menu(self, parent):
        parent.remove(self.menu)
        self.menu = None

    def deinitliaze_video(self):
        self.source = None
        self.eye_world_frame_map = None
        self.current_eye_frame = None
        if self.menu is not None:
            self.menu.read_only = True

    @property
    def initialized(self):
        return self.source is not None

    @property
    def config(self):
        return {'pos': self.pos, 'hflip': self.hflip, 'vflip': self.vflip}

    def visualize(self, frame, alpha, scale, show_ellipses, pupil_positions):
        if not self.initialized:
            return

        requested_eye_frame_idx = self.eye_world_frame_map[frame.index]
        # 1. do we need a new frame?
        if requested_eye_frame_idx != self.current_eye_frame.index:
            if requested_eye_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame, it's faster to just read one and throw it away.
                self.source.get_frame()
            if requested_eye_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(requested_eye_frame_idx)

            try:
                self.current_eye_frame = self.source.get_frame()
            except EndofVideoError:
                logger.info("Reached the end of the eye video for eye video {}.".format(self.eyeid))

        # 2. dragging image
        if self.drag_offset is not None:
            x, y = glfwGetCursorPos(glfwGetCurrentContext())
            pos = x * self.hdpi_fac, y * self.hdpi_fac
            pos = normalize(pos, self.g_pool.camera_render_size)
            # Position in img pixels
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))
            self.pos = int(pos[0] + self.drag_offset[0]), int(pos[1] + self.drag_offset[1])

        # 3. keep in image bounds; do this even when not dragging because the video size could change.
        video_size = round(self.current_eye_frame.width * scale), round(self.current_eye_frame.height * scale)

        # frame.img.shape[0] is height, frame.img.shape[1] is width of screen
        self.pos = (min(frame.img.shape[1] - video_size[0], max(self.pos[0], 0)),
                    min(frame.img.shape[0] - video_size[1], max(self.pos[1], 0)))


        # 4. flipping images, converting to greyscale
        eyeimage = self.current_eye_frame.gray
        eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)

        if show_ellipses:
            try:
                pp = next((pp for pp in pupil_positions if pp['id'] == self.eyeid and pp['timestamp'] == self.current_eye_frame.timestamp))
            except StopIteration:
                pass
            else:
                el = pp['ellipse']
                conf = int(pp.get('model_confidence', pp.get('confidence', 0.1)) * 255)
                el_points = getEllipsePts((el['center'], el["axes"], el['angle']))
                cv2.polylines(eyeimage, [np.asarray(el_points, dtype='i')], True, (0, 0, 255, conf), thickness=1)
                cv2.circle(eyeimage, (int(el['center'][0]), int(el['center'][1])), 5, (0, 0, 255, conf), thickness=-1)


        # flip and scale
        eyeimage = cv2.resize(eyeimage, (0, 0), fx=scale, fy=scale)
        if self.hflip:
            eyeimage = np.fliplr(eyeimage)
        if self.vflip:
            eyeimage = np.flipud(eyeimage)

        transparent_image_overlay(self.pos, eyeimage, frame.img, alpha)

    def on_click(self, pos, button, action, hdpi_fac, eye_scale):
        self.hdpi_fac = hdpi_fac
        if not self.initialized:
            return False  # click event has not been consumed

        video_size = round(self.current_eye_frame.width * eye_scale), round(self.current_eye_frame.height * eye_scale)

        if (self.pos[0] < pos[0] < self.pos[0] + video_size[0] and
                self.pos[1] < pos[1] < self.pos[1] + video_size[1]):
            self.drag_offset = self.pos[0] - pos[0], self.pos[1] - pos[1]
            return True
        else:
            self.drag_offset = None
            return False
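
Example #5 depends on a correlate_eye_world helper that maps each world frame index to the index of the closest eye frame. A minimal sketch of such a nearest-timestamp mapping, assuming sorted numpy arrays with at least two eye timestamps (the project's actual helper may differ):

import numpy as np

def correlate_eye_world_sketch(eye_timestamps, world_timestamps):
    # For every world timestamp, pick the index of the closest eye timestamp.
    eye_ts = np.asarray(eye_timestamps)
    world_ts = np.asarray(world_timestamps)
    insert = np.searchsorted(eye_ts, world_ts).clip(1, len(eye_ts) - 1)
    left, right = eye_ts[insert - 1], eye_ts[insert]
    # Step back by one index wherever the left neighbor is closer.
    return insert - (world_ts - left < right - world_ts)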
Code Example #6
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation of the world video and saves a certain time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import MPEG_Audio_Writer

    # We are not importing manual gaze correction. In Player, corrections have
    # already been applied.
    from fixation_detector import Offline_Fixation_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from video_overlay.plugins import Video_Overlay, Eye_Overlay
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_watermark import Vis_Watermark

    PID = str(os.getpid())
    logger = logging.getLogger(f"{__name__} with pid: {PID}")
    start_status = f"Starting video export with pid: {PID}"
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Eye_Overlay,
                Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        recording = PupilRecording(rec_dir)
        meta_info = recording.meta_info

        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.process = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        videos = recording.files().core().world().videos()
        if not videos:
            raise FileNotFoundError("No world video found")

        source_path = videos[0].resolve()
        cap = File_Source(g_pool,
                          source_path=source_path,
                          fill_gaps=True,
                          timing=None)
        if not cap.initialised:
            warn = "Trying to export zero-duration world video."
            logger.warning(warn)
            yield warn, 0.0
            return

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and
        # give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        logger.debug(
            f"Will export from frame {start_frame} to frame "
            f"{start_frame + frames_to_export}. This means I will export "
            f"{frames_to_export} frames.")

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.PupilDataBisector.from_init_dict(
            pre_computed_eye_data["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        try:
            # setup of writer
            writer = MPEG_Audio_Writer(
                out_file_path,
                start_time_synced=trimmed_timestamps[0],
                audio_dir=rec_dir,
            )

            while frames_to_export > current_frame:
                try:
                    frame = cap.get_frame()
                except EndofVideoError:
                    break

                events = {"frame": frame}
                # new positions and events
                frame_window = pm.enclosing_window(g_pool.timestamps,
                                                   frame.index)
                events["gaze"] = g_pool.gaze_positions.by_ts_window(
                    frame_window)
                events["pupil"] = g_pool.pupil_positions.by_ts_window(
                    frame_window)

                # publish delayed notifications when their time has come.
                for n in list(g_pool.delayed_notifications.values()):
                    if n["_notify_time_"] < time():
                        del n["_notify_time_"]
                        del g_pool.delayed_notifications[n["subject"]]
                        g_pool.notifications.append(n)

                # notify each plugin if there are new notifications:
                while g_pool.notifications:
                    n = g_pool.notifications.pop(0)
                    for p in g_pool.plugins:
                        p.on_notify(n)

                # allow each Plugin to do its work.
                for p in g_pool.plugins:
                    p.recent_events(events)

                writer.write_video_frame(frame)
                current_frame += 1
                yield "Exporting with pid {}".format(PID), current_frame
        except GeneratorExit:
            logger.warning(f"Video export with pid {PID} was canceled.")
            writer.close(timestamp_export_format=None,
                         closed_suffix=".canceled")
            return

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        logger.info(
            f"Export done: Exported {current_frame} frames to {out_file_path}. "
            f"This took {duration} seconds. "
            f"Exporter ran at {effective_fps} frames per second.")
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        logger.warning(f"Video export with pid {PID} was canceled.")
Code Example #7
File: exporter.py  Project: shaoxuan92/pupil
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           user_dir,
           min_data_confidence,
           start_frame=None,
           end_frame=None,
           plugin_initializers=(),
           out_file_path=None):

    vis_plugins = sorted([
        Vis_Circle, Vis_Cross, Vis_Polyline, Vis_Light_Points, Vis_Watermark,
        Scan_Path
    ],
                         key=lambda x: x.__name__)
    analysis_plugins = sorted([
        Manual_Gaze_Correction, Eye_Video_Overlay,
        Pupil_Angle_3D_Fixation_Detector, Gaze_Position_2D_Fixation_Detector
    ],
                              key=lambda x: x.__name__)
    user_plugins = sorted(import_runtime_plugins(
        os.path.join(user_dir, 'plugins')),
                          key=lambda x: x.__name__)
    available_plugins = vis_plugins + analysis_plugins + user_plugins
    name_by_index = [p.__name__ for p in available_plugins]
    index_by_name = dict(zip(name_by_index, range(len(name_by_index))))
    plugin_by_name = dict(zip(name_by_index, available_plugins))

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    update_recording_to_recent(rec_dir)

    video_path = [
        f for f in glob(os.path.join(rec_dir, "world.*"))
        if f[-3:] in ('mp4', 'mkv', 'avi')
    ][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")

    meta_info = load_meta_info(rec_dir)
    rec_version = read_rec_version(meta_info)

    g_pool = Global_Container()
    g_pool.app = 'exporter'
    g_pool.min_data_confidence = min_data_confidence
    timestamps = np.load(timestamps_path)
    cap = File_Source(g_pool, video_path, timestamps=timestamps)

    # Out file path verification, we do this before but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    # Trim mark verification
    # make sure the trim marks (start frame, end frame) make sense:
    # We define them like python list slices, thus we can test them like such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warning(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    if start_frame is None:
        start_frame = 0

    # these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path, fps=cap.frame_rate, use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g_pool.capture = cap
    g_pool.rec_dir = rec_dir
    g_pool.user_dir = user_dir
    g_pool.rec_version = rec_version
    g_pool.timestamps = timestamps
    g_pool.delayed_notifications = {}
    g_pool.notifications = []

    # load pupil_positions, gaze_positions
    pupil_data = load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']
    g_pool.pupil_positions_by_frame = correlate_data(pupil_list,
                                                     g_pool.timestamps)
    g_pool.gaze_positions_by_frame = correlate_data(gaze_list,
                                                    g_pool.timestamps)
    g_pool.fixations_by_frame = [[] for x in g_pool.timestamps]  # populated by the fixation detector plugin

    # add plugins
    g_pool.plugins = Plugin_List(g_pool, plugin_by_name, plugin_initializers)

    while frames_to_export.value > current_frame.value:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events
        events['gaze_positions'] = g_pool.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g_pool.pupil_positions_by_frame[
            frame.index]

        # publish delayed notifications when their time has come.
        for n in g_pool.delayed_notifications.values():
            if n['_notify_time_'] < time():
                del n['_notify_time_']
                del g_pool.delayed_notifications[n['subject']]
                g_pool.notifications.append(n)

        # notify each plugin if there are new notifications:
        while g_pool.notifications:
            n = g_pool.notifications.pop(0)
            for p in g_pool.plugins:
                p.on_notify(n)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
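
This export() variant reports progress through values shared with the launching process (frames_to_export.value, current_frame.value) and checks should_terminate for cancellation. A minimal sketch of how a launcher might wire this up with multiprocessing; the recording paths and confidence threshold are hypothetical:

import multiprocessing as mp
from ctypes import c_bool, c_int

should_terminate = mp.Value(c_bool, False)
frames_to_export = mp.Value(c_int, 0)
current_frame = mp.Value(c_int, 0)

proc = mp.Process(
    target=export,
    args=(should_terminate, frames_to_export, current_frame,
          "/path/to/recording", "/path/to/user_dir", 0.6),
)
proc.start()
while proc.is_alive():
    proc.join(timeout=1.0)  # poll roughly once per second
    if frames_to_export.value:
        print("{}/{} frames".format(current_frame.value, frames_to_export.value))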
Code Example #8
def export_undistorted_h264(distorted_video_loc, target_video_loc,
                            export_range):
    yield "Converting scene video", .1
    capture = File_Source(Empty(), distorted_video_loc)
    if not capture.initialised:
        yield "Converting scene video failed", 0.
        return

    update_rate = 10
    start_time = None
    time_base = Fraction(1, 65535)
    average_fps = int(
        len(capture.timestamps) /
        (capture.timestamps[-1] - capture.timestamps[0]))

    target_container = av.open(target_video_loc, 'w')
    video_stream = target_container.add_stream('mpeg4', 1 / time_base)
    video_stream.bit_rate = 150e6
    video_stream.bit_rate_tolerance = video_stream.bit_rate / 20
    video_stream.thread_count = max(1, mp.cpu_count() - 1)
    video_stream.width, video_stream.height = capture.frame_size

    av_frame = av.VideoFrame(*capture.frame_size, 'bgr24')
    av_frame.time_base = time_base

    capture.seek_to_frame(export_range[0])
    next_update_idx = export_range[0] + update_rate
    while True:
        try:
            frame = capture.get_frame()
        except EndofVideoError:
            break

        if frame.index > export_range[1]:
            break

        if start_time is None:
            start_time = frame.timestamp

        undistorted_img = capture.intrinsics.undistort(frame.img)
        av_frame.planes[0].update(undistorted_img)
        av_frame.pts = int((frame.timestamp - start_time) / time_base)

        packet = video_stream.encode(av_frame)
        if packet:
            target_container.mux(packet)

        if capture.current_frame_idx >= next_update_idx:
            progress = ((capture.current_frame_idx - export_range[0]) /
                        (export_range[1] - export_range[0])) * .9 + .1
            yield "Converting scene video", progress * 100.
            next_update_idx += update_rate

    while True:  # flush encoder
        packet = video_stream.encode()
        if packet:
            target_container.mux(packet)
        else:
            break

    target_container.close()
    capture.cleanup()
    yield "Converting scene video completed", 1. * 100.
Code Example #9
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation of the world video and saves a certain time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # We are not importing manual gaze correction. In Player, corrections have already been applied.
    # In the batch exporter this plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_eye_video_overlay import Vis_Eye_Video_Overlay
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Vis_Eye_Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        meta_info = pm.load_meta_info(rec_dir)

        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(f for f in glob(os.path.join(rec_dir, "world.*"))
                              if os.path.splitext(f)[1] in valid_ext)
        except StopIteration:
            raise FileNotFoundError("No Video world found")
        cap = File_Source(g_pool,
                          source_path=video_path,
                          fill_gaps=True,
                          timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_dir=rec_dir,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        logger.warning("Video export with pid {} was canceled.".format(
            os.getpid()))
Code Example #10
class Eye_Wrapper(object):
    def __init__(self,
                 g_pool,
                 eyeid,
                 pos,
                 hdpi_fac=1.0,
                 hflip=False,
                 vflip=False):
        super().__init__()
        self.g_pool = g_pool
        self.eyeid = eyeid
        self.pos = pos
        self.hflip = hflip
        self.vflip = vflip
        self.source = None
        self.eye_world_frame_map = None
        self.current_eye_frame = None
        self.drag_offset = None
        self.menu = None
        self.hdpi_fac = hdpi_fac

    def initliaze_video(self, rec_dir, world_timestamps):
        eye_loc = os.path.join(rec_dir, "eye{}.*".format(self.eyeid))
        try:
            self.source = File_Source(SimpleNamespace(),
                                      source_path=glob(eye_loc)[0],
                                      timing=None)
            self.current_eye_frame = self.source.get_frame()
        except (FileNotFoundError, IndexError):
            logger.warning(
                "Video for eye{} was not found or could not be opened.".format(
                    self.eyeid))
        else:
            self.eye_world_frame_map = correlate_eye_world(
                self.source.timestamps, world_timestamps)
            if self.menu is not None:
                self.menu.read_only = False

    def add_eye_menu(self, parent):
        self.menu = ui.Growing_Menu("Eye {}".format(self.eyeid))
        parent.append(self.menu)
        self.menu.append(ui.Switch("hflip", self, label="Horizontal flip"))
        self.menu.append(ui.Switch("vflip", self, label="Vertical flip"))
        self.menu.read_only = not self.initialized

    def remove_eye_menu(self, parent):
        parent.remove(self.menu)
        self.menu = None

    def deinitliaze_video(self):
        self.source = None
        self.eye_world_frame_map = None
        self.current_eye_frame = None
        if self.menu is not None:
            self.menu.read_only = True

    @property
    def initialized(self):
        return self.source is not None

    @property
    def config(self):
        return {"pos": self.pos, "hflip": self.hflip, "vflip": self.vflip}

    def visualize(self, frame, alpha, scale, show_ellipses, pupil_positions):
        if not self.initialized:
            return

        requested_eye_frame_idx = self.eye_world_frame_map[frame.index]
        # 1. do we need a new frame?
        if requested_eye_frame_idx != self.current_eye_frame.index:
            if requested_eye_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame, it's faster to just read one and throw it away.
                self.source.get_frame()
            if requested_eye_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(int(requested_eye_frame_idx))

            try:
                self.current_eye_frame = self.source.get_frame()
            except EndofVideoError:
                logger.info(
                    "Reached the end of the eye video for eye {}.".format(
                        self.eyeid))

        # 2. dragging image
        if self.drag_offset is not None:
            x, y = glfwGetCursorPos(glfwGetCurrentContext())
            pos = x * self.hdpi_fac, y * self.hdpi_fac
            pos = normalize(pos, self.g_pool.camera_render_size)
            # Position in img pixels
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))
            self.pos = (
                int(pos[0] + self.drag_offset[0]),
                int(pos[1] + self.drag_offset[1]),
            )

        # 3. keep in image bounds; do this even when not dragging because the video size could change.
        video_size = (
            round(self.current_eye_frame.width * scale),
            round(self.current_eye_frame.height * scale),
        )

        # frame.img.shape[0] is height, frame.img.shape[1] is width of screen
        self.pos = (
            min(frame.img.shape[1] - video_size[0], max(self.pos[0], 0)),
            min(frame.img.shape[0] - video_size[1], max(self.pos[1], 0)),
        )

        # 4. flipping images, converting to greyscale
        eyeimage = self.current_eye_frame.gray
        eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)

        if show_ellipses:
            try:
                pp = next(
                    (pp for pp in pupil_positions if pp["id"] == self.eyeid
                     and pp["timestamp"] == self.current_eye_frame.timestamp))
            except StopIteration:
                pass
            else:
                draw_pupil_on_image(eyeimage, pp)

        # flip and scale
        eyeimage = cv2.resize(eyeimage, (0, 0), fx=scale, fy=scale)
        if self.hflip:
            eyeimage = np.fliplr(eyeimage)
        if self.vflip:
            eyeimage = np.flipud(eyeimage)

        transparent_image_overlay(self.pos, eyeimage, frame.img, alpha)

    def on_click(self, pos, button, action, hdpi_fac, eye_scale):
        self.hdpi_fac = hdpi_fac
        if not self.initialized:
            return False  # click event has not been consumed

        video_size = (
            round(self.current_eye_frame.width * eye_scale),
            round(self.current_eye_frame.height * eye_scale),
        )

        if (self.pos[0] < pos[0] < self.pos[0] + video_size[0]
                and self.pos[1] < pos[1] < self.pos[1] + video_size[1]):
            self.drag_offset = self.pos[0] - pos[0], self.pos[1] - pos[1]
            return True
        else:
            self.drag_offset = None
            return False
Code Example #11
File: exporter.py  Project: xiuxi/pupil
def export(rec_dir,
           user_dir,
           min_data_confidence,
           start_frame=None,
           end_frame=None,
           plugin_initializers=(),
           out_file_path=None,
           pre_computed={}):

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))
    start_status = 'Starting video export with pid: {}'.format(os.getpid())
    print(start_status)
    yield start_status, 0

    try:
        update_recording_to_recent(rec_dir)

        vis_plugins = sorted([
            Vis_Circle, Vis_Cross, Vis_Polyline, Vis_Light_Points,
            Vis_Watermark, Vis_Scan_Path, Vis_Eye_Video_Overlay
        ],
                             key=lambda x: x.__name__)
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(import_runtime_plugins(
            os.path.join(user_dir, 'plugins')),
                              key=lambda x: x.__name__)

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        video_path = [
            f for f in glob(os.path.join(rec_dir, "world.*"))
            if os.path.splitext(f)[-1] in ('.mp4', '.mkv', '.avi', '.mjpeg')
        ][0]
        pupil_data_path = os.path.join(rec_dir, "pupil_data")
        audio_path = os.path.join(rec_dir, "audio.mp4")

        meta_info = load_meta_info(rec_dir)

        g_pool = Global_Container()
        g_pool.app = 'exporter'
        g_pool.min_data_confidence = min_data_confidence
        cap = File_Source(g_pool, video_path)
        timestamps = cap.timestamps

        # Out file path verification, we do this before but if one uses a separate tool, this will kick in.
        if out_file_path is None:
            out_file_path = os.path.join(rec_dir, "world_viz.mp4")
        else:
            file_name = os.path.basename(out_file_path)
            dir_name = os.path.dirname(out_file_path)
            if not dir_name:
                dir_name = rec_dir
            if not file_name:
                file_name = 'world_viz.mp4'
            out_file_path = os.path.expanduser(
                os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, endframe) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = "Will export from frame {} to frame {}. This means I will export {} frames."
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_loc=audio_path,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []
        # load pupil_positions, gaze_positions
        pupil_data = pre_computed.get("pupil_data") or load_object(
            pupil_data_path)
        g_pool.pupil_data = pupil_data
        g_pool.pupil_positions = pre_computed.get(
            "pupil_positions") or pupil_data['pupil_positions']
        g_pool.gaze_positions = pre_computed.get(
            "gaze_positions") or pupil_data['gaze_positions']
        g_pool.fixations = []  # populated by the fixation detector plugin

        g_pool.pupil_positions_by_frame = correlate_data(
            g_pool.pupil_positions, g_pool.timestamps)
        g_pool.gaze_positions_by_frame = correlate_data(
            g_pool.gaze_positions, g_pool.timestamps)
        g_pool.fixations_by_frame = [
            [] for x in g_pool.timestamps
        ]  # populated by the fixation detector plugin

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoFileError:
                break

            events = {'frame': frame}
            # new positions and events
            events['gaze_positions'] = g_pool.gaze_positions_by_frame[
                frame.index]
            events['pupil_positions'] = g_pool.pupil_positions_by_frame[
                frame.index]

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n['_notify_time_'] < time():
                    del n['_notify_time_']
                    del g_pool.delayed_notifications[n['subject']]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield 'Exporting', current_frame

        writer.close()
        writer = None

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        print(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield 'Export done. This took {:.0f} seconds.'.format(
            duration), current_frame

    except GeneratorExit:
        print('Video export with pid {} was canceled.'.format(os.getpid()))
    except:
        from time import sleep
        import traceback
        trace = traceback.format_exc()
        print('Process Export (pid: {}) crashed with trace:\n{}'.format(
            os.getpid(), trace))
        sleep(1.0)
Code Example #12
File: exporter.py  Project: neuroidss/pupil
def export(should_terminate, frames_to_export, current_frame, rec_dir, user_dir, min_data_confidence,
           start_frame=None, end_frame=None, plugin_initializers=(), out_file_path=None,pre_computed={}):

    vis_plugins = sorted([Vis_Circle,Vis_Cross,Vis_Polyline,Vis_Light_Points,
        Vis_Watermark,Vis_Scan_Path,Vis_Eye_Video_Overlay], key=lambda x: x.__name__)
    analysis_plugins = sorted([ Pupil_Angle_3D_Fixation_Detector,
                               Gaze_Position_2D_Fixation_Detector], key=lambda x: x.__name__)
    user_plugins = sorted(import_runtime_plugins(os.path.join(user_dir, 'plugins')), key=lambda x: x.__name__)

    available_plugins = vis_plugins + analysis_plugins + user_plugins
    name_by_index = [p.__name__ for p in available_plugins]
    plugin_by_name = dict(zip(name_by_index, available_plugins))

    logger = logging.getLogger(__name__+' with pid: '+str(os.getpid()))

    update_recording_to_recent(rec_dir)

    video_path = [f for f in glob(os.path.join(rec_dir, "world.*")) if f[-3:] in ('mp4', 'mkv', 'avi')][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")
    audio_path = os.path.join(rec_dir, "audio.mp4")

    meta_info = load_meta_info(rec_dir)

    g_pool = Global_Container()
    g_pool.app = 'exporter'
    g_pool.min_data_confidence = min_data_confidence
    timestamps = np.load(timestamps_path)
    cap = File_Source(g_pool, video_path, timestamps=timestamps)

    # Out file path verification, we do this before but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to {}".format(out_file_path))

    # Trim mark verification
    # make sure the trim marks (start frame, endframe) make sense:
    # We define them like python list slices, thus we can test them like such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warn("Start and end frames are set such that no video will be exported.")
        return False

    if start_frame is None:
        start_frame = 0

    # these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    exp_info = "Will export from frame {} to frame {}. This means I will export {} frames."
    logger.debug(exp_info.format(start_frame, start_frame + frames_to_export.value, frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path, fps=cap.frame_rate, audio_loc=audio_path, use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g_pool.capture = cap
    g_pool.rec_dir = rec_dir
    g_pool.user_dir = user_dir
    g_pool.meta_info = meta_info
    g_pool.timestamps = timestamps
    g_pool.delayed_notifications = {}
    g_pool.notifications = []
    # load pupil_positions, gaze_positions
    pupil_data = pre_computed.get("pupil_data") or load_object(pupil_data_path)
    g_pool.pupil_data = pupil_data
    g_pool.pupil_positions = pre_computed.get("pupil_positions") or pupil_data['pupil_positions']
    g_pool.gaze_positions = pre_computed.get("gaze_positions") or pupil_data['gaze_positions']
    g_pool.fixations = [] # populated by the fixation detector plugin

    g_pool.pupil_positions_by_frame = correlate_data(g_pool.pupil_positions, g_pool.timestamps)
    g_pool.gaze_positions_by_frame = correlate_data(g_pool.gaze_positions, g_pool.timestamps)
    g_pool.fixations_by_frame = [[] for x in g_pool.timestamps]  # populated by the fixation detector plugin

    # add plugins
    g_pool.plugins = Plugin_List(g_pool, plugin_by_name, plugin_initializers)

    while frames_to_export.value > current_frame.value:

        if should_terminate.value:
            logger.warning("User aborted export. Exported {} frames to {}.".format(current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame()
        except EndofVideoFileError:
            break

        events = {'frame': frame}
        # new positions and events
        events['gaze_positions'] = g_pool.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g_pool.pupil_positions_by_frame[frame.index]

        # publish delayed notifications when their time has come.
        for n in list(g_pool.delayed_notifications.values()):
            if n['_notify_time_'] < time():
                del n['_notify_time_']
                del g_pool.delayed_notifications[n['subject']]
                g_pool.notifications.append(n)

        # notify each plugin if there are new notifications:
        while g_pool.notifications:
            n = g_pool.notifications.pop(0)
            for p in g_pool.plugins:
                p.on_notify(n)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.recent_events(events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time()-start_time
    effective_fps = float(current_frame.value)/duration

    result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
    logger.info(result.format(current_frame.value, out_file_path, duration, effective_fps))
    return True
Code Example #13
def export_processed_h264(
    world_timestamps,
    unprocessed_video_loc,
    target_video_loc,
    export_range,
    process_frame,
    export_timestamps,
):
    yield "Converting video", 0.1
    capture = File_Source(Empty(), unprocessed_video_loc)
    if not capture.initialised:
        yield "Converting scene video failed", 0.0
        return

    export_window = pm.exact_window(world_timestamps, export_range)
    (export_from_index,
     export_to_index) = pm.find_closest(capture.timestamps, export_window)

    update_rate = 10
    start_time = None
    time_base = Fraction(1, 65535)

    target_container = av.open(target_video_loc, "w")
    video_stream = target_container.add_stream("mpeg4", 1 / time_base)
    video_stream.bit_rate = 150e6
    video_stream.bit_rate_tolerance = video_stream.bit_rate / 20
    video_stream.thread_count = max(1, mp.cpu_count() - 1)
    video_stream.width, video_stream.height = capture.frame_size

    av_frame = av.VideoFrame(*capture.frame_size, "bgr24")
    av_frame.time_base = time_base

    capture.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    timestamps = []
    while True:
        try:
            frame = capture.get_frame()
        except EndofVideoError:
            break

        if frame.index > export_to_index:
            break

        if start_time is None:
            start_time = frame.timestamp

        undistorted_img = process_frame(capture, frame)
        av_frame.planes[0].update(undistorted_img)
        av_frame.pts = int((frame.timestamp - start_time) / time_base)

        if export_timestamps:
            timestamps.append(frame.timestamp)

        packet = video_stream.encode(av_frame)
        if packet:
            target_container.mux(packet)

        if capture.current_frame_idx >= next_update_idx:
            progress = ((capture.current_frame_idx - export_from_index) /
                        (export_to_index - export_from_index)) * 0.9 + 0.1
            yield "Converting video", progress * 100.0
            next_update_idx += update_rate

    while True:  # flush encoder
        packet = video_stream.encode()
        if packet:
            target_container.mux(packet)
        else:
            break

    if export_timestamps:
        write_timestamps(target_video_loc, timestamps)

    target_container.close()
    capture.cleanup()
    yield "Converting video completed", 1.0 * 100.0
Code Example #14
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates playback of the world video and saves a given time range as a new video.
    A complete g_pool is mocked so that all plugins run as they would in Player.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # Manual gaze correction is deliberately not imported: in Player its corrections
    # have already been applied, and in the batch exporter the plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector
    from eye_movement import Offline_Eye_Movement_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from video_overlay.plugins import Video_Overlay, Eye_Overlay
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Eye_Overlay,
                Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector, Offline_Eye_Movement_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        meta_info = pm.load_meta_info(rec_dir)

        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(
                f
                for f in glob(os.path.join(rec_dir, "world.*"))
                if os.path.splitext(f)[1] in valid_ext
            )
        except StopIteration:
            raise FileNotFoundError("No world video found")
        cap = File_Source(g_pool, source_path=video_path, fill_gaps=True, timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two values define the job length and drive the progress reports yielded below.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(
                start_frame, start_frame + frames_to_export, frames_to_export
            )
        )

        # setup of writer
        writer = AV_Writer(
            out_file_path, fps=cap.frame_rate, audio_dir=rec_dir, use_timestamps=True
        )

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.pupil_positions_by_id = (
            pm.Bisector(**pre_computed_eye_data["pupil_by_id_0"]),
            pm.Bisector(**pre_computed_eye_data["pupil_by_id_1"]),
        )
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
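            # enclosing_window returns the timestamp interval spanned by this frame;
            # Bisector.by_ts_window gathers every datum whose timestamp falls inside it.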
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration, effective_fps)
        )
        yield "Export done. This took {:.0f} seconds.".format(duration), current_frame

    except GeneratorExit:
        logger.warning("Video export with pid {} was canceled.".format(os.getpid()))