def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    """Export a trimmed, per-frame-processed copy of a video file.

    Generator yielding ``(status_message, progress_percent)`` tuples so the
    caller can display incremental progress.

    Parameters
    ----------
    input_file : source video path, opened via ``File_Source``
    output_file : destination video path, written via ``AV_Writer``
    export_range : ``(start, stop)`` indices into ``world_timestamps``;
        ``stop`` is exclusive
    world_timestamps : timestamps of the world video frames
    process_frame : callable ``(source, frame) -> image`` applied per frame
    timestamp_export_format : forwarded to ``AV_Writer.close()``
    """
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # Yield progress results roughly two times per second. Guard against
    # frame rates below 2 fps, for which int(frame_rate / 2) would be 0 and
    # a progress update would otherwise be emitted for every single frame.
    update_rate = max(1, int(input_source.frame_rate / 2))

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )
    writer = AV_Writer(
        output_file, fps=input_source.frame_rate, audio_dir=None, use_timestamps=True
    )
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    # Hoisted loop invariant; also used to guard the division below against a
    # degenerate (empty) export range, which would raise ZeroDivisionError.
    total_frames = export_to_index - export_from_index
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame  # NOTE: aliases (and mutates) the input frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            if total_frames > 0:
                progress = (
                    input_source.get_frame_index() - export_from_index
                ) / total_frames
            else:
                progress = 1.0
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    # NOTE(review): this is a second definition of ``_convert_video_file`` in
    # the same module and shadows the earlier one; the only functional
    # difference is the g_pool stand-in passed to File_Source (``EmptyGPool()``
    # here vs ``SimpleNamespace()`` above). Confirm which copy is intended.
    """Export a trimmed, per-frame-processed copy of a video file.

    Generator yielding ``(status_message, progress_percent)`` tuples so the
    caller can display incremental progress.

    Parameters
    ----------
    input_file : source video path, opened via ``File_Source``
    output_file : destination video path, written via ``AV_Writer``
    export_range : ``(start, stop)`` indices into ``world_timestamps``;
        ``stop`` is exclusive
    world_timestamps : timestamps of the world video frames
    process_frame : callable ``(source, frame) -> image`` applied per frame
    timestamp_export_format : forwarded to ``AV_Writer.close()``
    """
    yield "Export video", 0.0
    input_source = File_Source(EmptyGPool(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps,
                                    (export_start, export_stop - 1))
    (export_from_index,
     export_to_index) = pm.find_closest(input_source.timestamps, export_window)
    writer = AV_Writer(output_file,
                       fps=input_source.frame_rate,
                       audio_dir=None,
                       use_timestamps=True)
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            # End of source reached before the requested stop index.
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        # NOTE: ``output_frame`` aliases ``input_frame``; the processed image
        # is written back onto the same frame object.
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index)
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
# ----- Example 3 -----
class Realsense_Source(Base_Source):
    """Pupil capture backend that encapsulates a ``pyrs.Device`` (RealSense).

    Provides color and depth streams (optionally aligned), a depth preview,
    optional depth recording, and an interactive 3D point-cloud window.
    Device (re)initialization is driven through the notification system so
    that restarts happen on the main loop (see ``restart_device`` /
    ``on_notify``).
    """
    def __init__(
            self,
            g_pool,
            device_id=0,
            frame_size=(1920, 1080),
            frame_rate=30,
            depth_frame_size=(640, 480),
            depth_frame_rate=60,
            align_streams=False,
            preview_depth=False,
            device_options=(),
            record_depth=True,
            stream_preset=None,
    ):
        """Set up state and initialize the RealSense device.

        Parameters mirror ``get_init_dict`` so a capture can be restored
        from its serialized settings.
        """
        super().__init__(g_pool)
        self._intrinsics = None
        # Monotonic frame counters, assigned to outgoing frames as .index.
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.device = None
        self.service = pyrs.Service()
        self.align_streams = align_streams
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self.controls = None
        # Point-cloud window camera orientation and mouse-drag state.
        self.pitch = 0
        self.yaw = 0
        self.mouse_drag = False
        self.last_pos = (0, 0)
        self.depth_window = None
        self._needs_restart = False
        self.stream_preset = stream_preset
        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )

    def _initialize_device(
            self,
            device_id,
            color_frame_size,
            color_fps,
            depth_frame_size,
            depth_fps,
            device_options=(),
    ):
        """Open the device, validating/auto-correcting stream modes.

        Requested sizes and rates that are unavailable on the hardware are
        replaced by the highest available resolution / frame rate. Resets
        all recent-frame state. Updates the UI menu on both success and
        failure paths.
        """
        devices = tuple(self.service.get_devices())
        color_frame_size = tuple(color_frame_size)
        depth_frame_size = tuple(depth_frame_size)

        # Default (unconfigured) streams; used by _enumerate_formats to
        # filter modes by stream/format before the real streams are built.
        self.streams = [ColorStream(), DepthStream(), PointStream()]
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if not devices:
            if not self._needs_restart:
                logger.error(
                    "Camera failed to initialize. No cameras connected.")
            self.device = None
            self.update_menu()
            return

        if self.device is not None:
            self.device.stop()  # only call Device.stop() if its context

        if device_id >= len(devices):
            logger.error(
                "Camera with id {} not found. Initializing default camera.".
                format(device_id))
            device_id = 0

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)

        # make sure that given frame sizes and rates are available
        color_modes = self._available_modes[rs_stream.RS_STREAM_COLOR]
        if color_frame_size not in color_modes:
            # automatically select highest resolution
            color_frame_size = sorted(color_modes.keys(), reverse=True)[0]

        if color_fps not in color_modes[color_frame_size]:
            # automatically select highest frame rate
            color_fps = color_modes[color_frame_size][0]

        depth_modes = self._available_modes[rs_stream.RS_STREAM_DEPTH]
        if self.align_streams:
            # Aligned streams share the color resolution.
            depth_frame_size = color_frame_size
        else:
            if depth_frame_size not in depth_modes:
                # automatically select highest resolution
                depth_frame_size = sorted(depth_modes.keys(), reverse=True)[0]

        if depth_fps not in depth_modes[depth_frame_size]:
            # automatically select highest frame rate
            depth_fps = depth_modes[depth_frame_size][0]

        colorstream = ColorStream(
            width=color_frame_size[0],
            height=color_frame_size[1],
            fps=color_fps,
            color_format="yuv",
            preset=self.stream_preset,
        )
        depthstream = DepthStream(
            width=depth_frame_size[0],
            height=depth_frame_size[1],
            fps=depth_fps,
            preset=self.stream_preset,
        )
        pointstream = PointStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1],
                                  fps=depth_fps)

        self.streams = [colorstream, depthstream, pointstream]
        if self.align_streams:
            dacstream = DACStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1],
                                  fps=depth_fps)
            dacstream.name = "depth"  # rename data accessor
            self.streams.append(dacstream)

        # update with correctly initialized streams
        # always initiliazes color + depth, adds rectified/aligned versions as necessary

        self.device = self.service.Device(device_id, streams=self.streams)
        self.controls = Realsense_Controls(self.device, device_options)
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name,
                                           self.frame_size)

        self.update_menu()
        self._needs_restart = False

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates

        Returns a dict ``{rs_stream: {(width, height): [fps, ...]}}``.
        When streams are aligned, sizes not shared by both the color and
        depth streams are discarded.
        """
        formats = {}
        # only lists modes for native streams (RS_STREAM_COLOR/RS_STREAM_DEPTH)
        for mode in self.service.get_device_modes(device_id):
            if mode.stream in (rs_stream.RS_STREAM_COLOR,
                               rs_stream.RS_STREAM_DEPTH):
                # check if frame size dict is available
                if mode.stream not in formats:
                    formats[mode.stream] = {}
                stream_obj = next(
                    (s for s in self.streams if s.stream == mode.stream))
                if mode.format == stream_obj.format:
                    size = mode.width, mode.height
                    # check if framerate list is already available
                    if size not in formats[mode.stream]:
                        formats[mode.stream][size] = []
                    formats[mode.stream][size].append(mode.fps)

        if self.align_streams:
            depth_sizes = formats[rs_stream.RS_STREAM_DEPTH].keys()
            color_sizes = formats[rs_stream.RS_STREAM_COLOR].keys()
            # common_sizes = depth_sizes & color_sizes
            # Symmetric difference: sizes available for only one of the two
            # streams cannot be aligned and are removed below.
            discarded_sizes = depth_sizes ^ color_sizes
            for size in discarded_sizes:
                for sizes in formats.values():
                    if size in sizes:
                        del sizes[size]

        return formats

    def cleanup(self):
        """Stop recording, the device, and the pyrs service on shutdown."""
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        if self.device is not None:
            self.device.stop()
        self.service.stop()

    def get_init_dict(self):
        """Return the kwargs needed to re-create this capture (see __init__)."""
        return {
            "device_id":
            self.device.device_id if self.device is not None else 0,
            "frame_size":
            self.frame_size,
            "frame_rate":
            self.frame_rate,
            "depth_frame_size":
            self.depth_frame_size,
            "depth_frame_rate":
            self.depth_frame_rate,
            "preview_depth":
            self.preview_depth,
            "record_depth":
            self.record_depth,
            "align_streams":
            self.align_streams,
            "device_options":
            self.controls.export_presets() if self.controls is not None else
            (),
            "stream_preset":
            self.stream_preset,
        }

    def get_frames(self):
        """Wait for the next frame set and return ``(color, depth)``.

        Either element is None when its device timestamp has not advanced
        since the previous call (i.e. no new frame). New frames are stamped
        with the Pupil clock and a monotonically increasing index.
        """
        if self.device:
            self.device.wait_for_frames()
            current_time = self.g_pool.get_timestamp()

            last_color_frame_ts = self.device.get_frame_timestamp(
                self.streams[0].stream)
            if self.last_color_frame_ts != last_color_frame_ts:
                self.last_color_frame_ts = last_color_frame_ts
                color = ColorFrame(self.device)
                color.timestamp = current_time
                color.index = self.color_frame_index
                self.color_frame_index += 1
            else:
                color = None

            last_depth_frame_ts = self.device.get_frame_timestamp(
                self.streams[1].stream)
            if self.last_depth_frame_ts != last_depth_frame_ts:
                self.last_depth_frame_ts = last_depth_frame_ts
                depth = DepthFrame(self.device)
                depth.timestamp = current_time
                depth.index = self.depth_frame_index
                self.depth_frame_index += 1
            else:
                depth = None

            return color, depth
        return None, None

    def recent_events(self, events):
        """Poll the device and publish new frames into ``events``.

        On grab failure, clears recent frames and schedules a restart for
        the next cycle instead of raising.
        """
        if self._needs_restart:
            self.restart_device()
            time.sleep(0.05)
        elif not self.online:
            time.sleep(0.05)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except (pyrs.RealsenseError, TimeoutError) as err:
            logger.warning(
                "Realsense failed to provide frames. Attempting to reinit.")
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            # NOTE(review): the world frame is only published when BOTH a new
            # color and a new depth frame arrived this cycle — confirm this
            # is intended rather than ``if color_frame:``.
            if color_frame and depth_frame:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        """Tear down the settings menu."""
        self.remove_menu()

    def init_ui(self):
        """Create the settings menu and populate it."""
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        """(Re)build the settings menu to reflect the current device state.

        Safe to call before the UI exists (returns early) and when no
        device is connected (shows an error text only).
        """
        try:
            del self.menu[:]
        except AttributeError:
            # UI not initialized yet; nothing to rebuild.
            return

        from pyglui import ui

        if self.device is None:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        def align_and_restart(val):
            # Changing alignment requires re-opening the device streams.
            self.align_streams = val
            self.restart_device()

        self.menu.append(
            ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(
            ui.Switch("preview_depth", self, label="Preview Depth"))
        self.menu.append(
            ui.Switch("align_streams",
                      self,
                      label="Align Streams",
                      setter=align_and_restart))

        def toggle_depth_display():
            # Lazily create the 3D point-cloud window on first use.
            def on_depth_mouse_button(window, button, action, mods):
                if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
                    self.mouse_drag = True
                if (button == glfw.GLFW_MOUSE_BUTTON_LEFT
                        and action == glfw.GLFW_RELEASE):
                    self.mouse_drag = False

            if self.depth_window is None:
                self.pitch = 0
                self.yaw = 0

                win_size = glfw.glfwGetWindowSize(self.g_pool.main_window)
                self.depth_window = glfw.glfwCreateWindow(
                    win_size[0], win_size[1], "3D Point Cloud")
                glfw.glfwSetMouseButtonCallback(self.depth_window,
                                                on_depth_mouse_button)
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(self.depth_window)
                gl_utils.basic_gl_setup()
                gl_utils.make_coord_system_norm_based()

                # refresh speed settings
                glfw.glfwSwapInterval(0)

                # Restore the previously active GL context.
                glfw.glfwMakeContextCurrent(active_window)

        native_presets = [
            ("None", None),
            ("Best Quality", rs_preset.RS_PRESET_BEST_QUALITY),
            ("Largest image", rs_preset.RS_PRESET_LARGEST_IMAGE),
            ("Highest framerate", rs_preset.RS_PRESET_HIGHEST_FRAMERATE),
        ]

        def set_stream_preset(val):
            if self.stream_preset != val:
                self.stream_preset = val
                self.restart_device()

        self.menu.append(
            ui.Selector(
                "stream_preset",
                self,
                setter=set_stream_preset,
                labels=[preset[0] for preset in native_presets],
                selection=[preset[1] for preset in native_presets],
                label="Stream preset",
            ))
        color_sizes = sorted(self._available_modes[rs_stream.RS_STREAM_COLOR],
                             reverse=True)
        selector = ui.Selector(
            "frame_size",
            self,
            # setter=,
            selection=color_sizes,
            label="Resolution" if self.align_streams else "Color Resolution",
        )
        # Manual size/rate selection is disabled while a preset is active.
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def color_fps_getter():
            # Only offer color rates that evenly divide the depth rate —
            # presumably to keep the streams in sync; TODO confirm.
            avail_fps = [
                fps for fps in self._available_modes[rs_stream.RS_STREAM_COLOR]
                [self.frame_size] if self.depth_frame_rate % fps == 0
            ]
            return avail_fps, [str(fps) for fps in avail_fps]

        selector = ui.Selector(
            "frame_rate",
            self,
            # setter=,
            selection_getter=color_fps_getter,
            label="Color Frame Rate",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        if not self.align_streams:
            depth_sizes = sorted(
                self._available_modes[rs_stream.RS_STREAM_DEPTH], reverse=True)
            selector = ui.Selector(
                "depth_frame_size",
                self,
                # setter=,
                selection=depth_sizes,
                label="Depth Resolution",
            )
            selector.read_only = self.stream_preset is not None
            self.menu.append(selector)

        def depth_fps_getter():
            # Only offer depth rates that are multiples of the color rate.
            avail_fps = [
                fps for fps in self._available_modes[rs_stream.RS_STREAM_DEPTH]
                [self.depth_frame_size] if fps % self.frame_rate == 0
            ]
            return avail_fps, [str(fps) for fps in avail_fps]

        selector = ui.Selector(
            "depth_frame_rate",
            self,
            selection_getter=depth_fps_getter,
            label="Depth Frame Rate",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def reset_options():
            if self.device:
                try:
                    self.device.reset_device_options_to_default(
                        self.controls.keys())
                except pyrs.RealsenseError as err:
                    logger.info("Resetting some device options failed")
                    logger.debug("Reason: {}".format(err))
                finally:
                    self.controls.refresh()

        self.menu.append(ui.Button("Point Cloud Window", toggle_depth_display))
        sensor_control = ui.Growing_Menu(label="Sensor Settings")
        sensor_control.append(
            ui.Button("Reset device options to default", reset_options))
        for ctrl in sorted(self.controls.values(),
                           key=lambda x: x.range.option):
            # sensor_control.append(ui.Info_Text(ctrl.description))
            # Binary 0/1 options render as a switch, others as a slider.
            if (ctrl.range.min == 0.0 and ctrl.range.max == 1.0
                    and ctrl.range.step == 1.0):
                sensor_control.append(
                    ui.Switch("value",
                              ctrl,
                              label=ctrl.label,
                              off_val=0.0,
                              on_val=1.0))
            else:
                sensor_control.append(
                    ui.Slider(
                        "value",
                        ctrl,
                        label=ctrl.label,
                        min=ctrl.range.min,
                        max=ctrl.range.max,
                        step=ctrl.range.step,
                    ))
        self.menu.append(sensor_control)

    def gl_display(self):
        """Render the point-cloud window (if open) and the main preview.

        The main preview shows the depth frame when ``preview_depth`` is
        set, otherwise the most recent color frame.
        """
        from math import floor

        # Destroy the point-cloud window if the user closed it.
        if self.depth_window is not None and glfw.glfwWindowShouldClose(
                self.depth_window):
            glfw.glfwDestroyWindow(self.depth_window)
            self.depth_window = None

        if self.depth_window is not None and self._recent_depth_frame is not None:
            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(self.depth_window)

            win_size = glfw.glfwGetFramebufferSize(self.depth_window)
            gl_utils.adjust_gl_view(win_size[0], win_size[1])
            pos = glfw.glfwGetCursorPos(self.depth_window)
            if self.mouse_drag:
                # Orbit the view; pitch/yaw are clamped to keep the cloud
                # in front of the camera.
                self.pitch = np.clip(self.pitch + (pos[1] - self.last_pos[1]),
                                     -80, 80)
                self.yaw = np.clip(self.yaw - (pos[0] - self.last_pos[0]),
                                   -120, 120)
            self.last_pos = pos

            glClearColor(0, 0, 0, 0)
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            glMatrixMode(GL_PROJECTION)
            glLoadIdentity()
            gluPerspective(60, win_size[0] / win_size[1], 0.01, 20.0)
            glMatrixMode(GL_MODELVIEW)
            glLoadIdentity()
            gluLookAt(0, 0, 0, 0, 0, 1, 0, -1, 0)
            # Rotate about a pivot 0.5 units in front of the camera.
            glTranslatef(0, 0, 0.5)
            glRotated(self.pitch, 1, 0, 0)
            glRotated(self.yaw, 0, 1, 0)
            glTranslatef(0, 0, -0.5)

            # glPointSize(2)
            glEnable(GL_DEPTH_TEST)
            extrinsics = self.device.get_device_extrinsics(
                rs_stream.RS_STREAM_DEPTH, rs_stream.RS_STREAM_COLOR)
            depth_frame = self._recent_depth_frame
            color_frame = self._recent_frame
            depth_scale = self.device.depth_scale

            glEnableClientState(GL_VERTEX_ARRAY)

            pointcloud = self.device.pointcloud
            glVertexPointer(3, GL_FLOAT, 0, pointcloud)
            glEnableClientState(GL_COLOR_ARRAY)
            # Per-point RGB colors sampled from the color image via the
            # depth->color extrinsics.
            depth_to_color = np.zeros(
                depth_frame.height * depth_frame.width * 3, np.uint8)
            rsutilwrapper.project_pointcloud_to_pixel(
                depth_to_color,
                self.device.depth_intrinsics,
                self.device.color_intrinsics,
                extrinsics,
                pointcloud,
                self._recent_frame.bgr,
            )
            glColorPointer(3, GL_UNSIGNED_BYTE, 0, depth_to_color)
            glDrawArrays(GL_POINTS, 0, depth_frame.width * depth_frame.height)
            gl_utils.glFlush()
            glDisable(GL_DEPTH_TEST)
            # gl_utils.make_coord_system_norm_based()
            glfw.glfwSwapBuffers(self.depth_window)
            glfw.glfwMakeContextCurrent(active_window)

        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(
                self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3))

    def restart_device(
        self,
        device_id=None,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        """Stop the device/service and request re-initialization.

        ``None`` arguments default to the current settings. The actual
        ``_initialize_device`` call happens in ``on_notify`` when the
        "realsense_source.restart" notification comes back around.
        """
        if device_id is None:
            if self.device is not None:
                device_id = self.device.device_id
            else:
                device_id = 0
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = self.controls.export_presets()
        if self.device is not None:
            self.device.stop()
            self.device = None
        # Cycle the service so newly (dis)connected cameras are picked up.
        self.service.stop()
        self.service.start()
        self.notify_all({
            "subject": "realsense_source.restart",
            "device_id": device_id,
            "color_frame_size": color_frame_size,
            "color_fps": color_fps,
            "depth_frame_size": depth_frame_size,
            "depth_fps": depth_fps,
            "device_options": device_options,
        })

    def on_click(self, pos, button, action):
        """Track left-button drag state for the point-cloud window."""
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
            self.mouse_drag = True
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_RELEASE:
            self.mouse_drag = False

    def on_notify(self, notification):
        """Handle restart requests and recording start/stop notifications."""
        if notification["subject"] == "realsense_source.restart":
            kwargs = notification.copy()
            # Strip notification metadata; the rest maps onto
            # _initialize_device's keyword arguments.
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif notification["subject"] == "recording.started":
            self.start_depth_recording(notification["rec_path"])
        elif notification["subject"] == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        """Open ``depth.mp4`` in *rec_loc* if depth recording is enabled."""
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = AV_Writer(video_path,
                                            fps=self.depth_frame_rate,
                                            use_timestamps=True)

    def stop_depth_recording(self):
        """Close the depth video writer if one is running."""
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def frame_size(self):
        # (width, height) of the color stream.
        stream = self.streams[0]
        return stream.width, stream.height

    @frame_size.setter
    def frame_size(self, new_size):
        # Takes effect via a full device restart.
        if self.device is not None and new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        # Color stream fps.
        return self.streams[0].fps

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        # (width, height) of the depth stream.
        stream = self.streams[1]
        return stream.width, stream.height

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if self.device is not None and new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        # Depth stream fps.
        return self.streams[1].fps

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        # This backend never delivers JPEG-compressed frames.
        return False

    @property
    def online(self):
        return self.device and self.device.is_streaming()

    @property
    def name(self):
        # not the same as `if self.device:`!
        if self.device is not None:
            return self.device.name
        else:
            return "Ghost capture"
# ----- Example 4 -----
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation for the world video and saves a certain time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.

    Generator that yields ``(status_message, progress)`` tuples, where the
    final ``progress`` value is the number of frames written so far.

    Parameters
    ----------
    rec_dir : recording directory containing the world video
    user_dir : directory that may contain user runtime plugins
    min_data_confidence : confidence threshold exposed on the fake g_pool
    start_frame, end_frame : python-slice-style trim marks (may be None)
    plugin_initializers : serialized plugin settings for Plugin_List
    out_file_path : destination video path (existing file is overwritten)
    pre_computed_eye_data : dict with "pupil"/"gaze"/"fixations" entries,
        each holding msgpack-serialized data plus Bisector/Affiliator kwargs
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # we are not importing manual gaze correction. In Player corrections have already been applied.
    # in batch exporter this plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_eye_video_overlay import Vis_Eye_Video_Overlay
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    # Include the pid in the logger name so parallel exports are
    # distinguishable in the log.
    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Vis_Eye_Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        meta_info = pm.load_meta_info(rec_dir)

        # Minimal stand-in for the Player g_pool so plugins run unmodified.
        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(f for f in glob(os.path.join(rec_dir, "world.*"))
                              if os.path.splitext(f)[1] in valid_ext)
        except StopIteration:
            raise FileNotFoundError("No Video world found")
        cap = File_Source(g_pool,
                          source_path=video_path,
                          fill_gaps=True,
                          timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        # (None trim marks are valid here because slicing accepts None.)
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_dir=rec_dir,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        # Populate the fake g_pool with everything plugins may touch.
        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        # Deserialize the pre-computed eye data in place.
        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            # Plugins draw onto the frame in place; write the result.
            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        # Raised when the consumer closes the generator (cancel request).
        logger.warning("Video export with pid {} was canceled.".format(
            os.getpid()))
# ----- Example 5 -----
class Realsense_Source(Base_Source):
    """
    Camera Capture is a class that encapsulates pyrs.Device.

    Provides synchronized color + depth (and optionally aligned) streams from an
    Intel RealSense camera via the legacy `pyrealsense` (pyrs) bindings, exposes
    a pyglui settings menu, an optional 3D point-cloud preview window, and
    optional depth-stream recording to an mp4 file.
    """

    def __init__(
        self,
        g_pool,
        device_id=0,
        frame_size=(1920, 1080),
        frame_rate=30,
        depth_frame_size=(640, 480),
        depth_frame_rate=60,
        align_streams=False,
        preview_depth=False,
        device_options=(),
        record_depth=True,
        stream_preset=None,
    ):
        """Set up bookkeeping state and initialize the physical device.

        Args:
            g_pool: shared application state object (plugin framework).
            device_id: index of the RealSense device to open.
            frame_size: requested (width, height) of the color stream.
            frame_rate: requested color stream fps.
            depth_frame_size: requested (width, height) of the depth stream.
            depth_frame_rate: requested depth stream fps.
            align_streams: if True, depth is forced to the color resolution and
                an aligned accessor stream (DACStream) is added.
            preview_depth: if True, the main window shows the depth image.
            device_options: preset device option tuples forwarded to
                Realsense_Controls.
            record_depth: if True, depth frames are written to video during
                recordings.
            stream_preset: optional rs_preset used instead of explicit modes.
        """
        super().__init__(g_pool)
        self._intrinsics = None
        # Independent, monotonically increasing indices per stream.
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.device = None
        self.service = pyrs.Service()
        self.align_streams = align_streams
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self.controls = None
        # Point-cloud window camera orientation / interaction state.
        self.pitch = 0
        self.yaw = 0
        self.mouse_drag = False
        self.last_pos = (0, 0)
        self.depth_window = None
        self._needs_restart = False
        self.stream_preset = stream_preset
        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )

    def _initialize_device(
        self,
        device_id,
        color_frame_size,
        color_fps,
        depth_frame_size,
        depth_fps,
        device_options=(),
    ):
        """(Re)open the device with validated stream modes.

        Falls back to the highest available resolution/frame rate when the
        requested mode is not supported, then rebuilds streams, controls,
        intrinsics, and the settings menu. On success clears the
        `_needs_restart` flag.
        """
        devices = tuple(self.service.get_devices())
        color_frame_size = tuple(color_frame_size)
        depth_frame_size = tuple(depth_frame_size)

        # Placeholder streams: needed so _enumerate_formats() can filter modes
        # by each stream's native format before the real streams exist.
        self.streams = [ColorStream(), DepthStream(), PointStream()]
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if not devices:
            # Only log on a cold start; during a restart the error is expected
            # to be transient while the service reconnects.
            if not self._needs_restart:
                logger.error("Camera failed to initialize. No cameras connected.")
            self.device = None
            self.update_menu()
            return

        if self.device is not None:
            self.device.stop()  # only call Device.stop() if its context

        if device_id >= len(devices):
            logger.error(
                "Camera with id {} not found. Initializing default camera.".format(
                    device_id
                )
            )
            device_id = 0

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)

        # make sure that given frame sizes and rates are available
        color_modes = self._available_modes[rs_stream.RS_STREAM_COLOR]
        if color_frame_size not in color_modes:
            # automatically select highest resolution
            color_frame_size = sorted(color_modes.keys(), reverse=True)[0]

        if color_fps not in color_modes[color_frame_size]:
            # automatically select highest frame rate
            color_fps = color_modes[color_frame_size][0]

        depth_modes = self._available_modes[rs_stream.RS_STREAM_DEPTH]
        if self.align_streams:
            # Aligned depth must match color resolution exactly.
            depth_frame_size = color_frame_size
        else:
            if depth_frame_size not in depth_modes:
                # automatically select highest resolution
                depth_frame_size = sorted(depth_modes.keys(), reverse=True)[0]

        if depth_fps not in depth_modes[depth_frame_size]:
            # automatically select highest frame rate
            depth_fps = depth_modes[depth_frame_size][0]

        colorstream = ColorStream(
            width=color_frame_size[0],
            height=color_frame_size[1],
            fps=color_fps,
            color_format="yuv",
            preset=self.stream_preset,
        )
        depthstream = DepthStream(
            width=depth_frame_size[0],
            height=depth_frame_size[1],
            fps=depth_fps,
            preset=self.stream_preset,
        )
        pointstream = PointStream(
            width=depth_frame_size[0], height=depth_frame_size[1], fps=depth_fps
        )

        self.streams = [colorstream, depthstream, pointstream]
        if self.align_streams:
            dacstream = DACStream(
                width=depth_frame_size[0], height=depth_frame_size[1], fps=depth_fps
            )
            dacstream.name = "depth"  # rename data accessor
            self.streams.append(dacstream)

        # update with correctly initialized streams
        # always initiliazes color + depth, adds rectified/aligned versions as necessary

        self.device = self.service.Device(device_id, streams=self.streams)
        self.controls = Realsense_Controls(self.device, device_options)
        self._intrinsics = load_intrinsics(
            self.g_pool.user_dir, self.name, self.frame_size
        )

        self.update_menu()
        self._needs_restart = False

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates
        """
        formats = {}
        # only lists modes for native streams (RS_STREAM_COLOR/RS_STREAM_DEPTH)
        for mode in self.service.get_device_modes(device_id):
            if mode.stream in (rs_stream.RS_STREAM_COLOR, rs_stream.RS_STREAM_DEPTH):
                # check if frame size dict is available
                if mode.stream not in formats:
                    formats[mode.stream] = {}
                stream_obj = next((s for s in self.streams if s.stream == mode.stream))
                if mode.format == stream_obj.format:
                    size = mode.width, mode.height
                    # check if framerate list is already available
                    if size not in formats[mode.stream]:
                        formats[mode.stream][size] = []
                    formats[mode.stream][size].append(mode.fps)

        if self.align_streams:
            depth_sizes = formats[rs_stream.RS_STREAM_DEPTH].keys()
            color_sizes = formats[rs_stream.RS_STREAM_COLOR].keys()
            # common_sizes = depth_sizes & color_sizes
            # Symmetric difference: sizes supported by only one of the two
            # streams cannot be aligned, so drop them from both.
            discarded_sizes = depth_sizes ^ color_sizes
            for size in discarded_sizes:
                for sizes in formats.values():
                    if size in sizes:
                        del sizes[size]

        return formats

    def cleanup(self):
        """Stop recording (if running), the device, and the pyrs service."""
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        if self.device is not None:
            self.device.stop()
        self.service.stop()

    def get_init_dict(self):
        """Return constructor kwargs so the source can be restored on relaunch."""
        return {
            "device_id": self.device.device_id if self.device is not None else 0,
            "frame_size": self.frame_size,
            "frame_rate": self.frame_rate,
            "depth_frame_size": self.depth_frame_size,
            "depth_frame_rate": self.depth_frame_rate,
            "preview_depth": self.preview_depth,
            "record_depth": self.record_depth,
            "align_streams": self.align_streams,
            "device_options": self.controls.export_presets()
            if self.controls is not None
            else (),
            "stream_preset": self.stream_preset,
        }

    def get_frames(self):
        """Block until new frames arrive; return a (color, depth) pair.

        Either element is None when its device-side timestamp has not advanced
        since the previous call (i.e. no fresh frame for that stream). Both
        frames share one wall-clock timestamp taken after wait_for_frames().
        Returns (None, None) when no device is open.
        """
        if self.device:
            self.device.wait_for_frames()
            current_time = self.g_pool.get_timestamp()

            last_color_frame_ts = self.device.get_frame_timestamp(
                self.streams[0].stream
            )
            # Device timestamp unchanged => same frame as last time; skip it.
            if self.last_color_frame_ts != last_color_frame_ts:
                self.last_color_frame_ts = last_color_frame_ts
                color = ColorFrame(self.device)
                color.timestamp = current_time
                color.index = self.color_frame_index
                self.color_frame_index += 1
            else:
                color = None

            last_depth_frame_ts = self.device.get_frame_timestamp(
                self.streams[1].stream
            )
            if self.last_depth_frame_ts != last_depth_frame_ts:
                self.last_depth_frame_ts = last_depth_frame_ts
                depth = DepthFrame(self.device)
                depth.timestamp = current_time
                depth.index = self.depth_frame_index
                self.depth_frame_index += 1
            else:
                depth = None

            return color, depth
        return None, None

    def recent_events(self, events):
        """Fetch new frames and publish them into the plugin event dict.

        On capture errors the recent frames are cleared and a device restart is
        scheduled for the next call.
        """
        if self._needs_restart:
            self.restart_device()
            time.sleep(0.05)
        elif not self.online:
            time.sleep(0.05)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except (pyrs.RealsenseError, TimeoutError) as err:
            logger.warning("Realsense failed to provide frames. Attempting to reinit.")
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            # NOTE(review): the color frame is only published when a depth
            # frame arrived in the same cycle — confirm this pairing is
            # intentional rather than `if color_frame:`.
            if color_frame and depth_frame:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        """Remove this source's menu from the UI."""
        self.remove_menu()

    def init_ui(self):
        """Create the menu and populate it."""
        self.add_menu()
        # NOTE(review): label reads "Local USB Video Source" — presumably
        # carried over from the UVC backend; confirm intended wording.
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        """Rebuild the settings menu to reflect the current device state.

        Safe to call before init_ui(): if the menu does not exist yet the
        AttributeError from `del self.menu[:]` is swallowed and nothing
        happens.
        """
        try:
            del self.menu[:]
        except AttributeError:
            return

        from pyglui import ui

        if self.device is None:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        def align_and_restart(val):
            # Changing alignment requires re-opening the device with a
            # different stream set.
            self.align_streams = val
            self.restart_device()

        self.menu.append(ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(ui.Switch("preview_depth", self, label="Preview Depth"))
        self.menu.append(
            ui.Switch(
                "align_streams", self, label="Align Streams", setter=align_and_restart
            )
        )

        def toggle_depth_display():
            # Lazily create the 3D point-cloud window; gl_display() destroys
            # it again once the user closes it.
            def on_depth_mouse_button(window, button, action, mods):
                if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
                    self.mouse_drag = True
                if (
                    button == glfw.GLFW_MOUSE_BUTTON_LEFT
                    and action == glfw.GLFW_RELEASE
                ):
                    self.mouse_drag = False

            if self.depth_window is None:
                self.pitch = 0
                self.yaw = 0

                win_size = glfw.glfwGetWindowSize(self.g_pool.main_window)
                self.depth_window = glfw.glfwCreateWindow(
                    win_size[0], win_size[1], "3D Point Cloud"
                )
                glfw.glfwSetMouseButtonCallback(
                    self.depth_window, on_depth_mouse_button
                )
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(self.depth_window)
                gl_utils.basic_gl_setup()
                gl_utils.make_coord_system_norm_based()

                # refresh speed settings
                glfw.glfwSwapInterval(0)

                # Restore the previously active GL context.
                glfw.glfwMakeContextCurrent(active_window)

        native_presets = [
            ("None", None),
            ("Best Quality", rs_preset.RS_PRESET_BEST_QUALITY),
            ("Largest image", rs_preset.RS_PRESET_LARGEST_IMAGE),
            ("Highest framerate", rs_preset.RS_PRESET_HIGHEST_FRAMERATE),
        ]

        def set_stream_preset(val):
            if self.stream_preset != val:
                self.stream_preset = val
                self.restart_device()

        self.menu.append(
            ui.Selector(
                "stream_preset",
                self,
                setter=set_stream_preset,
                labels=[preset[0] for preset in native_presets],
                selection=[preset[1] for preset in native_presets],
                label="Stream preset",
            )
        )
        color_sizes = sorted(
            self._available_modes[rs_stream.RS_STREAM_COLOR], reverse=True
        )
        selector = ui.Selector(
            "frame_size",
            self,
            # setter=,
            selection=color_sizes,
            label="Resolution" if self.align_streams else "Color Resolution",
        )
        # Manual mode selection is disabled while a native preset drives the
        # stream configuration.
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def color_fps_getter():
            # Offer only color rates that divide the depth rate evenly so the
            # two streams stay in sync.
            avail_fps = [
                fps
                for fps in self._available_modes[rs_stream.RS_STREAM_COLOR][
                    self.frame_size
                ]
                if self.depth_frame_rate % fps == 0
            ]
            return avail_fps, [str(fps) for fps in avail_fps]

        selector = ui.Selector(
            "frame_rate",
            self,
            # setter=,
            selection_getter=color_fps_getter,
            label="Color Frame Rate",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        if not self.align_streams:
            depth_sizes = sorted(
                self._available_modes[rs_stream.RS_STREAM_DEPTH], reverse=True
            )
            selector = ui.Selector(
                "depth_frame_size",
                self,
                # setter=,
                selection=depth_sizes,
                label="Depth Resolution",
            )
            selector.read_only = self.stream_preset is not None
            self.menu.append(selector)

        def depth_fps_getter():
            # Inverse constraint of color_fps_getter: depth rate must be a
            # multiple of the color rate.
            avail_fps = [
                fps
                for fps in self._available_modes[rs_stream.RS_STREAM_DEPTH][
                    self.depth_frame_size
                ]
                if fps % self.frame_rate == 0
            ]
            return avail_fps, [str(fps) for fps in avail_fps]

        selector = ui.Selector(
            "depth_frame_rate",
            self,
            selection_getter=depth_fps_getter,
            label="Depth Frame Rate",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def reset_options():
            if self.device:
                try:
                    self.device.reset_device_options_to_default(self.controls.keys())
                except pyrs.RealsenseError as err:
                    logger.info("Resetting some device options failed")
                    logger.debug("Reason: {}".format(err))
                finally:
                    # Re-read control values so the UI matches the device even
                    # if the reset partially failed.
                    self.controls.refresh()

        self.menu.append(ui.Button("Point Cloud Window", toggle_depth_display))
        sensor_control = ui.Growing_Menu(label="Sensor Settings")
        sensor_control.append(
            ui.Button("Reset device options to default", reset_options)
        )
        for ctrl in sorted(self.controls.values(), key=lambda x: x.range.option):
            # sensor_control.append(ui.Info_Text(ctrl.description))
            # A {0, 1} range with step 1 is effectively boolean -> Switch,
            # everything else gets a Slider.
            if (
                ctrl.range.min == 0.0
                and ctrl.range.max == 1.0
                and ctrl.range.step == 1.0
            ):
                sensor_control.append(
                    ui.Switch("value", ctrl, label=ctrl.label, off_val=0.0, on_val=1.0)
                )
            else:
                sensor_control.append(
                    ui.Slider(
                        "value",
                        ctrl,
                        label=ctrl.label,
                        min=ctrl.range.min,
                        max=ctrl.range.max,
                        step=ctrl.range.step,
                    )
                )
        self.menu.append(sensor_control)

    def gl_display(self):
        """Draw the preview image and, if open, the 3D point-cloud window.

        The point-cloud window renders the colored point cloud with the
        legacy fixed-function OpenGL pipeline; mouse drag rotates the view
        (pitch/yaw, clamped).
        """
        from math import floor

        # Tear down the point-cloud window once the user closes it.
        if self.depth_window is not None and glfw.glfwWindowShouldClose(
            self.depth_window
        ):
            glfw.glfwDestroyWindow(self.depth_window)
            self.depth_window = None

        if self.depth_window is not None and self._recent_depth_frame is not None:
            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(self.depth_window)

            win_size = glfw.glfwGetFramebufferSize(self.depth_window)
            gl_utils.adjust_gl_view(win_size[0], win_size[1])
            pos = glfw.glfwGetCursorPos(self.depth_window)
            if self.mouse_drag:
                # Accumulate rotation from cursor movement; clamp to keep the
                # cloud in front of the camera.
                self.pitch = np.clip(self.pitch + (pos[1] - self.last_pos[1]), -80, 80)
                self.yaw = np.clip(self.yaw - (pos[0] - self.last_pos[0]), -120, 120)
            self.last_pos = pos

            glClearColor(0, 0, 0, 0)
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            glMatrixMode(GL_PROJECTION)
            glLoadIdentity()
            gluPerspective(60, win_size[0] / win_size[1], 0.01, 20.0)
            glMatrixMode(GL_MODELVIEW)
            glLoadIdentity()
            gluLookAt(0, 0, 0, 0, 0, 1, 0, -1, 0)
            # Rotate about a pivot 0.5 units in front of the camera.
            glTranslatef(0, 0, 0.5)
            glRotated(self.pitch, 1, 0, 0)
            glRotated(self.yaw, 0, 1, 0)
            glTranslatef(0, 0, -0.5)

            # glPointSize(2)
            glEnable(GL_DEPTH_TEST)
            extrinsics = self.device.get_device_extrinsics(
                rs_stream.RS_STREAM_DEPTH, rs_stream.RS_STREAM_COLOR
            )
            depth_frame = self._recent_depth_frame
            color_frame = self._recent_frame
            depth_scale = self.device.depth_scale

            glEnableClientState(GL_VERTEX_ARRAY)

            pointcloud = self.device.pointcloud
            glVertexPointer(3, GL_FLOAT, 0, pointcloud)
            glEnableClientState(GL_COLOR_ARRAY)
            # One RGB byte-triple per depth pixel, filled by projecting the
            # point cloud into the color image.
            depth_to_color = np.zeros(
                depth_frame.height * depth_frame.width * 3, np.uint8
            )
            rsutilwrapper.project_pointcloud_to_pixel(
                depth_to_color,
                self.device.depth_intrinsics,
                self.device.color_intrinsics,
                extrinsics,
                pointcloud,
                self._recent_frame.bgr,
            )
            glColorPointer(3, GL_UNSIGNED_BYTE, 0, depth_to_color)
            glDrawArrays(GL_POINTS, 0, depth_frame.width * depth_frame.height)
            gl_utils.glFlush()
            glDisable(GL_DEPTH_TEST)
            # gl_utils.make_coord_system_norm_based()
            glfw.glfwSwapBuffers(self.depth_window)
            glfw.glfwMakeContextCurrent(active_window)

        # Main-window preview: depth image takes precedence when enabled.
        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3)
        )

    def restart_device(
        self,
        device_id=None,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        """Stop the device and service, then request re-initialization.

        All None arguments default to the current settings. The actual
        re-initialization happens asynchronously via the
        "realsense_source.restart" notification handled in on_notify().
        """
        if device_id is None:
            if self.device is not None:
                device_id = self.device.device_id
            else:
                device_id = 0
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = self.controls.export_presets()
        if self.device is not None:
            self.device.stop()
            self.device = None
        # Bounce the whole pyrs service so the device list is refreshed.
        self.service.stop()
        self.service.start()
        self.notify_all(
            {
                "subject": "realsense_source.restart",
                "device_id": device_id,
                "color_frame_size": color_frame_size,
                "color_fps": color_fps,
                "depth_frame_size": depth_frame_size,
                "depth_fps": depth_fps,
                "device_options": device_options,
            }
        )

    def on_click(self, pos, button, action):
        """Track left-button drag state for point-cloud rotation."""
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
            self.mouse_drag = True
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_RELEASE:
            self.mouse_drag = False

    def on_notify(self, notification):
        """React to restart requests and recording start/stop events."""
        if notification["subject"] == "realsense_source.restart":
            kwargs = notification.copy()
            # Strip framework keys; the rest matches _initialize_device()'s
            # signature (see the payload built in restart_device()).
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif notification["subject"] == "recording.started":
            self.start_depth_recording(notification["rec_path"])
        elif notification["subject"] == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        """Open a depth video writer in `rec_loc` (no-op if recording is off)."""
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = AV_Writer(
            video_path, fps=self.depth_frame_rate, use_timestamps=True
        )

    def stop_depth_recording(self):
        """Finalize and release the depth video writer."""
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def frame_size(self):
        # (width, height) of the color stream (streams[0]).
        stream = self.streams[0]
        return stream.width, stream.height

    @frame_size.setter
    def frame_size(self, new_size):
        # Changing resolution requires a full device restart.
        if self.device is not None and new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        # Color stream fps.
        return self.streams[0].fps

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        # (width, height) of the depth stream (streams[1]).
        stream = self.streams[1]
        return stream.width, stream.height

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if self.device is not None and new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        # Depth stream fps.
        return self.streams[1].fps

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        # RealSense streams are raw; no hardware JPEG available.
        return False

    @property
    def online(self):
        # True only when a device exists AND it is actively streaming.
        return self.device and self.device.is_streaming()

    @property
    def name(self):
        # not the same as `if self.device:`!
        if self.device is not None:
            return self.device.name
        else:
            return "Ghost capture"
# ---- Example #6 (separator artifact from the code-sample aggregation site; "示例" = "Example") ----
class Realsense_Source(Base_Source):
    """
    Camera Capture is a class that encapsualtes pyrs.Device:
    """
    def __init__(self, g_pool, device_id=0,
                 frame_size=(640, 480), frame_rate=30,
                 depth_frame_size=(640, 480), depth_frame_rate=30,
                 align_streams=False, preview_depth=False,
                 device_options=(), record_depth=True):
        super().__init__(g_pool)
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.device = None
        self.service = pyrs.Service()
        self.align_streams = align_streams
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self._initialize_device(device_id, frame_size, frame_rate,
                                depth_frame_size, depth_frame_rate, device_options)

    def _initialize_device(self, device_id,
                           color_frame_size, color_fps,
                           depth_frame_size, depth_fps,
                           device_options=()):
        devices = tuple(self.service.get_devices())
        color_frame_size = tuple(color_frame_size)
        depth_frame_size = tuple(depth_frame_size)

        self.streams = [ColorStream(), DepthStream()]
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if not devices:
            logger.error("Camera failed to initialize. No cameras connected.")
            self.device = None
            return

        if self.device is not None:
            self.device.stop()

        if device_id >= len(devices):
            logger.error("Camera with id {} not found. Initializing default camera.".format(device_id))
            device_id = 0

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)

        # make sure that given frame sizes and rates are available
        color_modes = self._available_modes[rs_stream.RS_STREAM_COLOR]
        if color_frame_size not in color_modes:
            # automatically select highest resolution
            color_frame_size = sorted(color_modes.keys(), reverse=True)[0]

        if color_fps not in color_modes[color_frame_size]:
            # automatically select highest frame rate
            color_fps = color_modes[color_frame_size][0]

        depth_modes = self._available_modes[rs_stream.RS_STREAM_DEPTH]
        if self.align_streams:
            depth_frame_size = color_frame_size
        else:
            if depth_frame_size not in depth_modes:
                # automatically select highest resolution
                depth_frame_size = sorted(depth_modes.keys(), reverse=True)[0]

        if depth_fps not in depth_modes[depth_frame_size]:
            # automatically select highest frame rate
            depth_fps = depth_modes[depth_frame_size][0]

        colorstream = ColorStream(width=color_frame_size[0],
                                  height=color_frame_size[1],
                                  fps=color_fps, color_format='yuv')
        depthstream = DepthStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1], fps=depth_fps)

        self.streams = [colorstream, depthstream]
        if self.align_streams:
            dacstream = DACStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1], fps=depth_fps)
            dacstream.name = 'depth'  # rename data accessor
            self.streams.append(dacstream)

        # update with correctly initialized streams
        # always initiliazes color + depth, adds rectified/aligned versions as necessary

        self.device = self.service.Device(device_id, streams=self.streams)

        self.controls = Realsense_Controls(self.device, device_options)
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)

        self.deinit_gui()
        self.init_gui()

    def _enumerate_formats(self, device_id):
        '''Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates
        '''
        formats = {}
        # only lists modes for native streams (RS_STREAM_COLOR/RS_STREAM_DEPTH)
        for mode in self.service.get_device_modes(device_id):
            if mode.stream in (rs_stream.RS_STREAM_COLOR, rs_stream.RS_STREAM_DEPTH):
                # check if frame size dict is available
                if mode.stream not in formats:
                    formats[mode.stream] = {}
                stream_obj = next((s for s in self.streams if s.stream == mode.stream))
                if mode.format == stream_obj.format:
                    size = mode.width, mode.height
                    # check if framerate list is already available
                    if size not in formats[mode.stream]:
                        formats[mode.stream][size] = []
                    formats[mode.stream][size].append(mode.fps)

        if self.align_streams:
            depth_sizes = formats[rs_stream.RS_STREAM_DEPTH].keys()
            color_sizes = formats[rs_stream.RS_STREAM_COLOR].keys()
            # common_sizes = depth_sizes & color_sizes
            discarded_sizes = depth_sizes ^ color_sizes
            for size in discarded_sizes:
                for sizes in formats.values():
                    if size in sizes:
                        del sizes[size]

        return formats

    def cleanup(self):
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        self.service.stop()
        super().cleanup()

    def get_init_dict(self):
        return {'device_id': self.device.device_id,
                'frame_size': self.frame_size,
                'frame_rate': self.frame_rate,
                'depth_frame_size': self.depth_frame_size,
                'depth_frame_rate': self.depth_frame_rate,
                'preview_depth': self.preview_depth,
                'record_depth': self.record_depth,
                'align_streams': self.align_streams,
                'device_options': self.controls.export_presets()}

    def get_frames(self):
        if self.device:
            self.device.wait_for_frames()
            current_time = self.g_pool.get_timestamp()

            last_color_frame_ts = self.device.get_frame_timestamp(self.streams[0].stream)
            if self.last_color_frame_ts != last_color_frame_ts:
                self.last_color_frame_ts = last_color_frame_ts
                color = ColorFrame(self.device)
                color.timestamp = current_time
                color.index = self.color_frame_index
                self.color_frame_index += 1
            else:
                color = None

            last_depth_frame_ts = self.device.get_frame_timestamp(self.streams[1].stream)
            if self.last_depth_frame_ts != last_depth_frame_ts:
                self.last_depth_frame_ts = last_depth_frame_ts
                depth = DepthFrame(self.device)
                depth.timestamp = current_time
                depth.index = self.depth_frame_index
                self.depth_frame_index += 1
            else:
                depth = None

            return color, depth
        return None, None

    def recent_events(self, events):
        try:
            color_frame, depth_frame = self.get_frames()
        except TimeoutError:
            self._recent_frame = None
            self._recent_depth_frame = None
            # react to timeout
        except pyrs.RealsenseError as err:
            self._recent_frame = None
            self._recent_depth_frame = None
        else:
            if color_frame and depth_frame:
                self._recent_frame = color_frame
                events['frame'] = color_frame

            if depth_frame:
                self._recent_depth_frame = depth_frame
                events['depth_frame'] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def init_gui(self):
        """Build the capture-settings menu (pyglui) for this source.

        Also invoked after device restarts via _initialize_device, hence
        the deinit_gui() call at the top to avoid duplicated elements.
        """
        from pyglui import ui
        ui_elements = []

        # avoid duplicated elements since _initialize_device() calls init_gui as well
        self.deinit_gui()

        if self.device is None:
            ui_elements.append(ui.Info_Text('Capture initialization failed.'))
            self.g_pool.capture_source_menu.extend(ui_elements)
            return

        def align_and_restart(val):
            # stream alignment can only change with a full device restart
            self.align_streams = val
            self.restart_device()

        ui_elements.append(ui.Switch('record_depth', self, label='Record Depth Stream'))
        ui_elements.append(ui.Switch('preview_depth', self, label='Preview Depth'))
        ui_elements.append(ui.Switch('align_streams', self, label='Align Streams',
                                     setter=align_and_restart))

        color_sizes = sorted(self._available_modes[rs_stream.RS_STREAM_COLOR], reverse=True)
        ui_elements.append(ui.Selector(
            'frame_size', self,
            # setter=,
            selection=color_sizes,
            label= 'Resolution' if self.align_streams else 'Color Resolution'
        ))

        def color_fps_getter():
            # available frame rates depend on the currently selected resolution
            avail_fps = self._available_modes[rs_stream.RS_STREAM_COLOR][self.frame_size]
            return avail_fps, [str(fps) for fps in avail_fps]
        ui_elements.append(ui.Selector(
            'frame_rate', self,
            # setter=,
            selection_getter=color_fps_getter,
            label='Color Frame Rate'
        ))

        if not self.align_streams:
            # depth resolution is only independently selectable when streams
            # are not aligned
            depth_sizes = sorted(self._available_modes[rs_stream.RS_STREAM_DEPTH], reverse=True)
            ui_elements.append(ui.Selector(
                'depth_frame_size', self,
                # setter=,
                selection=depth_sizes,
                label='Depth Resolution'
            ))

        def depth_fps_getter():
            avail_fps = self._available_modes[rs_stream.RS_STREAM_DEPTH][self.depth_frame_size]
            return avail_fps, [str(fps) for fps in avail_fps]
        ui_elements.append(ui.Selector(
            'depth_frame_rate', self,
            selection_getter=depth_fps_getter,
            label='Depth Frame Rate'
        ))

        def reset_options():
            # best effort: individual options may fail to reset, refresh anyway
            if self.device:
                try:
                    self.device.reset_device_options_to_default(self.controls.keys())
                except pyrs.RealsenseError as err:
                    logger.info('Resetting some device options failed')
                    logger.debug('Reason: {}'.format(err))
                finally:
                    self.controls.refresh()

        sensor_control = ui.Growing_Menu(label='Sensor Settings')
        sensor_control.append(ui.Button('Reset device options to default', reset_options))
        for ctrl in sorted(self.controls.values(), key=lambda x: x.range.option):
            # sensor_control.append(ui.Info_Text(ctrl.description))
            # a 0..1 range with step 1 is effectively a boolean control
            if ctrl.range.min == 0.0 and ctrl.range.max == 1.0 and ctrl.range.step == 1.0:
                sensor_control.append(ui.Switch('value', ctrl, label=ctrl.label,
                                                off_val=0.0, on_val=1.0))
            else:
                sensor_control.append(ui.Slider('value', ctrl,
                                                label=ctrl.label,
                                                min=ctrl.range.min,
                                                max=ctrl.range.max,
                                                step=ctrl.range.step))
        ui_elements.append(sensor_control)
        self.g_pool.capture_source_menu.extend(ui_elements)

    def gl_display(self):
        """Upload the most recent frame (depth preview or color) and draw it."""
        if self.preview_depth and self._recent_depth_frame is not None:
            # depth preview is delivered as a BGR ndarray
            self.g_pool.image_tex.update_from_ndarray(self._recent_depth_frame.bgr)
            gl_utils.glFlush()
        elif not self.preview_depth and self._recent_frame is not None:
            # color frames are uploaded straight from the YUV buffer
            self.g_pool.image_tex.update_from_yuv_buffer(self._recent_frame.yuv_buffer,self._recent_frame.width,self._recent_frame.height)
            gl_utils.glFlush()

        gl_utils.make_coord_system_norm_based()
        self.g_pool.image_tex.draw()
        if not self.online:
            # dim the preview while the device is not streaming
            cygl.utils.draw_gl_texture(np.zeros((1, 1, 3), dtype=np.uint8), alpha=0.4)
        gl_utils.make_coord_system_pixel_based((self.frame_size[1], self.frame_size[0], 3))

    def restart_device(self, device_id=None, color_frame_size=None, color_fps=None,
                       depth_frame_size=None, depth_fps=None, device_options=None):
        """Broadcast a restart notification, filling every unspecified
        setting from the current device state."""
        device_id = self.device.device_id if device_id is None else device_id
        color_frame_size = self.frame_size if color_frame_size is None else color_frame_size
        color_fps = self.frame_rate if color_fps is None else color_fps
        depth_frame_size = self.depth_frame_size if depth_frame_size is None else depth_frame_size
        depth_fps = self.depth_frame_rate if depth_fps is None else depth_fps
        device_options = self.controls.export_presets() if device_options is None else device_options
        notification = {
            'subject': 'realsense_source.restart',
            'device_id': device_id,
            'color_frame_size': color_frame_size,
            'color_fps': color_fps,
            'depth_frame_size': depth_frame_size,
            'depth_fps': depth_fps,
            'device_options': device_options,
        }
        self.notify_all(notification)

    def on_notify(self, notification):
        """Dispatch notifications relevant to this capture backend."""
        subject = notification['subject']
        if subject == 'realsense_source.restart':
            # everything except the subject is forwarded as keyword arguments
            kwargs = {k: v for k, v in notification.items() if k != 'subject'}
            self._initialize_device(**kwargs)
        elif subject == 'recording.started':
            self.start_depth_recording(notification['rec_path'])
        elif subject == 'recording.stopped':
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        """Open a depth video writer in `rec_loc`, if depth recording is on."""
        if not self.record_depth:
            return
        if self.depth_video_writer is not None:
            logger.warning('Depth video recording has been started already')
            return
        target = os.path.join(rec_loc, 'depth.mp4')
        self.depth_video_writer = AV_Writer(target, fps=self.depth_frame_rate, use_timestamps=True)

    def stop_depth_recording(self):
        """Close and release the depth writer; warn if none was active."""
        writer = self.depth_video_writer
        if writer is None:
            logger.warning('Depth video recording was not running')
            return
        writer.close()
        self.depth_video_writer = None

    @property
    def intrinsics(self):
        """Read-only access to the camera intrinsics for this capture."""
        return self._intrinsics

    @property
    def frame_size(self):
        """Color stream resolution as (width, height)."""
        color_stream = self.streams[0]
        return color_stream.width, color_stream.height

    @frame_size.setter
    def frame_size(self, new_size):
        # a resolution change only takes effect through a device restart
        if self.device is None:
            return
        if new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        """Color stream frame rate."""
        return self.streams[0].fps

    @frame_rate.setter
    def frame_rate(self, new_rate):
        # a rate change only takes effect through a device restart
        if self.device is None:
            return
        if new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        """Depth stream resolution as (width, height)."""
        depth_stream = self.streams[1]
        return depth_stream.width, depth_stream.height

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        # a resolution change only takes effect through a device restart
        if self.device is None:
            return
        if new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        """Depth stream frame rate."""
        return self.streams[1].fps

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        # a rate change only takes effect through a device restart
        if self.device is None:
            return
        if new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        """This source never delivers JPEG-compressed frames."""
        return False

    @property
    def online(self):
        """Truthy when a device is attached and currently streaming.

        Note: returns the device object (not a bool) when streaming.
        """
        return self.device and self.device.is_streaming()

    @property
    def name(self):
        """Human readable device name, or a placeholder without a device."""
        # deliberately `is None`, not truthiness: a present-but-idle
        # device still has a name (not the same as `if self.device:`!)
        if self.device is None:
            return "Ghost capture"
        return self.device.name
Example #7
0
def export(
        rec_dir,
        user_dir,
        min_data_confidence,
        start_frame=None,
        end_frame=None,
        plugin_initializers=(),
        out_file_path=None,
        pre_computed=None,
):
    """Render a recording's world video with visualization plugins applied.

    Generator that yields ``(status_message, progress)`` tuples consumed by
    the launching process; on an unexpected error it yields the exception
    object itself so the parent can surface it.

    Arguments:
        rec_dir: recording directory with the world video and data files.
        user_dir: user settings dir; runtime plugins load from
            ``<user_dir>/plugins``.
        min_data_confidence: confidence threshold stored on g_pool.
        start_frame, end_frame: trim marks, interpreted like a python slice.
        plugin_initializers: initializers forwarded to Plugin_List.
        out_file_path: target video path; defaults to
            ``<rec_dir>/world_viz.mp4``.
        pre_computed: dict with "pupil", "gaze" and "fixations" entries of
            msgpack-serialized data (required keys).
    """
    # NOTE(review): was a mutable default argument (`pre_computed={}`);
    # use a None sentinel so a single dict is not shared across calls.
    if pre_computed is None:
        pre_computed = {}

    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    print(start_status)
    yield start_status, 0

    try:
        pm.update_recording_to_recent(rec_dir)

        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Vis_Eye_Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        pm.update_recording_to_recent(rec_dir)

        audio_path = os.path.join(rec_dir, "audio.mp4")

        meta_info = pm.load_meta_info(rec_dir)

        g_pool = Global_Container()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        video_path = [
            f for f in glob(os.path.join(rec_dir, "world.*"))
            if os.path.splitext(f)[1] in valid_ext
        ][0]
        cap = init_playback_source(g_pool, source_path=video_path, timing=None)

        timestamps = cap.timestamps

        # Out file path verification, we do this before but if one uses a separate tool, this will kick in.
        if out_file_path is None:
            out_file_path = os.path.join(rec_dir, "world_viz.mp4")
        else:
            file_name = os.path.basename(out_file_path)
            dir_name = os.path.dirname(out_file_path)
            if not dir_name:
                dir_name = rec_dir
            if not file_name:
                file_name = "world_viz.mp4"
            out_file_path = os.path.expanduser(
                os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, endframe) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the lauching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_loc=audio_path,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        # deserialize the pre-computed msgpack payloads in place
        for initializers in pre_computed.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positons and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifiactions when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifactions:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close()
        writer = None

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        print(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        print("Video export with pid {} was canceled.".format(os.getpid()))
    except Exception as e:
        from time import sleep
        import traceback

        trace = traceback.format_exc()
        print("Process Export (pid: {}) crashed with trace:\n{}".format(
            os.getpid(), trace))
        # the consumer receives the exception object itself as a status
        yield e
        sleep(1.0)
Example #8
0
File: exporter.py  Project: xiuxi/pupil
def export(rec_dir,
           user_dir,
           min_data_confidence,
           start_frame=None,
           end_frame=None,
           plugin_initializers=(),
           out_file_path=None,
           pre_computed=None):
    """Render a recording's world video with visualization plugins applied.

    Generator yielding ``(status_message, progress)`` tuples for the
    launching process.

    ``pre_computed`` may supply "pupil_data", "pupil_positions" and
    "gaze_positions" to skip loading them from disk.
    """
    # NOTE(review): was a mutable default argument (`pre_computed={}`);
    # use a None sentinel so a single dict is not shared across calls.
    if pre_computed is None:
        pre_computed = {}

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))
    start_status = 'Starting video export with pid: {}'.format(os.getpid())
    print(start_status)
    yield start_status, 0

    try:
        update_recording_to_recent(rec_dir)

        vis_plugins = sorted([
            Vis_Circle, Vis_Cross, Vis_Polyline, Vis_Light_Points,
            Vis_Watermark, Vis_Scan_Path, Vis_Eye_Video_Overlay
        ],
                             key=lambda x: x.__name__)
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(import_runtime_plugins(
            os.path.join(user_dir, 'plugins')),
                              key=lambda x: x.__name__)

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        update_recording_to_recent(rec_dir)

        video_path = [
            f for f in glob(os.path.join(rec_dir, "world.*"))
            if os.path.splitext(f)[-1] in ('.mp4', '.mkv', '.avi', '.mjpeg')
        ][0]
        pupil_data_path = os.path.join(rec_dir, "pupil_data")
        audio_path = os.path.join(rec_dir, "audio.mp4")

        meta_info = load_meta_info(rec_dir)

        g_pool = Global_Container()
        g_pool.app = 'exporter'
        g_pool.min_data_confidence = min_data_confidence
        cap = File_Source(g_pool, video_path)
        timestamps = cap.timestamps

        # Out file path verification, we do this before but if one uses a separate tool, this will kick in.
        if out_file_path is None:
            out_file_path = os.path.join(rec_dir, "world_viz.mp4")
        else:
            file_name = os.path.basename(out_file_path)
            dir_name = os.path.dirname(out_file_path)
            if not dir_name:
                dir_name = rec_dir
            if not file_name:
                file_name = 'world_viz.mp4'
            out_file_path = os.path.expanduser(
                os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, endframe) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the lauching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = "Will export from frame {} to frame {}. This means I will export {} frames."
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_loc=audio_path,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []
        # load pupil_positions, gaze_positions
        pupil_data = pre_computed.get("pupil_data") or load_object(
            pupil_data_path)
        g_pool.pupil_data = pupil_data
        g_pool.pupil_positions = pre_computed.get(
            "pupil_positions") or pupil_data['pupil_positions']
        g_pool.gaze_positions = pre_computed.get(
            "gaze_positions") or pupil_data['gaze_positions']
        g_pool.fixations = []  # populated by the fixation detector plugin

        g_pool.pupil_positions_by_frame = correlate_data(
            g_pool.pupil_positions, g_pool.timestamps)
        g_pool.gaze_positions_by_frame = correlate_data(
            g_pool.gaze_positions, g_pool.timestamps)
        g_pool.fixations_by_frame = [
            [] for x in g_pool.timestamps
        ]  # populated by the fixation detector plugin

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoFileError:
                break

            events = {'frame': frame}
            # new positons and events
            events['gaze_positions'] = g_pool.gaze_positions_by_frame[
                frame.index]
            events['pupil_positions'] = g_pool.pupil_positions_by_frame[
                frame.index]

            # publish delayed notifiactions when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n['_notify_time_'] < time():
                    del n['_notify_time_']
                    del g_pool.delayed_notifications[n['subject']]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifactions:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield 'Exporting', current_frame

        writer.close()
        writer = None

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        print(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield 'Export done. This took {:.0f} seconds.'.format(
            duration), current_frame

    except GeneratorExit:
        print('Video export with pid {} was canceled.'.format(os.getpid()))
    except Exception:
        # NOTE(review): was a bare `except:`, which also swallows
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        from time import sleep
        import traceback
        trace = traceback.format_exc()
        print('Process Export (pid: {}) crashed with trace:\n{}'.format(
            os.getpid(), trace))
        sleep(1.0)
Example #9
0
class Realsense2_Source(Base_Source):
    def __init__(
        self,
        g_pool,
        device_id=None,
        frame_size=DEFAULT_COLOR_SIZE,
        frame_rate=DEFAULT_COLOR_FPS,
        depth_frame_size=DEFAULT_DEPTH_SIZE,
        depth_frame_rate=DEFAULT_DEPTH_FPS,
        preview_depth=False,
        device_options=(),
        record_depth=True,
    ):
        """Set up librealsense2 context/pipeline state, then start the device."""
        logger.debug("_init_ started")
        super().__init__(g_pool)
        # librealsense plumbing
        self.context = rs.context()
        self.pipeline = rs.pipeline(self.context)
        self.pipeline_profile = None
        self._needs_restart = False
        # frame bookkeeping
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        # user-facing options
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        # fallback modes, refreshed by _initialize_device on success
        self.frame_size_backup = DEFAULT_COLOR_SIZE
        self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
        self.frame_rate_backup = DEFAULT_COLOR_FPS
        self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )
        logger.debug("_init_ completed")

    def _initialize_device(
        self,
        device_id,
        color_frame_size,
        color_fps,
        depth_frame_size,
        depth_fps,
        device_options=(),
    ):
        """(Re)start the realsense2 pipeline with the requested modes.

        Stops any running pipeline first, validates the requested frame
        rates against the modes the device actually offers, and falls back
        to the default configuration when any mode argument is None.
        """
        self.stop_pipeline()
        # invalidate frame caches so stale frames are not redisplayed
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if device_id is None:
            device_id = self.device_id

        if device_id is None:  # FIXME these two if blocks look ugly.
            return

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)
        logger.debug(
            "device_id: {} self._available_modes: {}".format(
                device_id, str(self._available_modes)
            )
        )

        if (
            color_frame_size is not None
            and depth_frame_size is not None
            and color_fps is not None
            and depth_fps is not None
        ):
            color_frame_size = tuple(color_frame_size)
            depth_frame_size = tuple(depth_frame_size)

            logger.debug(
                "Initialize with Color {}@{}\tDepth {}@{}".format(
                    color_frame_size, color_fps, depth_frame_size, depth_fps
                )
            )

            # make sure the frame rates are compatible with the given frame sizes
            color_fps = self._get_valid_frame_rate(
                rs.stream.color, color_frame_size, color_fps
            )
            depth_fps = self._get_valid_frame_rate(
                rs.stream.depth, depth_frame_size, depth_fps
            )

            # keep the validated settings — presumably used as fallbacks on
            # later restarts (consumers outside this view); TODO confirm
            self.frame_size_backup = color_frame_size
            self.depth_frame_size_backup = depth_frame_size
            self.frame_rate_backup = color_fps
            self.depth_frame_rate_backup = depth_fps

            config = self._prep_configuration(
                color_frame_size, color_fps, depth_frame_size, depth_fps
            )
        else:
            config = self._get_default_config()
            self.frame_size_backup = DEFAULT_COLOR_SIZE
            self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
            self.frame_rate_backup = DEFAULT_COLOR_FPS
            self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        try:
            self.pipeline_profile = self.pipeline.start(config)
        except RuntimeError as re:
            logger.error("Cannot start pipeline! " + str(re))
            self.pipeline_profile = None
        else:
            # cache per-stream profiles for fast lookup in get_frames
            self.stream_profiles = {
                s.stream_type(): s.as_video_stream_profile()
                for s in self.pipeline_profile.get_streams()
            }
            logger.debug("Pipeline started for device " + device_id)
            logger.debug("Stream profiles: " + str(self.stream_profiles))

            self._intrinsics = load_intrinsics(
                self.g_pool.user_dir, self.name, self.frame_size
            )
            self.update_menu()
            self._needs_restart = False

    def _prep_configuration(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
    ):
        """Build an rs.config for the requested modes.

        Only two pixel formats are ever used: YUYV for color, Z16 for depth.
        """
        config = rs.config()

        # depth first, then color — same order as the original setup
        config.enable_stream(
            rs.stream.depth,
            depth_frame_size[0],
            depth_frame_size[1],
            rs.format.z16,
            depth_fps,
        )
        config.enable_stream(
            rs.stream.color,
            color_frame_size[0],
            color_frame_size[1],
            rs.format.yuyv,
            color_fps,
        )
        return config

    def _get_default_config(self):
        """Default configuration: YUYV color and Z16 depth at default modes.

        librealsense would otherwise pick RGB8 for the color stream.
        """
        default_modes = (
            (rs.stream.color, DEFAULT_COLOR_SIZE, rs.format.yuyv, DEFAULT_COLOR_FPS),
            (rs.stream.depth, DEFAULT_DEPTH_SIZE, rs.format.z16, DEFAULT_DEPTH_FPS),
        )
        config = rs.config()
        for stream, size, pixel_format, fps in default_modes:
            config.enable_stream(stream, size[0], size[1], pixel_format, fps)
        return config

    def _get_valid_frame_rate(self, stream_type, frame_size, fps):
        """Return `fps` if supported for (`stream_type`, `frame_size`);
        otherwise the closest supported rate, or the stream's default when
        mode information is unavailable.
        """
        assert stream_type == rs.stream.color or stream_type == rs.stream.depth

        if not self._available_modes or stream_type not in self._available_modes:
            logger.warning(
                "_get_valid_frame_rate: self._available_modes not set yet. Returning default fps."
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS
            else:
                raise ValueError("Unexpected `stream_type`: {}".format(stream_type))

        if frame_size not in self._available_modes[stream_type]:
            logger.error(
                "Frame size not supported for {}: {}. Returning default fps".format(
                    stream_type, frame_size
                )
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS

        if fps not in self._available_modes[stream_type][frame_size]:
            old_fps = fps
            # pick the supported rate with minimal absolute distance
            rates = [
                abs(r - fps) for r in self._available_modes[stream_type][frame_size]
            ]
            best_rate_idx = rates.index(min(rates))
            fps = self._available_modes[stream_type][frame_size][best_rate_idx]
            # Bug fix: the old message always said "Color Stream" even when
            # adjusting the depth stream; report the actual stream instead.
            logger.warning(
                "{} fps is not supported for ({}) for {}. Fallback to {} fps".format(
                    old_fps, frame_size, stream_type, fps
                )
            )

        return fps

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates

        Only color (yuyv) and depth (z16) modes are collected. Returns an
        empty dict when the context is missing or the device (matched by
        serial number) is not found.
        """
        formats = {}

        if self.context is None:
            return formats

        devices = self.context.query_devices()
        current_device = None

        for d in devices:
            try:
                serial = d.get_info(rs.camera_info.serial_number)
            except RuntimeError as re:
                # device vanished between enumeration and this query
                logger.error("Device no longer available " + str(re))
            else:
                if device_id == serial:
                    current_device = d

        if current_device is None:
            return formats
        logger.debug("Found the current device: " + device_id)

        sensors = current_device.query_sensors()
        for s in sensors:
            stream_profiles = s.get_stream_profiles()
            for sp in stream_profiles:
                vp = sp.as_video_stream_profile()
                stream_type = vp.stream_type()

                # skip everything but the stream types / formats this
                # backend actually consumes
                if stream_type not in (rs.stream.color, rs.stream.depth):
                    continue
                elif vp.format() not in (rs.format.z16, rs.format.yuyv):
                    continue

                formats.setdefault(stream_type, {})
                stream_resolution = (vp.width(), vp.height())
                formats[stream_type].setdefault(stream_resolution, []).append(vp.fps())

        return formats

    def stop_pipeline(self):
        """Stop the streaming pipeline, dropping cached profiles first."""
        if not self.online:
            return
        try:
            self.pipeline_profile = None
            self.stream_profiles = None
            self.pipeline.stop()
            logger.debug("Pipeline stopped.")
        except RuntimeError as re:
            logger.error("Cannot stop the pipeline: " + str(re))

    def cleanup(self):
        """Stop depth recording (if active) and shut down the pipeline."""
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        self.stop_pipeline()

    def get_init_dict(self):
        """Session settings used to re-instantiate this source."""
        return dict(
            frame_size=self.frame_size,
            frame_rate=self.frame_rate,
            depth_frame_size=self.depth_frame_size,
            depth_frame_rate=self.depth_frame_rate,
            preview_depth=self.preview_depth,
            record_depth=self.record_depth,
        )

    def get_frames(self):
        """Fetch the newest (color, depth) frame pair from the pipeline.

        Either element may be None if its stream is not in the active
        profiles or produced no new frame (deduplicated by the device frame
        timestamp). Returns (None, None) while offline. Raises RuntimeError
        when waiting for frames times out.
        """
        if self.online:
            try:
                frames = self.pipeline.wait_for_frames(TIMEOUT)
            except RuntimeError as e:
                logger.error("get_frames: Timeout!")
                raise RuntimeError(e)
            else:
                # stamp frames with pupil time, not the device clock
                current_time = self.g_pool.get_timestamp()

                color = None
                # if we're expecting color frames
                if rs.stream.color in self.stream_profiles:
                    color_frame = frames.get_color_frame()
                    last_color_frame_ts = color_frame.get_timestamp()
                    if self.last_color_frame_ts != last_color_frame_ts:
                        self.last_color_frame_ts = last_color_frame_ts
                        color = ColorFrame(
                            np.asanyarray(color_frame.get_data()),
                            current_time,
                            self.color_frame_index,
                        )
                        self.color_frame_index += 1

                depth = None
                # if we're expecting depth frames
                if rs.stream.depth in self.stream_profiles:
                    depth_frame = frames.get_depth_frame()
                    last_depth_frame_ts = depth_frame.get_timestamp()
                    if self.last_depth_frame_ts != last_depth_frame_ts:
                        self.last_depth_frame_ts = last_depth_frame_ts
                        depth = DepthFrame(
                            np.asanyarray(depth_frame.get_data()),
                            current_time,
                            self.depth_frame_index,
                        )
                        self.depth_frame_index += 1

                return color, depth
        return None, None

    def recent_events(self, events):
        """Poll new frames and publish them into the ``events`` dict.

        When a restart is pending or the device is offline, request a restart
        and skip this cycle instead of polling.
        """
        if self._needs_restart or not self.online:
            logger.debug("recent_events -> restarting device")
            self.restart_device()
            # brief pause so a failing device does not busy-loop the world process
            time.sleep(0.01)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except RuntimeError as re:
            logger.warning("Realsense failed to provide frames." + str(re))
            self._recent_frame = None
            self._recent_depth_frame = None
            # schedule a restart attempt on the next cycle
            self._needs_restart = True
        else:
            if color_frame is not None:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame is not None:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                # persist depth frames while a recording is running
                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        """Tear down the plugin menu."""
        self.remove_menu()

    def init_ui(self):
        """Create the plugin menu and populate it."""
        self.add_menu()
        # NOTE(review): label reads "Local USB Video Source" although this is
        # the RealSense2 source - looks like a copy-paste leftover; confirm
        # before changing, since it is user-visible.
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        """Rebuild the settings menu to reflect the current device state."""
        logger.debug("update_menu")
        try:
            del self.menu[:]
        except AttributeError:
            # menu does not exist yet (init_ui has not run) -> nothing to rebuild
            return

        from pyglui import ui

        if not self.online:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        self.menu.append(ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(ui.Switch("preview_depth", self, label="Preview Depth"))

        if self._available_modes is not None:

            # The selection_getter callbacks are evaluated lazily by pyglui
            # every time the selector is drawn, so they always reflect the
            # current device state; each falls back to the backup value when
            # no device is connected.
            def frame_size_selection_getter():
                if self.device_id:
                    frame_size = sorted(
                        self._available_modes[rs.stream.color], reverse=True
                    )
                    labels = ["({}, {})".format(t[0], t[1]) for t in frame_size]
                    return frame_size, labels
                else:
                    return [self.frame_size_backup], [str(self.frame_size_backup)]

            selector = ui.Selector(
                "frame_size",
                self,
                selection_getter=frame_size_selection_getter,
                label="Color Resolution",
            )
            self.menu.append(selector)

            def frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps
                        for fps in self._available_modes[rs.stream.color][
                            self.frame_size
                        ]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return [self.frame_rate_backup], [str(self.frame_rate_backup)]

            selector = ui.Selector(
                "frame_rate",
                self,
                selection_getter=frame_rate_selection_getter,
                label="Color Frame Rate",
            )
            self.menu.append(selector)

            def depth_frame_size_selection_getter():
                if self.device_id:
                    depth_sizes = sorted(
                        self._available_modes[rs.stream.depth], reverse=True
                    )
                    labels = ["({}, {})".format(t[0], t[1]) for t in depth_sizes]
                    return depth_sizes, labels
                else:
                    return (
                        [self.depth_frame_size_backup],
                        [str(self.depth_frame_size_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_size",
                self,
                selection_getter=depth_frame_size_selection_getter,
                label="Depth Resolution",
            )
            self.menu.append(selector)

            def depth_frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps
                        for fps in self._available_modes[rs.stream.depth][
                            self.depth_frame_size
                        ]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return (
                        [self.depth_frame_rate_backup],
                        [str(self.depth_frame_rate_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_rate",
                self,
                selection_getter=depth_frame_rate_selection_getter,
                label="Depth Frame Rate",
            )
            self.menu.append(selector)

            def reset_options():
                logger.debug("reset_options")
                self.reset_device(self.device_id)

            sensor_control = ui.Growing_Menu(label="Sensor Settings")
            sensor_control.append(
                ui.Button("Reset device options to default", reset_options)
            )
            self.menu.append(sensor_control)
        else:
            logger.debug("update_menu: self._available_modes is None")

    def gl_display(self):
        """Draw the most recent depth preview or color frame to the window."""

        if self.preview_depth and self._recent_depth_frame is not None:
            # depth preview takes precedence when enabled and a frame exists
            self.g_pool.image_tex.update_from_ndarray(self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            # let the base class render its offline overlay
            super().gl_display()

        # restore a pixel-based coordinate system for subsequent drawing;
        # frame_size is (width, height), so the shape tuple is (h, w, 3)
        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3)
        )

    def reset_device(self, device_id):
        """Request a restart of *device_id* with all settings reset to defaults.

        Falls back to the currently active device when *device_id* is None.
        The restart happens asynchronously via the notification system.
        """
        logger.debug("reset_device")
        target = self.device_id if device_id is None else device_id

        self.notify_all(
            {
                "subject": "realsense2_source.restart",
                "device_id": target,
                "color_frame_size": None,
                "color_fps": None,
                "depth_frame_size": None,
                "depth_fps": None,
                "device_options": [],  # FIXME
            }
        )

    def restart_device(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        """Broadcast a restart notification, filling unset args from current state."""
        color_frame_size = (
            self.frame_size if color_frame_size is None else color_frame_size
        )
        color_fps = self.frame_rate if color_fps is None else color_fps
        depth_frame_size = (
            self.depth_frame_size if depth_frame_size is None else depth_frame_size
        )
        depth_fps = self.depth_frame_rate if depth_fps is None else depth_fps
        device_options = [] if device_options is None else device_options  # FIXME

        self.notify_all(
            {
                "subject": "realsense2_source.restart",
                "device_id": None,
                "color_frame_size": color_frame_size,
                "color_fps": color_fps,
                "depth_frame_size": depth_frame_size,
                "depth_fps": depth_fps,
                "device_options": device_options,
            }
        )
        logger.debug("self.restart_device --> self.notify_all")

    def on_notify(self, notification):
        """Dispatch notifications relevant to this capture source."""
        subject = notification["subject"]
        logger.debug('self.on_notify, notification["subject"]: ' + subject)
        if subject == "realsense2_source.restart":
            # forward everything except the routing metadata as kwargs
            kwargs = dict(notification)
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif subject == "recording.started":
            self.start_depth_recording(notification["rec_path"])
        elif subject == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        """Open a depth video writer in *rec_loc*, if depth recording is enabled."""
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        target_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = AV_Writer(
            target_path, fps=self.depth_frame_rate, use_timestamps=True
        )

    def stop_depth_recording(self):
        """Close and discard the depth writer, warning when none is active."""
        writer = self.depth_video_writer
        if writer is None:
            logger.warning("Depth video recording was not running")
            return

        writer.close()
        self.depth_video_writer = None

    @property
    def device_id(self):
        """Serial number of the running device, or of the first one connected.

        Returns None when no device is connected at all.
        """
        if self.online:  # already running -> report the pipeline's device
            device = self.pipeline_profile.get_device()
            return device.get_info(rs.camera_info.serial_number)

        # not running: default to the first available device
        devices = self.context.query_devices()
        if not devices:
            logger.debug("device_id: No device connected.")
            return None
        logger.info("device_id: first device by default.")
        return devices[0].get_info(rs.camera_info.serial_number)

    @property
    def frame_size(self):
        """Color stream resolution as (width, height).

        Falls back to the last known backup value while no color stream
        profile is available (offline / not yet initialised).
        """
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except (AttributeError, KeyError, TypeError):
            # three previously separate, identical handlers collapsed into one
            return self.frame_size_backup

    @frame_size.setter
    def frame_size(self, new_size):
        """Request a device restart when the color resolution changes."""
        if new_size == self.frame_size:
            return
        self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        """Color stream frame rate in fps; backup value while offline."""
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check FPS is in self.available modes
            return stream_profile.fps()
        except (AttributeError, KeyError, TypeError):
            # three previously separate, identical handlers collapsed into one
            return self.frame_rate_backup

    @frame_rate.setter
    def frame_rate(self, new_rate):
        """Request a device restart when the color frame rate changes."""
        if new_rate == self.frame_rate:
            return
        self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        """Depth stream resolution as (width, height); backup value while offline."""
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except (AttributeError, KeyError, TypeError):
            # three previously separate, identical handlers collapsed into one
            return self.depth_frame_size_backup

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        """Request a device restart when the depth resolution changes."""
        if new_size == self.depth_frame_size:
            return
        self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        """Depth stream frame rate in fps; backup value while offline."""
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            return stream_profile.fps()
        except (AttributeError, KeyError, TypeError):
            # three previously separate, identical handlers collapsed into one
            return self.depth_frame_rate_backup

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        """Request a device restart when the depth frame rate changes."""
        if new_rate == self.depth_frame_rate:
            return
        self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        """This source never provides MJPEG-compressed frames."""
        return False

    @property
    def online(self):
        """True while both a pipeline object and a pipeline profile exist."""
        has_profile = self.pipeline_profile is not None
        has_pipeline = self.pipeline is not None
        return has_profile and has_pipeline

    @property
    def name(self):
        """Human-readable device name, or a ghost-capture placeholder when offline."""
        if not self.online:
            logger.debug(
                "self.name: Realsense2 not online. Falling back to Ghost capture"
            )
            return "Ghost capture"
        device = self.pipeline_profile.get_device()
        return device.get_info(rs.camera_info.name)
# 示例#10 (Example 10 - scraped example separator)
# 0
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           user_dir,
           start_frame=None,
           end_frame=None,
           plugin_initializers=None,
           out_file_path=None):
    """Export a recording's world video with visualization plugins applied.

    Intended to run in a worker process: `should_terminate`,
    `frames_to_export` and `current_frame` are shared values used for abort
    control and progress reporting.

    Returns True on success, False when aborted or when the trim marks
    select an empty frame range.
    """
    # was `plugin_initializers=[]`: a mutable default is shared across calls
    if plugin_initializers is None:
        plugin_initializers = []

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    # parse info.csv file (tab separated key/value pairs)
    with open(rec_dir + "/info.csv") as info:
        meta_info = dict(
            ((line.strip().split('\t')) for line in info.readlines()))
    rec_version = read_rec_version(meta_info)
    logger.debug("Exporting a video from recording with version: %s" %
                 rec_version)

    # recordings before 0.4 used a different container and timestamp layout
    if rec_version < VersionFormat('0.4'):
        video_path = rec_dir + "/world.avi"
        timestamps_path = rec_dir + "/timestamps.npy"
    else:
        video_path = rec_dir + "/world.mkv"
        timestamps_path = rec_dir + "/world_timestamps.npy"

    gaze_positions_path = rec_dir + "/gaze_positions.npy"
    # load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)

    # correlate data
    if rec_version < VersionFormat('0.4'):
        gaze_positions_by_frame = correlate_gaze_legacy(gaze_list, timestamps)
    else:
        gaze_positions_by_frame = correlate_gaze(gaze_list, timestamps)

    cap = autoCreateCapture(video_path, timestamps=timestamps_path)
    width, height = cap.frame_size

    # Out file path verification; normally done by the caller, but this
    # kicks in when the exporter is used via a separate tool.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    # Trim mark verification:
    # trim marks (start frame, end frame) are defined like python list
    # slices, thus we can test them like such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        # logger.warn is deprecated in favor of logger.warning
        logger.warning(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    if start_frame is None:  # was `== None`; identity check is correct here
        start_frame = 0

    # these two vars are shared with the launching process and give a job
    # length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g = Global_Container()
    g.app = 'exporter'
    g.rec_dir = rec_dir
    g.user_dir = user_dir
    g.rec_version = rec_version
    g.timestamps = timestamps
    g.gaze_list = gaze_list
    g.gaze_positions_by_frame = gaze_positions_by_frame
    g.plugins = Plugin_List(g, plugin_by_name, plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events
        events['gaze_positions'] = gaze_positions_by_frame[frame.index]
        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
# 示例#11 (Example 11 - scraped example separator)
# 0
def export(should_terminate, frames_to_export, current_frame, rec_dir, user_dir, min_data_confidence,
           start_frame=None, end_frame=None, plugin_initializers=(), out_file_path=None, pre_computed=None):
    """Export the world video of a recording with visualization plugins applied.

    Intended to run in a worker process: `should_terminate`,
    `frames_to_export` and `current_frame` are shared values used for abort
    control and progress reporting. `pre_computed` may carry already-loaded
    pupil/gaze data to skip loading it from disk.

    Returns True on success, False when aborted or when the trim marks
    select an empty frame range.
    """
    # was `pre_computed={}`: a mutable default dict is shared across calls
    if pre_computed is None:
        pre_computed = {}

    vis_plugins = sorted([Vis_Circle, Vis_Cross, Vis_Polyline, Vis_Light_Points,
                          Vis_Watermark, Vis_Scan_Path, Vis_Eye_Video_Overlay], key=lambda x: x.__name__)
    analysis_plugins = sorted([Pupil_Angle_3D_Fixation_Detector,
                               Gaze_Position_2D_Fixation_Detector], key=lambda x: x.__name__)
    user_plugins = sorted(import_runtime_plugins(os.path.join(user_dir, 'plugins')), key=lambda x: x.__name__)

    available_plugins = vis_plugins + analysis_plugins + user_plugins
    name_by_index = [p.__name__ for p in available_plugins]
    plugin_by_name = dict(zip(name_by_index, available_plugins))

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    update_recording_to_recent(rec_dir)

    video_path = [f for f in glob(os.path.join(rec_dir, "world.*")) if f[-3:] in ('mp4', 'mkv', 'avi')][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")
    audio_path = os.path.join(rec_dir, "audio.mp4")

    meta_info = load_meta_info(rec_dir)

    g_pool = Global_Container()
    g_pool.app = 'exporter'
    g_pool.min_data_confidence = min_data_confidence
    timestamps = np.load(timestamps_path)
    cap = File_Source(g_pool, video_path, timestamps=timestamps)

    # Out file path verification; normally done by the caller, but this
    # kicks in when the exporter is used via a separate tool.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to {}".format(out_file_path))

    # Trim mark verification
    # make sure the trim marks (start frame, endframe) make sense:
    # We define them like python list slices, thus we can test them like such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        # logger.warn is deprecated in favor of logger.warning
        logger.warning("Start and end frames are set such that no video will be exported.")
        return False

    if start_frame is None:
        start_frame = 0

    # these two vars are shared with the launching process and give a job
    # length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    exp_info = "Will export from frame {} to frame {}. This means I will export {} frames."
    logger.debug(exp_info.format(start_frame, start_frame + frames_to_export.value, frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path, fps=cap.frame_rate, audio_loc=audio_path, use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g_pool.capture = cap
    g_pool.rec_dir = rec_dir
    g_pool.user_dir = user_dir
    g_pool.meta_info = meta_info
    g_pool.timestamps = timestamps
    g_pool.delayed_notifications = {}
    g_pool.notifications = []
    # load pupil_positions, gaze_positions; prefer pre-computed data when given
    pupil_data = pre_computed.get("pupil_data") or load_object(pupil_data_path)
    g_pool.pupil_data = pupil_data
    g_pool.pupil_positions = pre_computed.get("pupil_positions") or pupil_data['pupil_positions']
    g_pool.gaze_positions = pre_computed.get("gaze_positions") or pupil_data['gaze_positions']
    g_pool.fixations = []  # populated by the fixation detector plugin

    g_pool.pupil_positions_by_frame = correlate_data(g_pool.pupil_positions, g_pool.timestamps)
    g_pool.gaze_positions_by_frame = correlate_data(g_pool.gaze_positions, g_pool.timestamps)
    g_pool.fixations_by_frame = [[] for x in g_pool.timestamps]  # populated by the fixation detector plugin

    # add plugins
    g_pool.plugins = Plugin_List(g_pool, plugin_by_name, plugin_initializers)

    while frames_to_export.value > current_frame.value:

        if should_terminate.value:
            logger.warning("User aborted export. Exported {} frames to {}.".format(current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame()
        except EndofVideoFileError:
            break

        events = {'frame': frame}
        # new positions and events
        events['gaze_positions'] = g_pool.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g_pool.pupil_positions_by_frame[frame.index]

        # publish delayed notifications when their time has come.
        for n in list(g_pool.delayed_notifications.values()):
            if n['_notify_time_'] < time():
                del n['_notify_time_']
                del g_pool.delayed_notifications[n['subject']]
                g_pool.notifications.append(n)

        # notify each plugin if there are new notifications:
        while g_pool.notifications:
            n = g_pool.notifications.pop(0)
            for p in g_pool.plugins:
                p.on_notify(n)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.recent_events(events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
    logger.info(result.format(current_frame.value, out_file_path, duration, effective_fps))
    return True
# 示例#12 (Example 12 - scraped example separator)
# 0
def export(should_terminate, frames_to_export, current_frame, rec_dir, start_frame=None, end_frame=None, plugin_initializers=None, out_file_path=None):
    """Export a recording's world video with plugin visualizations (legacy variant).

    Intended to run in a worker process: `should_terminate`,
    `frames_to_export` and `current_frame` are shared values used for abort
    control and progress reporting.

    Returns True on success, False when aborted or when the trim marks
    select an empty frame range.
    """
    # was `plugin_initializers=[]`: a mutable default is shared across calls
    if plugin_initializers is None:
        plugin_initializers = []

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    # parse info.csv file (tab separated key/value pairs)
    with open(rec_dir + "/info.csv") as info:
        meta_info = dict(((line.strip().split('\t')) for line in info.readlines()))
    rec_version = read_rec_version(meta_info)
    logger.debug("Exporting a video from recording with version: %s" % rec_version)

    # recordings before 0.4 used a different container and timestamp layout
    if rec_version < VersionFormat('0.4'):
        video_path = rec_dir + "/world.avi"
        timestamps_path = rec_dir + "/timestamps.npy"
    else:
        video_path = rec_dir + "/world.mkv"
        timestamps_path = rec_dir + "/world_timestamps.npy"

    gaze_positions_path = rec_dir + "/gaze_positions.npy"
    # load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)

    # correlate data
    if rec_version < VersionFormat('0.4'):
        positions_by_frame = correlate_gaze_legacy(gaze_list, timestamps)
    else:
        positions_by_frame = correlate_gaze(gaze_list, timestamps)

    cap = autoCreateCapture(video_path, timestamps=timestamps_path)
    width, height = cap.get_size()

    # Out file path verification; normally done by the caller, but this
    # kicks in when the exporter is used via a separate tool.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    # Trim mark verification:
    # make sure the trim marks (start frame, endframe) make sense: We define
    # them like python list slices, thus we can test them like such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        # logger.warn is deprecated in favor of logger.warning
        logger.warning("Start and end frames are set such that no video will be exported.")
        return False

    if start_frame is None:  # was `== None`; identity check is correct here
        start_frame = 0

    # these two vars are shared with the launching process and give a job
    # length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug("Will export from frame %s to frame %s. This means I will export %s frames." % (start_frame, start_frame + frames_to_export.value, frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g = Global_Container()
    g.app = 'exporter'
    g.rec_dir = rec_dir
    g.rec_version = rec_version
    g.timestamps = timestamps
    g.gaze_list = gaze_list
    g.positions_by_frame = positions_by_frame
    # NOTE(review): `plugin_by_name` is not defined in this function; it is
    # expected to exist at module level - confirm against the original module.
    g.plugins = Plugin_List(g, plugin_by_name, plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." % (current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events
        events['pupil_positions'] = positions_by_frame[frame.index]
        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info("Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second" % (current_frame.value, out_file_path, duration, effective_fps))
    return True
# 示例#13 (Example 13 - scraped example separator)
# 0
class Realsense2_Source(Base_Source):
    """Video capture backend for Intel RealSense cameras via pyrealsense2.

    Streams a YUYV color stream and a Z16 depth stream through a single
    rs.pipeline. Stream-mode changes (resolution / fps setters) do not
    reconfigure the device synchronously; they broadcast a
    "realsense2_source.restart" notification which is handled in
    on_notify -> _initialize_device. Depth frames can optionally be
    previewed in place of color and recorded to their own video file
    while a recording is running.
    """

    def __init__(
            self,
            g_pool,
            device_id=None,
            frame_size=DEFAULT_COLOR_SIZE,
            frame_rate=DEFAULT_COLOR_FPS,
            depth_frame_size=DEFAULT_DEPTH_SIZE,
            depth_frame_rate=DEFAULT_DEPTH_FPS,
            preview_depth=False,
            device_options=(),
            record_depth=True,
    ):
        """Create the source and immediately try to open the device.

        Args:
            g_pool: Shared plugin context (provides user_dir, get_timestamp,
                image_tex, ...).
            device_id: RealSense serial number string; None selects the first
                connected device (see the `device_id` property).
            frame_size: Requested color resolution as (width, height).
            frame_rate: Requested color fps.
            depth_frame_size: Requested depth resolution as (width, height).
            depth_frame_rate: Requested depth fps.
            preview_depth: If True, gl_display shows the depth image instead
                of the color image.
            device_options: Extra sensor options; currently carried through
                but not applied (see FIXME markers below).
            record_depth: If True, depth is written to depth.mp4 during
                recordings.
        """
        logger.debug("_init_ started")
        super().__init__(g_pool)
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.context = rs.context()
        self.pipeline = rs.pipeline(self.context)
        self.pipeline_profile = None
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self._needs_restart = False
        # Backup values are served by the frame_size/frame_rate properties
        # whenever the pipeline is not running (stream_profiles unavailable).
        self.frame_size_backup = DEFAULT_COLOR_SIZE
        self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
        self.frame_rate_backup = DEFAULT_COLOR_FPS
        self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )
        logger.debug("_init_ completed")

    def _initialize_device(
            self,
            device_id,
            color_frame_size,
            color_fps,
            depth_frame_size,
            depth_fps,
            device_options=(),
    ):
        """(Re)start the pipeline with the requested stream configuration.

        Stops any running pipeline first, resolves the device id, enumerates
        the device's supported modes, validates the requested fps against
        them (falling back where needed), then starts the pipeline. On
        success it caches the stream profiles, loads camera intrinsics and
        rebuilds the UI menu; on failure the source simply stays offline.
        """
        self.stop_pipeline()
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if device_id is None:
            device_id = self.device_id

        if device_id is None:  # FIXME these two if blocks look ugly.
            return

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)
        logger.debug("device_id: {} self._available_modes: {}".format(
            device_id, str(self._available_modes)))

        if (color_frame_size is not None and depth_frame_size is not None
                and color_fps is not None and depth_fps is not None):
            color_frame_size = tuple(color_frame_size)
            depth_frame_size = tuple(depth_frame_size)

            logger.debug("Initialize with Color {}@{}\tDepth {}@{}".format(
                color_frame_size, color_fps, depth_frame_size, depth_fps))

            # make sure the frame rates are compatible with the given frame sizes
            color_fps = self._get_valid_frame_rate(rs.stream.color,
                                                   color_frame_size, color_fps)
            depth_fps = self._get_valid_frame_rate(rs.stream.depth,
                                                   depth_frame_size, depth_fps)

            self.frame_size_backup = color_frame_size
            self.depth_frame_size_backup = depth_frame_size
            self.frame_rate_backup = color_fps
            self.depth_frame_rate_backup = depth_fps

            config = self._prep_configuration(color_frame_size, color_fps,
                                              depth_frame_size, depth_fps)
        else:
            # Any missing mode parameter: fall back entirely to defaults.
            config = self._get_default_config()
            self.frame_size_backup = DEFAULT_COLOR_SIZE
            self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
            self.frame_rate_backup = DEFAULT_COLOR_FPS
            self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        try:
            self.pipeline_profile = self.pipeline.start(config)
        except RuntimeError as re:
            logger.error("Cannot start pipeline! " + str(re))
            self.pipeline_profile = None
        else:
            # Map stream type -> video stream profile for quick lookups by
            # the frame_size/frame_rate properties and get_frames().
            self.stream_profiles = {
                s.stream_type(): s.as_video_stream_profile()
                for s in self.pipeline_profile.get_streams()
            }
            logger.debug("Pipeline started for device " + device_id)
            logger.debug("Stream profiles: " + str(self.stream_profiles))

            self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name,
                                               self.frame_size)
            self.update_menu()
            self._needs_restart = False

    def _prep_configuration(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
    ):
        """Build an rs.config enabling color (YUYV) and depth (Z16) streams
        with the given, already-validated sizes and frame rates."""
        config = rs.config()

        # only use these two formats
        color_format = rs.format.yuyv
        depth_format = rs.format.z16

        config.enable_stream(
            rs.stream.depth,
            depth_frame_size[0],
            depth_frame_size[1],
            depth_format,
            depth_fps,
        )

        config.enable_stream(
            rs.stream.color,
            color_frame_size[0],
            color_frame_size[1],
            color_format,
            color_fps,
        )

        return config

    def _get_default_config(self):
        """Return an rs.config using the module-level default stream modes."""
        config = rs.config()  # default config is RGB8, we want YUYV
        config.enable_stream(
            rs.stream.color,
            DEFAULT_COLOR_SIZE[0],
            DEFAULT_COLOR_SIZE[1],
            rs.format.yuyv,
            DEFAULT_COLOR_FPS,
        )
        config.enable_stream(
            rs.stream.depth,
            DEFAULT_DEPTH_SIZE[0],
            DEFAULT_DEPTH_SIZE[1],
            rs.format.z16,
            DEFAULT_DEPTH_FPS,
        )
        return config

    def _get_valid_frame_rate(self, stream_type, frame_size, fps):
        """Return `fps` if the device supports it for (stream_type, frame_size),
        otherwise the closest supported rate, or the stream's default fps when
        no mode information is available."""
        assert stream_type == rs.stream.color or stream_type == rs.stream.depth

        if not self._available_modes or stream_type not in self._available_modes:
            logger.warning(
                "_get_valid_frame_rate: self._available_modes not set yet. Returning default fps."
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS
            else:
                raise ValueError(
                    "Unexpected `stream_type`: {}".format(stream_type))

        if frame_size not in self._available_modes[stream_type]:
            logger.error(
                "Frame size not supported for {}: {}. Returning default fps".
                format(stream_type, frame_size))
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS

        if fps not in self._available_modes[stream_type][frame_size]:
            # Pick the supported rate with the smallest absolute distance.
            old_fps = fps
            rates = [
                abs(r - fps)
                for r in self._available_modes[stream_type][frame_size]
            ]
            best_rate_idx = rates.index(min(rates))
            fps = self._available_modes[stream_type][frame_size][best_rate_idx]
            # NOTE(review): message says "Color Stream" even when validating
            # the depth stream — preexisting quirk of the log text.
            logger.warning(
                "{} fps is not supported for ({}) for Color Stream. Fallback to {} fps"
                .format(old_fps, frame_size, fps))

        return fps

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierarchical structure:

        streams:
            resolutions:
                framerates

        Only YUYV color and Z16 depth profiles of the device whose serial
        matches `device_id` are included. Returns an empty dict when the
        context is missing or the device cannot be found.
        """
        formats = {}

        if self.context is None:
            return formats

        devices = self.context.query_devices()
        current_device = None

        for d in devices:
            try:
                serial = d.get_info(rs.camera_info.serial_number)
            except RuntimeError as re:
                logger.error("Device no longer available " + str(re))
            else:
                if device_id == serial:
                    current_device = d

        if current_device is None:
            return formats
        logger.debug("Found the current device: " + device_id)

        sensors = current_device.query_sensors()
        for s in sensors:
            stream_profiles = s.get_stream_profiles()
            for sp in stream_profiles:
                vp = sp.as_video_stream_profile()
                stream_type = vp.stream_type()

                if stream_type not in (rs.stream.color, rs.stream.depth):
                    continue
                elif vp.format() not in (rs.format.z16, rs.format.yuyv):
                    continue

                formats.setdefault(stream_type, {})
                stream_resolution = (vp.width(), vp.height())
                formats[stream_type].setdefault(stream_resolution,
                                                []).append(vp.fps())

        return formats

    def stop_pipeline(self):
        """Stop the running pipeline, if any, and drop cached profiles."""
        if self.online:
            try:
                self.pipeline_profile = None
                self.stream_profiles = None
                self.pipeline.stop()
                logger.debug("Pipeline stopped.")
            except RuntimeError as re:
                logger.error("Cannot stop the pipeline: " + str(re))

    def cleanup(self):
        """Release resources: finish any depth recording, then stop streaming."""
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        self.stop_pipeline()

    def get_init_dict(self):
        """Return constructor kwargs needed to restore this source on reload."""
        return {
            "frame_size": self.frame_size,
            "frame_rate": self.frame_rate,
            "depth_frame_size": self.depth_frame_size,
            "depth_frame_rate": self.depth_frame_rate,
            "preview_depth": self.preview_depth,
            "record_depth": self.record_depth,
        }

    def get_frames(self):
        """Wait for the next frame set and return (color, depth).

        Either element may be None: when the stream is not configured, when
        the device delivered a duplicate (same device timestamp as the last
        frame), or when the source is offline. Frames are timestamped with
        the Pupil clock at arrival time, not the device timestamp; the device
        timestamp is only used for duplicate detection.

        Raises:
            RuntimeError: when librealsense times out waiting for frames.
        """
        if self.online:
            try:
                frames = self.pipeline.wait_for_frames(TIMEOUT)
            except RuntimeError as e:
                logger.error("get_frames: Timeout!")
                raise RuntimeError(e)
            else:
                current_time = self.g_pool.get_timestamp()

                color = None
                # if we're expecting color frames
                if rs.stream.color in self.stream_profiles:
                    color_frame = frames.get_color_frame()
                    last_color_frame_ts = color_frame.get_timestamp()
                    if self.last_color_frame_ts != last_color_frame_ts:
                        self.last_color_frame_ts = last_color_frame_ts
                        color = ColorFrame(
                            np.asanyarray(color_frame.get_data()),
                            current_time,
                            self.color_frame_index,
                        )
                        self.color_frame_index += 1

                depth = None
                # if we're expecting depth frames
                if rs.stream.depth in self.stream_profiles:
                    depth_frame = frames.get_depth_frame()
                    last_depth_frame_ts = depth_frame.get_timestamp()
                    if self.last_depth_frame_ts != last_depth_frame_ts:
                        self.last_depth_frame_ts = last_depth_frame_ts
                        depth = DepthFrame(
                            np.asanyarray(depth_frame.get_data()),
                            current_time,
                            self.depth_frame_index,
                        )
                        self.depth_frame_index += 1

                return color, depth
        return None, None

    def recent_events(self, events):
        """Per-cycle hook: publish the newest color/depth frames into `events`.

        Triggers a device restart (and skips this cycle) when a restart is
        pending or the source is offline; a frame-fetch failure marks the
        source for restart on the next cycle. Depth frames are also written
        to the depth recorder when one is active.
        """
        if self._needs_restart or not self.online:
            logger.debug("recent_events -> restarting device")
            self.restart_device()
            time.sleep(0.01)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except RuntimeError as re:
            logger.warning("Realsense failed to provide frames." + str(re))
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            if color_frame is not None:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame is not None:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        """Tear down this source's UI menu."""
        self.remove_menu()

    def init_ui(self):
        """Create this source's UI menu and populate it."""
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        """Rebuild the settings menu from the current device capabilities.

        Selector getters read `self._available_modes` lazily so the choices
        track the currently connected device; when no device is connected
        they fall back to the backed-up values.
        """
        logger.debug("update_menu")
        try:
            del self.menu[:]
        except AttributeError:
            # Menu not created yet (update_menu may be called before init_ui).
            return

        from pyglui import ui

        if not self.online:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        self.menu.append(
            ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(
            ui.Switch("preview_depth", self, label="Preview Depth"))

        if self._available_modes is not None:

            def frame_size_selection_getter():
                if self.device_id:
                    frame_size = sorted(self._available_modes[rs.stream.color],
                                        reverse=True)
                    labels = [
                        "({}, {})".format(t[0], t[1]) for t in frame_size
                    ]
                    return frame_size, labels
                else:
                    return [self.frame_size_backup
                            ], [str(self.frame_size_backup)]

            selector = ui.Selector(
                "frame_size",
                self,
                selection_getter=frame_size_selection_getter,
                label="Color Resolution",
            )
            self.menu.append(selector)

            def frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps for fps in self._available_modes[rs.stream.color][
                            self.frame_size]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return [self.frame_rate_backup
                            ], [str(self.frame_rate_backup)]

            selector = ui.Selector(
                "frame_rate",
                self,
                selection_getter=frame_rate_selection_getter,
                label="Color Frame Rate",
            )
            self.menu.append(selector)

            def depth_frame_size_selection_getter():
                if self.device_id:
                    depth_sizes = sorted(
                        self._available_modes[rs.stream.depth], reverse=True)
                    labels = [
                        "({}, {})".format(t[0], t[1]) for t in depth_sizes
                    ]
                    return depth_sizes, labels
                else:
                    return (
                        [self.depth_frame_size_backup],
                        [str(self.depth_frame_size_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_size",
                self,
                selection_getter=depth_frame_size_selection_getter,
                label="Depth Resolution",
            )
            self.menu.append(selector)

            def depth_frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps for fps in self._available_modes[rs.stream.depth][
                            self.depth_frame_size]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return (
                        [self.depth_frame_rate_backup],
                        [str(self.depth_frame_rate_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_rate",
                self,
                selection_getter=depth_frame_rate_selection_getter,
                label="Depth Frame Rate",
            )
            self.menu.append(selector)

            def reset_options():
                logger.debug("reset_options")
                self.reset_device(self.device_id)

            sensor_control = ui.Growing_Menu(label="Sensor Settings")
            sensor_control.append(
                ui.Button("Reset device options to default", reset_options))
            self.menu.append(sensor_control)
        else:
            logger.debug("update_menu: self._available_modes is None")

    def gl_display(self):
        """Draw the most recent depth (if previewing) or color frame."""

        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(
                self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        # Restore pixel-based coordinates for subsequent plugin drawing.
        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3))

    def reset_device(self, device_id):
        """Request an async restart of `device_id` with all-default modes."""
        logger.debug("reset_device")
        if device_id is None:
            device_id = self.device_id

        self.notify_all({
            "subject": "realsense2_source.restart",
            "device_id": device_id,
            "color_frame_size": None,
            "color_fps": None,
            "depth_frame_size": None,
            "depth_fps": None,
            "device_options": [],  # FIXME
        })

    def restart_device(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        """Request an async restart, defaulting unset modes to current values.

        The restart itself happens when the notification round-trips to
        on_notify -> _initialize_device.
        """
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = []  # FIXME

        self.notify_all({
            "subject": "realsense2_source.restart",
            "device_id": None,
            "color_frame_size": color_frame_size,
            "color_fps": color_fps,
            "depth_frame_size": depth_frame_size,
            "depth_fps": depth_fps,
            "device_options": device_options,
        })
        logger.debug("self.restart_device --> self.notify_all")

    def on_click(self, pos, button, action):
        """No click interaction for this source."""
        pass

    def on_notify(self, notification):
        """Handle restart requests and recording start/stop notifications."""
        logger.debug('self.on_notify, notification["subject"]: ' +
                     notification["subject"])
        if notification["subject"] == "realsense2_source.restart":
            # Remaining keys mirror _initialize_device's signature.
            kwargs = notification.copy()
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif notification["subject"] == "recording.started":
            self.start_depth_recording(notification["rec_path"])
        elif notification["subject"] == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        """Open a depth.mp4 writer in `rec_loc` unless depth recording is
        disabled or already running."""
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = AV_Writer(video_path,
                                            fps=self.depth_frame_rate,
                                            use_timestamps=True)

    def stop_depth_recording(self):
        """Finalize and release the depth writer, if one is running."""
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def device_id(self):
        """Serial number of the active device, else of the first connected
        device, else None when nothing is plugged in."""
        if self.online:  # already running
            return self.pipeline_profile.get_device().get_info(
                rs.camera_info.serial_number)
        else:
            # set the first available device
            devices = self.context.query_devices()
            if devices:
                logger.info("device_id: first device by default.")
                return devices[0].get_info(rs.camera_info.serial_number)
            else:
                logger.debug("device_id: No device connected.")
                return None

    @property
    def frame_size(self):
        """Color (width, height); falls back to the backup value whenever the
        stream profile is unavailable."""
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except AttributeError:
            return self.frame_size_backup
        except KeyError:
            return self.frame_size_backup
        except TypeError:
            return self.frame_size_backup

    @frame_size.setter
    def frame_size(self, new_size):
        # Setters only request an async restart with the new mode.
        if new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        """Color fps; falls back to the backup value when unavailable."""
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check FPS is in self.available modes
            return stream_profile.fps()
        except AttributeError:
            return self.frame_rate_backup
        except KeyError:
            return self.frame_rate_backup
        except TypeError:
            return self.frame_rate_backup

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        """Depth (width, height); falls back to the backup value when
        unavailable."""
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except AttributeError:
            return self.depth_frame_size_backup
        except KeyError:
            return self.depth_frame_size_backup
        except TypeError:
            return self.depth_frame_size_backup

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        """Depth fps; falls back to the backup value when unavailable."""
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            return stream_profile.fps()
        except AttributeError:
            return self.depth_frame_rate_backup
        except KeyError:
            return self.depth_frame_rate_backup
        except TypeError:
            return self.depth_frame_rate_backup

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        """This source never provides JPEG-compressed frames."""
        return False

    @property
    def online(self):
        """True while a pipeline profile is held, i.e. streaming is active."""
        return self.pipeline_profile is not None and self.pipeline is not None

    @property
    def name(self):
        """Human-readable device name, or "Ghost capture" when offline."""
        if self.online:
            return self.pipeline_profile.get_device().get_info(
                rs.camera_info.name)
        else:
            logger.debug(
                "self.name: Realsense2 not online. Falling back to Ghost capture"
            )
            return "Ghost capture"
# 示例 #14 (Example 14)
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           user_dir,
           min_data_confidence,
           start_frame=None,
           end_frame=None,
           plugin_initializers=(),
           out_file_path=None):
    """Render a recording's world video with plugins applied and save it.

    Runs as a worker process: `should_terminate`, `frames_to_export` and
    `current_frame` are shared (multiprocessing) values used by the launching
    process for cancellation and progress reporting.

    Args:
        should_terminate: Shared flag; set truthy by the parent to abort.
        frames_to_export: Shared int; set here to the total frame count.
        current_frame: Shared int; incremented once per exported frame.
        rec_dir: Recording directory (world video, timestamps, pupil_data).
        user_dir: User directory; runtime plugins load from its 'plugins'
            subfolder.
        min_data_confidence: Minimum data confidence exposed to plugins.
        start_frame, end_frame: Trim marks, Python-slice style (end exclusive).
        plugin_initializers: Plugin (name, args) pairs to instantiate.
        out_file_path: Output file; defaults to <rec_dir>/world_viz.mp4.
            A bare file name or bare directory is completed from rec_dir /
            'world_viz.mp4' respectively.

    Returns:
        True on success; False when aborted or when the trim range is empty.
    """
    vis_plugins = sorted([
        Vis_Circle, Vis_Cross, Vis_Polyline, Vis_Light_Points, Vis_Watermark,
        Scan_Path
    ],
                         key=lambda x: x.__name__)
    analysis_plugins = sorted([
        Manual_Gaze_Correction, Eye_Video_Overlay,
        Pupil_Angle_3D_Fixation_Detector, Gaze_Position_2D_Fixation_Detector
    ],
                              key=lambda x: x.__name__)
    user_plugins = sorted(import_runtime_plugins(
        os.path.join(user_dir, 'plugins')),
                          key=lambda x: x.__name__)
    available_plugins = vis_plugins + analysis_plugins + user_plugins
    name_by_index = [p.__name__ for p in available_plugins]
    index_by_name = dict(zip(name_by_index, range(len(name_by_index))))
    plugin_by_name = dict(zip(name_by_index, available_plugins))

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    update_recording_to_recent(rec_dir)

    video_path = [
        f for f in glob(os.path.join(rec_dir, "world.*"))
        if f[-3:] in ('mp4', 'mkv', 'avi')
    ][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")

    meta_info = load_meta_info(rec_dir)
    rec_version = read_rec_version(meta_info)

    g_pool = Global_Container()
    g_pool.app = 'exporter'
    g_pool.min_data_confidence = min_data_confidence
    timestamps = np.load(timestamps_path)
    cap = File_Source(g_pool, video_path, timestamps=timestamps)

    # Out file path verification; done upstream too, but this kicks in when a
    # separate tool drives the exporter.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    # Trim mark verification: trim marks are defined like Python list slices,
    # so they can be validated by slicing.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        # fix: logging.warn is a deprecated alias of warning
        logger.warning(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    if start_frame is None:  # fix: identity check, not == None
        start_frame = 0

    # These two shared values give the launching process job length and
    # progress information.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path, fps=cap.frame_rate, use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g_pool.capture = cap
    g_pool.rec_dir = rec_dir
    g_pool.user_dir = user_dir
    g_pool.rec_version = rec_version
    g_pool.timestamps = timestamps
    g_pool.delayed_notifications = {}
    g_pool.notifications = []

    # load pupil_positions, gaze_positions
    pupil_data = load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']
    g_pool.pupil_positions_by_frame = correlate_data(pupil_list,
                                                     g_pool.timestamps)
    g_pool.gaze_positions_by_frame = correlate_data(gaze_list,
                                                    g_pool.timestamps)
    g_pool.fixations_by_frame = [[] for x in g_pool.timestamps
                                 ]  # populated by the fixation detector plugin

    # add plugins
    g_pool.plugins = Plugin_List(g_pool, plugin_by_name, plugin_initializers)

    while frames_to_export.value > current_frame.value:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            break

        events = {}
        # new positons and events
        events['gaze_positions'] = g_pool.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g_pool.pupil_positions_by_frame[
            frame.index]

        # publish delayed notifiactions when their time has come.
        # fix: iterate over a snapshot — deleting from the dict while
        # iterating its values() view raises RuntimeError in Python 3.
        for n in list(g_pool.delayed_notifications.values()):
            if n['_notify_time_'] < time():
                del n['_notify_time_']
                del g_pool.delayed_notifications[n['subject']]
                g_pool.notifications.append(n)

        # notify each plugin if there are new notifactions:
        while g_pool.notifications:
            n = g_pool.notifications.pop(0)
            for p in g_pool.plugins:
                p.on_notify(n)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
# 示例 #15 (Example 15)
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           user_dir,
           start_frame=None,
           end_frame=None,
           plugin_initializers=None,
           out_file_path=None):
    """Export a trimmed world video with plugin visualizations rendered in.

    Args:
        should_terminate, frames_to_export, current_frame: shared
            (multiprocessing-style) values; the first aborts the export,
            the other two report job length and progress to the launcher.
        rec_dir, user_dir: recording directory and user plugin directory.
        start_frame, end_frame: trim marks, interpreted like a python
            list slice over the timestamp array.
        plugin_initializers: initializer list forwarded to Plugin_List.
        out_file_path: target video path; defaults to
            <rec_dir>/world_viz.mp4.

    Returns:
        True on success, False when aborted or when the trim marks
        select an empty frame range; None for unsupported recordings.
    """
    # guard against the shared-mutable-default pitfall: a literal []
    # default would be one list object shared across all calls
    if plugin_initializers is None:
        plugin_initializers = []

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    # parse info.csv file (tab separated key/value pairs)
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path) as info:
        meta_info = dict(line.strip().split('\t') for line in info.readlines())

    video_path = [f for f in glob(os.path.join(rec_dir, "world.*"))
                  if f[-3:] in ('mp4', 'mkv', 'avi')][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")

    # bring older recordings up to the current on-disk format
    rec_version = read_rec_version(meta_info)
    if rec_version >= VersionFormat('0.5'):
        pass
    elif rec_version >= VersionFormat('0.4'):
        update_recording_0v4_to_current(rec_dir)
    elif rec_version >= VersionFormat('0.3'):
        update_recording_0v3_to_current(rec_dir)
        timestamps_path = os.path.join(rec_dir, "timestamps.npy")
    else:
        # bug fix: logging.Logger has no `Error` attribute -- the
        # original `logger.Error(...)` raised AttributeError instead of
        # logging the message
        logger.error("This recording is to old. Sorry.")
        return

    timestamps = np.load(timestamps_path)

    cap = File_Capture(video_path, timestamps=timestamps)

    # Out file path verification. We do this before, but if one uses a
    # separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    # Trim mark verification: trim marks are defined like python list
    # slices, thus we can test them like such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        # logger.warn is deprecated in favour of logger.warning
        logger.warning(
            "Start and end frames are set such that no video will be exported.")
        return False

    # identity comparison against None per PEP 8 (was `== None`)
    if start_frame is None:
        start_frame = 0

    # these two vars are shared with the launching process and give a job
    # length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path, fps=cap.frame_rate, use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    # minimal g_pool stand-in so plugins run as they would in the player
    g = Global_Container()
    g.app = 'exporter'
    g.capture = cap
    g.rec_dir = rec_dir
    g.user_dir = user_dir
    g.rec_version = rec_version
    g.timestamps = timestamps

    # load pupil_positions, gaze_positions
    pupil_data = load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']

    g.pupil_positions_by_frame = correlate_data(pupil_list, g.timestamps)
    g.gaze_positions_by_frame = correlate_data(gaze_list, g.timestamps)
    # populated by the fixation detector plugin
    g.fixations_by_frame = [[] for x in g.timestamps]

    # add plugins
    g.plugins = Plugin_List(g, plugin_by_name, plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s."
                           % (current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events for this frame index
        events['gaze_positions'] = g.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g.pupil_positions_by_frame[frame.index]

        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
示例#16
0
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           user_dir,
           start_frame=None,
           end_frame=None,
           plugin_initializers=None,
           out_file_path=None):
    """Export a trimmed world video with plugin visualizations rendered in.

    Args:
        should_terminate, frames_to_export, current_frame: shared
            (multiprocessing-style) values; the first aborts the export,
            the other two report job length and progress to the launcher.
        rec_dir, user_dir: recording directory and user plugin directory.
        start_frame, end_frame: trim marks, interpreted like a python
            list slice over the timestamp array.
        plugin_initializers: initializer list forwarded to Plugin_List.
        out_file_path: target video path; defaults to
            <rec_dir>/world_viz.mp4.

    Returns:
        True on success, False when aborted or when the trim marks
        select an empty frame range; None for unsupported recordings.
    """
    # guard against the shared-mutable-default pitfall: a literal []
    # default would be one list object shared across all calls
    if plugin_initializers is None:
        plugin_initializers = []

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    # parse info.csv file (tab separated key/value pairs)
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path) as info:
        meta_info = dict(line.strip().split('\t') for line in info.readlines())

    # NOTE(review): no extension filter here unlike sibling exporters --
    # glob may pick up non-video "world.*" files; confirm intent.
    video_path = glob(os.path.join(rec_dir, "world.*"))[0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")

    # bring older recordings up to the current on-disk format
    rec_version = read_rec_version(meta_info)
    if rec_version >= VersionFormat('0.5'):
        pass
    elif rec_version >= VersionFormat('0.4'):
        update_recording_0v4_to_current(rec_dir)
    elif rec_version >= VersionFormat('0.3'):
        update_recording_0v3_to_current(rec_dir)
        timestamps_path = os.path.join(rec_dir, "timestamps.npy")
    else:
        # bug fix: logging.Logger has no `Error` attribute -- the
        # original `logger.Error(...)` raised AttributeError instead of
        # logging the message
        logger.error("This recording is to old. Sorry.")
        return

    timestamps = np.load(timestamps_path)

    # bug fix: the original passed `timestamps=timestamps_path` (the
    # file path string) although the array was just loaded above; the
    # sibling exporter passes the loaded array.
    cap = File_Capture(video_path, timestamps=timestamps)

    # Out file path verification. We do this before, but if one uses a
    # separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    # Trim mark verification: trim marks are defined like python list
    # slices, thus we can test them like such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        # logger.warn is deprecated in favour of logger.warning
        logger.warning(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    # identity comparison against None per PEP 8 (was `== None`)
    if start_frame is None:
        start_frame = 0

    # these two vars are shared with the launching process and give a job
    # length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    # setup of writer
    # NOTE(review): sibling exporters pass fps=cap.frame_rate and
    # use_timestamps=True; this variant relies on AV_Writer defaults --
    # confirm this writer version handles timing on its own.
    writer = AV_Writer(out_file_path)

    cap.seek_to_frame(start_frame)

    start_time = time()

    # minimal g_pool stand-in so plugins run as they would in the player
    g = Global_Container()
    g.app = 'exporter'
    g.capture = cap
    g.rec_dir = rec_dir
    g.user_dir = user_dir
    g.rec_version = rec_version
    g.timestamps = timestamps

    # load pupil_positions, gaze_positions
    pupil_data = load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']

    g.pupil_positions_by_frame = correlate_data(pupil_list, g.timestamps)
    g.gaze_positions_by_frame = correlate_data(gaze_list, g.timestamps)
    # populated by the fixation detector plugin
    g.fixations_by_frame = [[] for x in g.timestamps]

    # add plugins
    g.plugins = Plugin_List(g, plugin_by_name, plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events for this frame index
        events['gaze_positions'] = g.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g.pupil_positions_by_frame[frame.index]

        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
示例#17
0
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation for the world video and saves a certain time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # we are not importing manual gaze correction. In Player corrections have already been applied.
    # in batch exporter this plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector
    from eye_movement import Offline_Eye_Movement_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from video_overlay.plugins import Video_Overlay, Eye_Overlay
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Eye_Overlay,
                Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector, Offline_Eye_Movement_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        meta_info = pm.load_meta_info(rec_dir)

        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(
                f
                for f in glob(os.path.join(rec_dir, "world.*"))
                if os.path.splitext(f)[1] in valid_ext
            )
        except StopIteration:
            raise FileNotFoundError("No Video world found")
        cap = File_Source(g_pool, source_path=video_path, fill_gaps=True, timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(
                start_frame, start_frame + frames_to_export, frames_to_export
            )
        )

        # setup of writer
        writer = AV_Writer(
            out_file_path, fps=cap.frame_rate, audio_dir=rec_dir, use_timestamps=True
        )

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.pupil_positions_by_id = (
            pm.Bisector(**pre_computed_eye_data["pupil_by_id_0"]),
            pm.Bisector(**pre_computed_eye_data["pupil_by_id_1"]),
        )
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration, effective_fps)
        )
        yield "Export done. This took {:.0f} seconds.".format(duration), current_frame

    except GeneratorExit:
        logger.warning("Video export with pid {} was canceled.".format(os.getpid()))