Example #1
    def start_depth_recording(self, rec_loc, start_time_synced):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = MPEG_Writer(video_path, start_time_synced)
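
Example #1 only shows the depth writer being created. Pieced together from the rest of the same class (see Example #3), the writer's full lifecycle looks roughly like the sketch below; rec_loc, start_time_synced, and depth_frame are placeholders for values the surrounding plugin already holds.

# Minimal sketch of the MPEG_Writer lifecycle used for depth recording;
# all names except MPEG_Writer are placeholders from the surrounding plugin.
writer = MPEG_Writer(os.path.join(rec_loc, "depth.mp4"), start_time_synced)
writer.write_video_frame(depth_frame)  # called once per new depth frame
writer.close()                         # called when the recording stops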
Example #2
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )

    #  NOTE: The start time of the export will be synced with the world recording
    #  export! This means that if the recording being exported started later than the
    #  world video, its first frame will not be at timestamp 0 in the export but
    #  later. Some video players (e.g. VLC on Windows) might display the video
    #  weirdly in this case, but we prefer keeping the exported videos synchronized.
    start_time = export_window[0]
    writer = MPEG_Writer(output_file, start_time)

    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index
            )
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Example #3
class Realsense2_Source(Base_Source):
    def __init__(
            self,
            g_pool,
            device_id=None,
            frame_size=DEFAULT_COLOR_SIZE,
            frame_rate=DEFAULT_COLOR_FPS,
            depth_frame_size=DEFAULT_DEPTH_SIZE,
            depth_frame_rate=DEFAULT_DEPTH_FPS,
            preview_depth=False,
            device_options=(),
            record_depth=True,
    ):
        logger.debug("_init_ started")
        super().__init__(g_pool)
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.context = rs.context()
        self.pipeline = rs.pipeline(self.context)
        self.pipeline_profile = None
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self._needs_restart = False
        self.frame_size_backup = DEFAULT_COLOR_SIZE
        self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
        self.frame_rate_backup = DEFAULT_COLOR_FPS
        self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )
        logger.debug("_init_ completed")

    def _initialize_device(
            self,
            device_id,
            color_frame_size,
            color_fps,
            depth_frame_size,
            depth_fps,
            device_options=(),
    ):
        self.stop_pipeline()
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if device_id is None:
            device_id = self.device_id

        if device_id is None:  # FIXME these two if blocks look ugly.
            return

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)
        logger.debug("device_id: {} self._available_modes: {}".format(
            device_id, str(self._available_modes)))

        if (color_frame_size is not None and depth_frame_size is not None
                and color_fps is not None and depth_fps is not None):
            color_frame_size = tuple(color_frame_size)
            depth_frame_size = tuple(depth_frame_size)

            logger.debug("Initialize with Color {}@{}\tDepth {}@{}".format(
                color_frame_size, color_fps, depth_frame_size, depth_fps))

            # make sure the frame rates are compatible with the given frame sizes
            color_fps = self._get_valid_frame_rate(rs.stream.color,
                                                   color_frame_size, color_fps)
            depth_fps = self._get_valid_frame_rate(rs.stream.depth,
                                                   depth_frame_size, depth_fps)

            self.frame_size_backup = color_frame_size
            self.depth_frame_size_backup = depth_frame_size
            self.frame_rate_backup = color_fps
            self.depth_frame_rate_backup = depth_fps

            config = self._prep_configuration(color_frame_size, color_fps,
                                              depth_frame_size, depth_fps)
        else:
            config = self._get_default_config()
            self.frame_size_backup = DEFAULT_COLOR_SIZE
            self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
            self.frame_rate_backup = DEFAULT_COLOR_FPS
            self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        try:
            self.pipeline_profile = self.pipeline.start(config)
        except RuntimeError as re:
            logger.error("Cannot start pipeline! " + str(re))
            self.pipeline_profile = None
        else:
            self.stream_profiles = {
                s.stream_type(): s.as_video_stream_profile()
                for s in self.pipeline_profile.get_streams()
            }
            logger.debug("Pipeline started for device " + device_id)
            logger.debug("Stream profiles: " + str(self.stream_profiles))

            self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name,
                                               self.frame_size)
            self.update_menu()
            self._needs_restart = False

    def _prep_configuration(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
    ):
        config = rs.config()

        # only use these two formats
        color_format = rs.format.yuyv
        depth_format = rs.format.z16

        config.enable_stream(
            rs.stream.depth,
            depth_frame_size[0],
            depth_frame_size[1],
            depth_format,
            depth_fps,
        )

        config.enable_stream(
            rs.stream.color,
            color_frame_size[0],
            color_frame_size[1],
            color_format,
            color_fps,
        )

        return config

    def _get_default_config(self):
        config = rs.config()  # default config is RGB8, we want YUYV
        config.enable_stream(
            rs.stream.color,
            DEFAULT_COLOR_SIZE[0],
            DEFAULT_COLOR_SIZE[1],
            rs.format.yuyv,
            DEFAULT_COLOR_FPS,
        )
        config.enable_stream(
            rs.stream.depth,
            DEFAULT_DEPTH_SIZE[0],
            DEFAULT_DEPTH_SIZE[1],
            rs.format.z16,
            DEFAULT_DEPTH_FPS,
        )
        return config

    def _get_valid_frame_rate(self, stream_type, frame_size, fps):
        assert stream_type == rs.stream.color or stream_type == rs.stream.depth

        if not self._available_modes or stream_type not in self._available_modes:
            logger.warning(
                "_get_valid_frame_rate: self._available_modes not set yet. Returning default fps."
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS
            else:
                raise ValueError(
                    "Unexpected `stream_type`: {}".format(stream_type))

        if frame_size not in self._available_modes[stream_type]:
            logger.error(
                "Frame size not supported for {}: {}. Returning default fps".
                format(stream_type, frame_size))
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS

        if fps not in self._available_modes[stream_type][frame_size]:
            old_fps = fps
            rates = [
                abs(r - fps)
                for r in self._available_modes[stream_type][frame_size]
            ]
            best_rate_idx = rates.index(min(rates))
            fps = self._available_modes[stream_type][frame_size][best_rate_idx]
            logger.warning(
                "{} fps is not supported for {} at {}. Falling back to {} fps"
                .format(old_fps, stream_type, frame_size, fps))

        return fps

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates
        """
        formats = {}

        if self.context is None:
            return formats

        devices = self.context.query_devices()
        current_device = None

        for d in devices:
            try:
                serial = d.get_info(rs.camera_info.serial_number)
            except RuntimeError as re:
                logger.error("Device no longer available " + str(re))
            else:
                if device_id == serial:
                    current_device = d

        if current_device is None:
            return formats
        logger.debug("Found the current device: " + device_id)

        sensors = current_device.query_sensors()
        for s in sensors:
            stream_profiles = s.get_stream_profiles()
            for sp in stream_profiles:
                vp = sp.as_video_stream_profile()
                stream_type = vp.stream_type()

                if stream_type not in (rs.stream.color, rs.stream.depth):
                    continue
                elif vp.format() not in (rs.format.z16, rs.format.yuyv):
                    continue

                formats.setdefault(stream_type, {})
                stream_resolution = (vp.width(), vp.height())
                formats[stream_type].setdefault(stream_resolution,
                                                []).append(vp.fps())

        return formats
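
    # For reference, a sketch (with made-up resolutions and frame rates) of the
    # nested structure that _enumerate_formats() returns and
    # _get_valid_frame_rate() looks up:
    #
    #     {
    #         rs.stream.color: {(1280, 720): [6, 15, 30], (640, 480): [15, 30, 60]},
    #         rs.stream.depth: {(640, 480): [15, 30, 60, 90]},
    #     }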

    def stop_pipeline(self):
        if self.online:
            try:
                self.pipeline_profile = None
                self.stream_profiles = None
                self.pipeline.stop()
                logger.debug("Pipeline stopped.")
            except RuntimeError as re:
                logger.error("Cannot stop the pipeline: " + str(re))

    def cleanup(self):
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        self.stop_pipeline()

    def get_init_dict(self):
        return {
            "frame_size": self.frame_size,
            "frame_rate": self.frame_rate,
            "depth_frame_size": self.depth_frame_size,
            "depth_frame_rate": self.depth_frame_rate,
            "preview_depth": self.preview_depth,
            "record_depth": self.record_depth,
        }

    def get_frames(self):
        if self.online:
            try:
                frames = self.pipeline.wait_for_frames(TIMEOUT)
            except RuntimeError as e:
                logger.error("get_frames: Timeout!")
                raise RuntimeError(e)
            else:
                current_time = self.g_pool.get_timestamp()

                color = None
                # if we're expecting color frames
                if rs.stream.color in self.stream_profiles:
                    color_frame = frames.get_color_frame()
                    last_color_frame_ts = color_frame.get_timestamp()
                    if self.last_color_frame_ts != last_color_frame_ts:
                        self.last_color_frame_ts = last_color_frame_ts
                        color = ColorFrame(
                            np.asanyarray(color_frame.get_data()),
                            current_time,
                            self.color_frame_index,
                        )
                        self.color_frame_index += 1

                depth = None
                # if we're expecting depth frames
                if rs.stream.depth in self.stream_profiles:
                    depth_frame = frames.get_depth_frame()
                    last_depth_frame_ts = depth_frame.get_timestamp()
                    if self.last_depth_frame_ts != last_depth_frame_ts:
                        self.last_depth_frame_ts = last_depth_frame_ts
                        depth = DepthFrame(
                            np.asanyarray(depth_frame.get_data()),
                            current_time,
                            self.depth_frame_index,
                        )
                        self.depth_frame_index += 1

                return color, depth
        return None, None

    def recent_events(self, events):
        if self._needs_restart or not self.online:
            logger.debug("recent_events -> restarting device")
            self.restart_device()
            time.sleep(0.01)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except RuntimeError as re:
            logger.warning("Realsense failed to provide frames." + str(re))
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            if color_frame is not None:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame is not None:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        self.remove_menu()

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        logger.debug("update_menu")
        try:
            del self.menu[:]
        except AttributeError:
            return

        from pyglui import ui

        if not self.online:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        self.menu.append(
            ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(
            ui.Switch("preview_depth", self, label="Preview Depth"))

        if self._available_modes is not None:

            def frame_size_selection_getter():
                if self.device_id:
                    frame_size = sorted(self._available_modes[rs.stream.color],
                                        reverse=True)
                    labels = [
                        "({}, {})".format(t[0], t[1]) for t in frame_size
                    ]
                    return frame_size, labels
                else:
                    return [self.frame_size_backup
                            ], [str(self.frame_size_backup)]

            selector = ui.Selector(
                "frame_size",
                self,
                selection_getter=frame_size_selection_getter,
                label="Color Resolution",
            )
            self.menu.append(selector)

            def frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps for fps in self._available_modes[rs.stream.color][
                            self.frame_size]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return [self.frame_rate_backup
                            ], [str(self.frame_rate_backup)]

            selector = ui.Selector(
                "frame_rate",
                self,
                selection_getter=frame_rate_selection_getter,
                label="Color Frame Rate",
            )
            self.menu.append(selector)

            def depth_frame_size_selection_getter():
                if self.device_id:
                    depth_sizes = sorted(
                        self._available_modes[rs.stream.depth], reverse=True)
                    labels = [
                        "({}, {})".format(t[0], t[1]) for t in depth_sizes
                    ]
                    return depth_sizes, labels
                else:
                    return (
                        [self.depth_frame_size_backup],
                        [str(self.depth_frame_size_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_size",
                self,
                selection_getter=depth_frame_size_selection_getter,
                label="Depth Resolution",
            )
            self.menu.append(selector)

            def depth_frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps for fps in self._available_modes[rs.stream.depth][
                            self.depth_frame_size]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return (
                        [self.depth_frame_rate_backup],
                        [str(self.depth_frame_rate_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_rate",
                self,
                selection_getter=depth_frame_rate_selection_getter,
                label="Depth Frame Rate",
            )
            self.menu.append(selector)

            def reset_options():
                logger.debug("reset_options")
                self.reset_device(self.device_id)

            sensor_control = ui.Growing_Menu(label="Sensor Settings")
            sensor_control.append(
                ui.Button("Reset device options to default", reset_options))
            self.menu.append(sensor_control)
        else:
            logger.debug("update_menu: self._available_modes is None")

    def gl_display(self):

        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(
                self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3))

    def reset_device(self, device_id):
        logger.debug("reset_device")
        if device_id is None:
            device_id = self.device_id

        self.notify_all({
            "subject": "realsense2_source.restart",
            "device_id": device_id,
            "color_frame_size": None,
            "color_fps": None,
            "depth_frame_size": None,
            "depth_fps": None,
            "device_options": [],  # FIXME
        })

    def restart_device(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = []  # FIXME

        self.notify_all({
            "subject": "realsense2_source.restart",
            "device_id": None,
            "color_frame_size": color_frame_size,
            "color_fps": color_fps,
            "depth_frame_size": depth_frame_size,
            "depth_fps": depth_fps,
            "device_options": device_options,
        })
        logger.debug("self.restart_device --> self.notify_all")

    def on_notify(self, notification):
        logger.debug('self.on_notify, notification["subject"]: ' +
                     notification["subject"])
        if notification["subject"] == "realsense2_source.restart":
            kwargs = notification.copy()
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif notification["subject"] == "recording.started":
            self.start_depth_recording(notification["rec_path"],
                                       notification["start_time_synced"])
        elif notification["subject"] == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc, start_time_synced):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = MPEG_Writer(video_path, start_time_synced)

    def stop_depth_recording(self):
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def device_id(self):
        if self.online:  # already running
            return self.pipeline_profile.get_device().get_info(
                rs.camera_info.serial_number)
        else:
            # set the first available device
            devices = self.context.query_devices()
            if devices:
                logger.info("device_id: first device by default.")
                return devices[0].get_info(rs.camera_info.serial_number)
            else:
                logger.debug("device_id: No device connected.")
                return None

    @property
    def frame_size(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except AttributeError:
            return self.frame_size_backup
        except KeyError:
            return self.frame_size_backup
        except TypeError:
            return self.frame_size_backup

    @frame_size.setter
    def frame_size(self, new_size):
        if new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check FPS is in self.available modes
            return stream_profile.fps()
        except AttributeError:
            return self.frame_rate_backup
        except KeyError:
            return self.frame_rate_backup
        except TypeError:
            return self.frame_rate_backup

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except AttributeError:
            return self.depth_frame_size_backup
        except KeyError:
            return self.depth_frame_size_backup
        except TypeError:
            return self.depth_frame_size_backup

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            return stream_profile.fps()
        except AttributeError:
            return self.depth_frame_rate_backup
        except KeyError:
            return self.depth_frame_rate_backup
        except TypeError:
            return self.depth_frame_rate_backup

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        return False

    @property
    def online(self):
        return self.pipeline_profile is not None and self.pipeline is not None

    @property
    def name(self):
        if self.online:
            return self.pipeline_profile.get_device().get_info(
                rs.camera_info.name)
        else:
            logger.debug(
                "self.name: Realsense2 not online. Falling back to Ghost capture"
            )
            return "Ghost capture"
Example #4
class Recorder(System_Plugin_Base):
    """Capture Recorder"""

    icon_chr = chr(0xE04B)
    icon_font = "pupil_icons"
    warning_low_disk_space_th = 5.0  # threshold in GB
    stop_rec_low_disk_space_th = 1.0  # threshold in GB

    def __init__(
        self,
        g_pool,
        session_name=get_auto_name(),
        rec_root_dir=None,
        user_info={
            "name": "",
            "additional_field": "change_me"
        },
        info_menu_conf={},
        show_info_menu=False,
        record_eye=True,
        raw_jpeg=True,
    ):
        super().__init__(g_pool)
        # update name if it was autogenerated.
        if session_name.startswith("20") and len(session_name) == 10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep, 1)[0]
        default_rec_root_dir = os.path.join(base_dir, "recordings")

        if (rec_root_dir and rec_root_dir != default_rec_root_dir
                and self.verify_path(rec_root_dir)):
            self.rec_root_dir = rec_root_dir
        else:
            try:
                os.makedirs(default_rec_root_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info('Created standard Rec dir at "{}"'.format(
                    default_rec_root_dir))
            self.rec_root_dir = default_rec_root_dir

        self.raw_jpeg = raw_jpeg
        self.order = 0.9
        self.record_eye = record_eye
        self.session_name = session_name
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf

        self.low_disk_space_thumb = None
        check_timer = timer(1.0)
        self.check_space = lambda: next(check_timer)

    def get_init_dict(self):
        d = {}
        d["record_eye"] = self.record_eye
        d["session_name"] = self.session_name
        d["user_info"] = self.user_info
        d["info_menu_conf"] = self.info_menu_conf
        d["show_info_menu"] = self.show_info_menu
        d["rec_root_dir"] = self.rec_root_dir
        d["raw_jpeg"] = self.raw_jpeg
        return d

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Recorder"
        self.menu_icon.order = 0.29

        self.menu.append(
            ui.Info_Text(
                'Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'
            ))
        self.menu.append(
            ui.Info_Text(
                'Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'
            ))
        self.menu.append(
            ui.Text_Input(
                "rec_root_dir",
                self,
                setter=self.set_rec_root_dir,
                label="Path to recordings",
            ))
        self.menu.append(
            ui.Text_Input(
                "session_name",
                self,
                setter=self.set_session_name,
                label="Recording session name",
            ))
        self.menu.append(
            ui.Switch(
                "show_info_menu",
                self,
                on_val=True,
                off_val=False,
                label="Request additional user info",
            ))
        self.menu.append(
            ui.Selector(
                "raw_jpeg",
                self,
                selection=[True, False],
                labels=["bigger file, less CPU", "smaller file, more CPU"],
                label="Compression",
            ))
        self.menu.append(
            ui.Info_Text(
                "Recording the raw eye video is optional. We use it for debugging."
            ))
        self.menu.append(
            ui.Switch("record_eye",
                      self,
                      on_val=True,
                      off_val=False,
                      label="Record eye"))
        self.button = ui.Thumb(
            "running",
            self,
            setter=self.toggle,
            label="R",
            hotkey=Hotkey.RECORDER_RUNNING_TOGGLE_CAPTURE_HOTKEY(),
        )
        self.button.on_color[:] = (1, 0.0, 0.0, 0.8)
        self.g_pool.quickbar.insert(2, self.button)

        self.low_disk_space_thumb = ui.Thumb("low_disk_warn",
                                             label="!",
                                             getter=lambda: True,
                                             setter=lambda x: None)
        self.low_disk_space_thumb.on_color[:] = (1, 0.0, 0.0, 0.8)
        self.low_disk_space_thumb.status_text = "Low disk space"

    def deinit_ui(self):
        if self.low_disk_space_thumb in self.g_pool.quickbar:
            self.g_pool.quickbar.remove(self.low_disk_space_thumb)
        self.g_pool.quickbar.remove(self.button)
        self.button = None
        self.remove_menu()

    def toggle(self, _=None):
        if self.running:
            self.notify_all({"subject": "recording.should_stop"})
            self.notify_all({
                "subject": "recording.should_stop",
                "remote_notify": "all"
            })
        else:
            self.notify_all({
                "subject": "recording.should_start",
                "session_name": self.session_name
            })
            self.notify_all({
                "subject": "recording.should_start",
                "session_name": self.session_name,
                "remote_notify": "all",
            })

    def on_notify(self, notification):
        """Handles recorder notifications

        Reacts to notifications:
            ``recording.should_start``: Starts a new recording session.
                fields:
                - 'session_name' change session name
                    start with `/` to ignore the rec base dir and start from root instead.
                - `record_eye` boolean that indicates recording of the eyes, defaults to current setting
            ``recording.should_stop``: Stops current recording session

        Emits notifications:
            ``recording.started``: New recording session started
            ``recording.stopped``: Current recording session stopped

        Args:
            notification (dictionary): Notification dictionary
        """
        # notification wants to be recorded
        if notification.get("record", False) and self.running:
            if "timestamp" not in notification:
                logger.error(
                    "Notification without timestamp will not be saved.")
                notification["timestamp"] = self.g_pool.get_timestamp()
            # else:
            notification["topic"] = "notify." + notification["subject"]
            try:
                writer = self.pldata_writers["notify"]
            except KeyError:
                writer = PLData_Writer(self.rec_path, "notify")
                self.pldata_writers["notify"] = writer
            writer.append(notification)

        elif notification["subject"] == "recording.should_start":
            if self.running:
                logger.info("Recording already running!")
            else:
                self.record_eye = notification.get("record_eye",
                                                   self.record_eye)
                if notification.get("session_name", ""):
                    self.set_session_name(notification["session_name"])
                self.start()

        elif notification["subject"] == "recording.should_stop":
            if self.running:
                self.stop()
            else:
                logger.info("Recording already stopped!")

    def get_rec_time_str(self):
        rec_time = gmtime(time() - self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self):
        self.start_time = time()
        start_time_synced = self.g_pool.get_timestamp()

        if isinstance(self.g_pool.capture, NDSI_Source):
            # If the user did not enable Time Sync, the timestamps will be way off and
            # the recording code will crash. We check the difference between the last
            # frame's timestamp and start_time_synced; if it exceeds the threshold, we
            # abort the recording and show a warning instead.
            TIMESTAMP_ERROR_THRESHOLD = 5.0
            frame = self.g_pool.capture._recent_frame
            if frame is None:
                logger.error(
                    "Your connection does not seem to be stable enough for "
                    "recording Pupil Mobile via WiFi. We recommend recording "
                    "on the phone.")
                return
            if abs(frame.timestamp -
                   start_time_synced) > TIMESTAMP_ERROR_THRESHOLD:
                logger.error(
                    "Pupil Mobile stream is not in sync. Aborting recording."
                    " Enable the Time Sync plugin and try again.")
                return

        session = os.path.join(self.rec_root_dir, self.session_name)
        try:
            os.makedirs(session, exist_ok=True)
            logger.debug(
                "Created new recordings session dir {}".format(session))
        except OSError:
            logger.error(
                "Could not start recording. Session dir {} not writable.".
                format(session))
            return

        self.pldata_writers = {}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        recording_uuid = uuid.uuid4()

        # set up self-incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(
                    self.rec_path))
                break
            except FileExistsError:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info = RecordingInfoFile.create_empty_file(self.rec_path)
        self.meta_info.recording_software_name = (
            RecordingInfoFile.RECORDING_SOFTWARE_NAME_PUPIL_CAPTURE)
        self.meta_info.recording_software_version = str(self.g_pool.version)
        self.meta_info.recording_name = self.session_name
        self.meta_info.start_time_synced_s = start_time_synced
        self.meta_info.start_time_system_s = self.start_time
        self.meta_info.recording_uuid = recording_uuid
        self.meta_info.system_info = get_system_info()

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path, start_time_synced)
        elif hasattr(self.g_pool.capture._recent_frame, "h264_buffer"):
            self.writer = H264Writer(
                self.video_path,
                self.g_pool.capture.frame_size[0],
                self.g_pool.capture.frame_size[1],
                int(self.g_pool.capture.frame_rate),
            )
        else:
            self.writer = MPEG_Writer(self.video_path, start_time_synced)

        calibration_data_notification_classes = [
            CalibrationSetupNotification,
            CalibrationResultNotification,
        ]
        writer = PLData_Writer(self.rec_path, "notify")

        for note_class in calibration_data_notification_classes:
            try:
                file_path = os.path.join(self.g_pool.user_dir,
                                         note_class.file_name())
                note = note_class.from_dict(load_object(file_path))
                note_dict = note.as_dict()

                note_dict["topic"] = "notify." + note_dict["subject"]
                writer.append(note_dict)
            except FileNotFoundError:
                continue

        self.pldata_writers["notify"] = writer

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({
            "subject": "recording.started",
            "rec_path": self.rec_path,
            "session_name": self.session_name,
            "record_eye": self.record_eye,
            "compression": self.raw_jpeg,
            "start_time_synced": float(start_time_synced),
        })

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu("additional Recording Info",
                                         size=(300, 300),
                                         pos=(300, 300))
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.keys():
                self.info_menu.insert(0, ui.Text_Input(name, self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(
            ui.Info_Text(
                'Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'
            ))
        self.info_menu.append(
            ui.Text_Input("user_info",
                          self,
                          setter=set_user_info,
                          label="User info"))
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def recent_events(self, events):

        if self.check_space():
            disk_space = available_gb(self.rec_root_dir)
            if (disk_space < self.warning_low_disk_space_th
                    and self.low_disk_space_thumb not in self.g_pool.quickbar):
                self.g_pool.quickbar.append(self.low_disk_space_thumb)
            elif (disk_space >= self.warning_low_disk_space_th
                  and self.low_disk_space_thumb in self.g_pool.quickbar):
                self.g_pool.quickbar.remove(self.low_disk_space_thumb)

            if self.running and disk_space <= self.stop_rec_low_disk_space_th:
                self.stop()
                logger.error("Recording was stopped due to low disk space!")

        if self.running:
            for key, data in events.items():
                if key not in ("dt",
                               "depth_frame") and not key.startswith("frame"):
                    try:
                        writer = self.pldata_writers[key]
                    except KeyError:
                        writer = PLData_Writer(self.rec_path, key)
                        self.pldata_writers[key] = writer
                    writer.extend(data)
            if "frame" in events:
                frame = events["frame"]
                try:
                    self.writer.write_video_frame(frame)
                    self.frame_count += 1
                except NonMonotonicTimestampError as e:
                    logger.error("Recorder received non-monotonic timestamp!"
                                 " Stopping the recording!")
                    logger.debug(str(e))
                    self.notify_all({"subject": "recording.should_stop"})
                    self.notify_all({
                        "subject": "recording.should_stop",
                        "remote_notify": "all"
                    })
            # # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))

            self.button.status_text = self.get_rec_time_str()

    def stop(self):
        duration_s = self.g_pool.get_timestamp(
        ) - self.meta_info.start_time_synced_s

        # explicit release of VideoWriter
        try:
            self.writer.release()
        except RuntimeError:
            logger.error("No world video recorded")
        else:
            logger.debug("Closed media container")
            self.g_pool.capture.intrinsics.save(self.rec_path,
                                                custom_name="world")
        finally:
            self.writer = None

        for writer in self.pldata_writers.values():
            writer.close()

        del self.pldata_writers

        surface_definition_file_paths = glob.glob(
            os.path.join(self.g_pool.user_dir, "surface_definitions*"))

        if len(surface_definition_file_paths) > 0:
            for source_path in surface_definition_file_paths:
                _, filename = os.path.split(source_path)
                target_path = os.path.join(self.rec_path, filename)
                copy2(source_path, target_path)
        else:
            logger.info(
                "No surface_definitions data found. You may want this if you do marker tracking."
            )

        self.meta_info.duration_s = duration_s
        self.meta_info.save_file()

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"),
                      "w",
                      newline="") as csvfile:
                csv_utils.write_key_value_file(csvfile, self.user_info)
        except OSError:
            logger.exception(
                "Could not save userdata. Please report this bug!")

        self.close_info_menu()

        self.running = False
        if self.menu:
            self.menu.read_only = False
            self.button.status_text = ""

        logger.info("Saved Recording.")
        self.notify_all({
            "subject": "recording.stopped",
            "rec_path": self.rec_path
        })

    def cleanup(self):
        """gets called when the plugin get terminated.
        either volunatily or forced.
        """
        if self.running:
            self.stop()

    def verify_path(self, val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except Exception:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '{}'.".format(n_path))
            return False
        else:
            return n_path

    def set_rec_root_dir(self, val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_root_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if os.path.sep in val:
                logger.warning(
                    "You session name will create one or more subdirectories")
            self.session_name = val
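
Recorder is likewise driven by notifications (see the on_notify docstring above). A hypothetical snippet from another plugin that starts and later stops a recording could look like this; the session name is a placeholder.

# Start a recording; a "/" in the session name creates subdirectories.
self.notify_all({
    "subject": "recording.should_start",
    "session_name": "my_study/subject_01",  # placeholder
    "record_eye": True,
})

# Later, stop the running recording again.
self.notify_all({"subject": "recording.should_stop"})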
Example #6
def eye(
    timebase,
    is_alive_flag,
    ipc_pub_url,
    ipc_sub_url,
    ipc_push_url,
    user_dir,
    version,
    eye_id,
    overwrite_cap_settings=None,
    hide_ui=False,
):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
        ``set_detection_mapping_mode``: Sets detection method
        ``eye_process.should_stop``: Stops the eye process
        ``recording.started``: Starts recording eye video
        ``recording.stopped``: Stops recording eye video
        ``frame_publishing.started``: Starts frame publishing
        ``frame_publishing.stopped``: Stops frame publishing
        ``start_eye_plugin``: Start plugins in eye process

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools

    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                        ipc_sub_url,
                                        topics=("notify", ))

    # logging setup
    import logging

    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.NOTSET)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    if is_alive_flag.value:
        # indicates that this is a duplicate startup of the eye process
        logger.warning("Aborting redundant eye process startup")
        return

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
        # general imports
        import traceback
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible, glViewport
        from ui_roi import UIRoi

        # monitoring
        import psutil

        # Plug-ins
        from plugin import Plugin_List

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, MPEG_Writer
        from ndsi import H264Writer
        from video_capture import source_classes, manager_classes

        from background_helper import IPC_Logging_Task_Proxy
        from pupil_detector_plugins import available_detector_plugins
        from pupil_detector_plugins.manager import PupilDetectorManager

        IPC_Logging_Task_Proxy.push_url = ipc_push_url

        def interrupt_handler(sig, frame):
            import traceback

            trace = traceback.format_stack(f=frame)
            logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
            # NOTE: Interrupt is handled in world/service/player which are responsible for
            # shutting down the eye process properly

        signal.signal(signal.SIGINT, interrupt_handler)

        # UI Platform tweaks
        if platform.system() == "Linux":
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id + 30)
        elif platform.system() == "Windows":
            scroll_factor = 10.0
            window_position_default = (600, 90 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        icon_bar_width = 50
        window_size = None
        camera_render_size = None
        hdpi_factor = 1.0

        # g_pool holds variables for this process
        g_pool = SimpleNamespace()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = "capture"
        g_pool.eye_id = eye_id
        g_pool.process = f"eye{eye_id}"
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        default_detector_cls, available_detectors = available_detector_plugins(
        )
        plugins = (manager_classes + source_classes + available_detectors +
                   [PupilDetectorManager])
        g_pool.plugin_by_name = {p.__name__: p for p in plugins}

        preferred_names = [
            f"Pupil Cam3 ID{eye_id}",
            f"Pupil Cam2 ID{eye_id}",
            f"Pupil Cam1 ID{eye_id}",
        ]
        if eye_id == 0:
            preferred_names += ["HD-6000"]
        default_capture_settings = (
            "UVC_Source",
            {
                "preferred_names": preferred_names,
                "frame_size": (320, 240),
                "frame_rate": 120,
            },
        )

        default_plugins = [
            # TODO: extend with plugins
            default_capture_settings,
            ("UVC_Manager", {}),
            # Detector needs to be loaded first to set `g_pool.pupil_detector`
            (default_detector_cls.__name__, {}),
            ("PupilDetectorManager", {}),
        ]

        # Callback functions
        def on_resize(window, w, h):
            nonlocal window_size
            nonlocal camera_render_size
            nonlocal hdpi_factor

            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(window)
            hdpi_factor = glfw.getHDPIFactor(window)
            g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
            window_size = w, h
            camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
            g_pool.gui.update_window(w, h)
            g_pool.gui.collect_menus()
            for g in g_pool.graphs:
                g.scale = hdpi_factor
                g.adjust_window_size(w, h)
            adjust_gl_view(w, h)
            glfw.glfwMakeContextCurrent(active_window)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            x *= hdpi_factor
            y *= hdpi_factor
            g_pool.gui.update_mouse(x, y)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            paths = [paths[x].decode("utf-8") for x in range(count)]
            for plugin in g_pool.plugins:
                if plugin.on_drop(paths):
                    break

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir,
                         "user_settings_eye{}".format(eye_id)))
        if VersionFormat(session_settings.get("version",
                                              "0.0")) != g_pool.version:
            logger.info(
                "Session setting are from a different version of this app. I will not use those."
            )
            session_settings.clear()

        g_pool.iconified = False
        g_pool.capture = None
        g_pool.flip = session_settings.get("flip", False)
        g_pool.display_mode = session_settings.get("display_mode",
                                                   "camera_image")
        g_pool.display_mode_info_text = {
            "camera_image":
            "Raw eye camera image. This uses the least amount of CPU power",
            "roi":
            "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            "algorithm":
            "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
        }

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def toggle_general_settings(collapsed):
            # this is the menu toggle logic.
            # Only one menu can be open.
            # If no menu is open the menubar should collapse.
            g_pool.menubar.collapsed = collapsed
            for m in g_pool.menubar.elements:
                m.collapsed = True
            general_settings.collapsed = collapsed

        # Initialize glfw
        glfw.glfwInit()
        if hide_ui:
            glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0)  # hide window
        title = "Pupil Capture - eye {}".format(eye_id)

        width, height = session_settings.get("window_size",
                                             (640 + icon_bar_width, 480))

        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get("window_position",
                                          window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(
            np.ones((1, 1), dtype=np.uint8) + 125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
        g_pool.menubar = ui.Scrolling_Menu("Settings",
                                           pos=(-500, 0),
                                           size=(-icon_bar_width, 0),
                                           header_pos="left")
        g_pool.iconbar = ui.Scrolling_Menu("Icons",
                                           pos=(-icon_bar_width, 0),
                                           size=(0, 0),
                                           header_pos="hidden")
        g_pool.gui.append(g_pool.menubar)
        g_pool.gui.append(g_pool.iconbar)

        general_settings = ui.Growing_Menu("General", header_pos="headline")
        general_settings.append(
            ui.Selector(
                "gui_user_scale",
                g_pool,
                setter=set_scale,
                selection=[0.8, 0.9, 1.0, 1.1, 1.2],
                label="Interface Size",
            ))

        def set_window_size():
            f_width, f_height = g_pool.capture.frame_size
            f_width *= 2
            f_height *= 2
            f_width += int(icon_bar_width * g_pool.gui.scale)
            glfw.glfwSetWindowSize(main_window, f_width, f_height)

        def uroi_on_mouse_button(button, action, mods):
            if g_pool.display_mode == "roi":
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the ROI interacts we don't want
                    # the GUI to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    x, y = glfw.glfwGetCursorPos(main_window)
                    # pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    x *= hdpi_factor
                    y *= hdpi_factor
                    pos = normalize((x, y), camera_render_size)
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    # Position in img pixels
                    pos = denormalize(pos, g_pool.capture.frame_size)
                    if g_pool.u_r.mouse_over_edit_pt(pos,
                                                     g_pool.u_r.handle_size,
                                                     g_pool.u_r.handle_size):
                        # if the ROI interacts we don't want
                        # the GUI to interact as well
                        return

        general_settings.append(ui.Button("Reset window size",
                                          set_window_size))
        general_settings.append(
            ui.Switch("flip", g_pool, label="Flip image display"))
        general_settings.append(
            ui.Selector(
                "display_mode",
                g_pool,
                setter=set_display_mode_info,
                selection=["camera_image", "roi", "algorithm"],
                labels=["Camera Image", "ROI", "Algorithm"],
                label="Mode",
            ))
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)

        g_pool.menubar.append(general_settings)
        icon = ui.Icon(
            "collapsed",
            general_settings,
            label=chr(0xE8B8),
            on_val=False,
            off_val=True,
            setter=toggle_general_settings,
            label_font="pupil_icons",
        )
        icon.tooltip = "General Settings"
        g_pool.iconbar.append(icon)
        toggle_general_settings(False)

        plugins_to_load = session_settings.get("loaded_plugins",
                                               default_plugins)
        if overwrite_cap_settings:
            # Ensure that overwrite_cap_settings takes preference over source plugins
            # with incorrect settings that were loaded from session settings.
            plugins_to_load.append(overwrite_cap_settings)

        g_pool.plugins = Plugin_List(g_pool, plugins_to_load)

        g_pool.writer = None

        g_pool.u_r = UIRoi(
            (g_pool.capture.frame_size[1], g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get("roi")
        if roi_user_settings and tuple(
                roi_user_settings[-1]) == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get("ui_config", {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 50)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = "CPU %0.1f"

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 50)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = "jpeg"
        frame_publish_format_recent_warning = False

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning("Process started.")

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification["subject"]
                if subject.startswith("eye_process.should_stop"):
                    if notification["eye_id"] == eye_id:
                        break
                elif subject == "recording.started":
                    if notification["record_eye"] and g_pool.capture.online:
                        record_path = notification["rec_path"]
                        raw_mode = notification["compression"]
                        start_time_synced = notification["start_time_synced"]
                        logger.info(
                            "Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path,
                                                  "eye{}.mp4".format(eye_id))
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(
                                video_path, start_time_synced)
                        elif hasattr(g_pool.capture._recent_frame,
                                     "h264_buffer"):
                            g_pool.writer = H264Writer(
                                video_path,
                                g_pool.capture.frame_size[0],
                                g_pool.capture.frame_size[1],
                                g_pool.capture.frame_rate,
                            )
                        else:
                            g_pool.writer = MPEG_Writer(
                                video_path, start_time_synced)
                elif subject == "recording.stopped":
                    if g_pool.writer:
                        logger.info("Done recording.")
                        try:
                            g_pool.writer.release()
                        except RuntimeError:
                            logger.error("No eye video recorded")
                        g_pool.writer = None
                elif subject.startswith("meta.should_doc"):
                    ipc_socket.notify({
                        "subject": "meta.doc",
                        "actor": "eye{}".format(eye_id),
                        "doc": eye.__doc__,
                    })
                elif subject.startswith("frame_publishing.started"):
                    should_publish_frames = True
                    frame_publish_format = notification.get("format", "jpeg")
                elif subject.startswith("frame_publishing.stopped"):
                    should_publish_frames = False
                    frame_publish_format = "jpeg"
                elif (subject.startswith("start_eye_plugin")
                      and notification["target"] == g_pool.process):
                    try:
                        g_pool.plugins.add(
                            g_pool.plugin_by_name[notification["name"]],
                            notification.get("args", {}),
                        )
                    except KeyError as err:
                        logger.error(f"Attempt to load unknown plugin: {err}")

                for plugin in g_pool.plugins:
                    plugin.on_notify(notification)

            event = {}
            for plugin in g_pool.plugins:
                plugin.recent_events(event)

            frame = event.get("frame")
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (
                        f_height,
                        f_width,
                ):
                    g_pool.pupil_detector.on_resolution_change(
                        (g_pool.u_r.array_shape[1], g_pool.u_r.array_shape[0]),
                        g_pool.capture.frame_size,
                    )
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        assert data is not None
                    except (AttributeError, AssertionError, NameError):
                        if not frame_publish_format_recent_warning:
                            frame_publish_format_recent_warning = True
                            logger.warning(
                                '{}s are not compatible with format "{}"'.
                                format(type(frame), frame_publish_format))
                    else:
                        frame_publish_format_recent_warning = False
                        pupil_socket.send({
                            "topic": "frame.eye.{}".format(eye_id),
                            "width": frame.width,
                            "height": frame.height,
                            "index": frame.index,
                            "timestamp": frame.timestamp,
                            "format": frame_publish_format,
                            "__raw_data__": [data],
                        })

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1.0 / dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                result = event.get("pupil_detection_result", None)
                if result is not None:
                    pupil_socket.send(result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    glViewport(0, 0, *camera_render_size)
                    for p in g_pool.plugins:
                        p.gl_display()

                    glViewport(0, 0, *camera_render_size)
                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == "roi":
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    glViewport(0, 0, *window_size)
                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    try:
                        clipboard = glfw.glfwGetClipboardString(
                            main_window).decode()
                    except AttributeError:  # clipboard is None, might happen on startup
                        clipboard = ""
                    g_pool.gui.update_clipboard(clipboard)
                    user_input = g_pool.gui.update()
                    if user_input.clipboard != clipboard:
                        # only write to clipboard if content changed
                        glfw.glfwSetClipboardString(
                            main_window, user_input.clipboard.encode())

                    for button, action, mods in user_input.buttons:
                        x, y = glfw.glfwGetCursorPos(main_window)
                        pos = x * hdpi_factor, y * hdpi_factor
                        pos = normalize(pos, camera_render_size)
                        # Position in img pixels
                        pos = denormalize(pos, g_pool.capture.frame_size)

                        for plugin in g_pool.plugins:
                            if plugin.on_click(pos, button, action):
                                break

                    for key, scancode, action, mods in user_input.keys:
                        for plugin in g_pool.plugins:
                            if plugin.on_key(key, scancode, action, mods):
                                break

                    for char_ in user_input.chars:
                        for plugin in g_pool.plugins:
                            if plugin.on_char(char_):
                                break

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer.release()
            g_pool.writer = None

        session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
        # save session persistent settings
        session_settings["gui_scale"] = g_pool.gui_user_scale
        session_settings["roi"] = g_pool.u_r.get()
        session_settings["flip"] = g_pool.flip
        session_settings["display_mode"] = g_pool.display_mode
        session_settings["ui_config"] = g_pool.gui.configuration
        session_settings["version"] = str(g_pool.version)

        if not hide_ui:
            glfw.glfwRestoreWindow(
                main_window)  # need to do this for windows os
            session_settings["window_position"] = glfw.glfwGetWindowPos(
                main_window)
            session_window_size = glfw.glfwGetWindowSize(main_window)
            if 0 not in session_window_size:
                session_settings["window_size"] = session_window_size

        session_settings.close()

        for plugin in g_pool.plugins:
            plugin.alive = False
        g_pool.plugins.clean()

        glfw.glfwDestroyWindow(main_window)
        g_pool.gui.terminate()
        glfw.glfwTerminate()
        logger.info("Process shutting down.")
Example #7
0
def eye(
    timebase,
    is_alive_flag,
    ipc_pub_url,
    ipc_sub_url,
    ipc_push_url,
    user_dir,
    version,
    eye_id,
    overwrite_cap_settings=None,
    hide_ui=False,
    debug=False,
    pub_socket_hwm=None,
    parent_application="capture",
):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
        ``eye_process.should_stop``: Stops the eye process
        ``recording.started``: Starts recording eye video
        ``recording.stopped``: Stops recording eye video
        ``frame_publishing.started``: Starts frame publishing
        ``frame_publishing.stopped``: Stops frame publishing
        ``start_eye_plugin``: Start plugins in eye process

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise, each process would also load the imports of the other processes.
    import zmq
    import zmq_tools

    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url, pub_socket_hwm)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))

    # logging setup
    import logging

    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.NOTSET)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    if is_alive_flag.value:
        # indicates that this eye process is a duplicate startup
        logger.warning("Aborting redundant eye process startup")
        return

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
        # general imports
        import traceback
        import numpy as np
        import cv2

        from OpenGL.GL import GL_COLOR_BUFFER_BIT

        # display
        import glfw
        from gl_utils import GLFWErrorReporting

        GLFWErrorReporting.set_default()

        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import Named_Texture
        import gl_utils
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible, glViewport

        # monitoring
        import psutil

        # Plug-ins
        from plugin import Plugin_List

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import parse_version
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, MPEG_Writer, NonMonotonicTimestampError
        from ndsi import H264Writer
        from video_capture import source_classes, manager_classes
        from roi import Roi

        from background_helper import IPC_Logging_Task_Proxy
        from pupil_detector_plugins import available_detector_plugins, EVENT_KEY

        IPC_Logging_Task_Proxy.push_url = ipc_push_url

        def interrupt_handler(sig, frame):
            import traceback

            trace = traceback.format_stack(f=frame)
            logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
            # NOTE: Interrupt is handled in world/service/player which are responsible for
            # shutting down the eye process properly

        signal.signal(signal.SIGINT, interrupt_handler)

        # UI Platform tweaks
        if platform.system() == "Linux":
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id + 30)
        elif platform.system() == "Windows":
            scroll_factor = 10.0
            window_position_default = (600, 90 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        icon_bar_width = 50
        window_size = None
        content_scale = 1.0

        # g_pool holds variables for this process
        g_pool = SimpleNamespace()

        # make some constants available
        g_pool.debug = debug
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = parent_application
        g_pool.eye_id = eye_id
        g_pool.process = f"eye{eye_id}"
        g_pool.timebase = timebase
        g_pool.camera_render_size = None

        g_pool.zmq_ctx = zmq_ctx
        g_pool.ipc_pub = ipc_socket
        g_pool.ipc_pub_url = ipc_pub_url
        g_pool.ipc_sub_url = ipc_sub_url
        g_pool.ipc_push_url = ipc_push_url

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        def load_runtime_pupil_detection_plugins():
            from plugin import import_runtime_plugins
            from pupil_detector_plugins.detector_base_plugin import PupilDetectorPlugin

            plugins_path = os.path.join(g_pool.user_dir, "plugins")

            for plugin in import_runtime_plugins(plugins_path):
                if not isinstance(plugin, type):
                    continue
                if not issubclass(plugin, PupilDetectorPlugin):
                    continue
                if plugin is PupilDetectorPlugin:
                    continue
                yield plugin

        available_detectors = available_detector_plugins()
        runtime_detectors = list(load_runtime_pupil_detection_plugins())
        plugins = (
            manager_classes
            + source_classes
            + available_detectors
            + runtime_detectors
            + [Roi]
        )
        g_pool.plugin_by_name = {p.__name__: p for p in plugins}

        preferred_names = [
            f"Pupil Cam3 ID{eye_id}",
            f"Pupil Cam2 ID{eye_id}",
            f"Pupil Cam1 ID{eye_id}",
        ]
        if eye_id == 0:
            preferred_names += ["HD-6000"]

        default_capture_name = "UVC_Source"
        default_capture_settings = {
            "preferred_names": preferred_names,
            "frame_size": (192, 192),
            "frame_rate": 120,
        }

        default_plugins = [
            # TODO: extend with plugins
            (default_capture_name, default_capture_settings),
            ("UVC_Manager", {}),
            *[(p.__name__, {}) for p in available_detectors],
            ("NDSI_Manager", {}),
            ("HMD_Streaming_Manager", {}),
            ("File_Manager", {}),
            ("Roi", {}),
        ]

        def consume_events_and_render_buffer():
            glfw.make_context_current(main_window)
            clear_gl_screen()

            if all(c > 0 for c in g_pool.camera_render_size):
                glViewport(0, 0, *g_pool.camera_render_size)
                for p in g_pool.plugins:
                    p.gl_display()

            glViewport(0, 0, *window_size)
            # render graphs
            fps_graph.draw()
            cpu_graph.draw()

            # render GUI
            try:
                clipboard = glfw.get_clipboard_string(main_window).decode()
            except (AttributeError, glfw.GLFWError):
                # clipboard is None, might happen on startup
                clipboard = ""
            g_pool.gui.update_clipboard(clipboard)
            user_input = g_pool.gui.update()
            if user_input.clipboard != clipboard:
                # only write to clipboard if content changed
                glfw.set_clipboard_string(main_window, user_input.clipboard)

            for button, action, mods in user_input.buttons:
                x, y = glfw.get_cursor_pos(main_window)
                pos = gl_utils.window_coordinate_to_framebuffer_coordinate(
                    main_window, x, y, cached_scale=None
                )
                pos = normalize(pos, g_pool.camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                # Position in img pixels
                pos = denormalize(pos, g_pool.capture.frame_size)

                for plugin in g_pool.plugins:
                    if plugin.on_click(pos, button, action):
                        break

            for key, scancode, action, mods in user_input.keys:
                for plugin in g_pool.plugins:
                    if plugin.on_key(key, scancode, action, mods):
                        break

            for char_ in user_input.chars:
                for plugin in g_pool.plugins:
                    if plugin.on_char(char_):
                        break

            # update screen
            glfw.swap_buffers(main_window)

        # Callback functions
        def on_resize(window, w, h):
            nonlocal window_size
            nonlocal content_scale

            is_minimized = bool(glfw.get_window_attrib(window, glfw.ICONIFIED))

            if is_minimized:
                return

            # Always clear buffers on resize to make sure that there are no overlapping
            # artifacts from previous frames.
            gl_utils.glClear(GL_COLOR_BUFFER_BIT)
            gl_utils.glClearColor(0, 0, 0, 1)

            active_window = glfw.get_current_context()
            glfw.make_context_current(window)
            content_scale = gl_utils.get_content_scale(window)
            framebuffer_scale = gl_utils.get_framebuffer_scale(window)
            g_pool.gui.scale = content_scale
            window_size = w, h
            g_pool.camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
            g_pool.gui.update_window(w, h)
            g_pool.gui.collect_menus()
            for g in g_pool.graphs:
                g.scale = content_scale
                g.adjust_window_size(w, h)
            adjust_gl_view(w, h)
            glfw.make_context_current(active_window)

            # Minimum window size required, otherwise parts of the UI can cause openGL
            # issues with permanent effects. Depends on the content scale, which can
            # potentially be dynamically modified, so we re-adjust the size limits every
            # time here.
            min_size = int(2 * icon_bar_width * g_pool.gui.scale / framebuffer_scale)
            glfw.set_window_size_limits(
                window,
                min_size,
                min_size,
                glfw.DONT_CARE,
                glfw.DONT_CARE,
            )

            # Needed, to update the window buffer while resizing
            consume_events_and_render_buffer()

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            x, y = gl_utils.window_coordinate_to_framebuffer_coordinate(
                window, x, y, cached_scale=None
            )
            g_pool.gui.update_mouse(x, y)

            pos = x, y
            pos = normalize(pos, g_pool.camera_render_size)
            if g_pool.flip:
                pos = 1 - pos[0], 1 - pos[1]
            # Position in img pixels
            pos = denormalize(pos, g_pool.capture.frame_size)

            for p in g_pool.plugins:
                p.on_pos(pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, paths):
            for plugin in g_pool.plugins:
                if plugin.on_drop(paths):
                    break

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir, "user_settings_eye{}".format(eye_id))
        )
        if parse_version(session_settings.get("version", "0.0")) != g_pool.version:
            logger.info(
                "Session setting are from a different version of this app. I will not use those."
            )
            session_settings.clear()

        camera_is_physically_flipped = eye_id == 0
        g_pool.iconified = False
        g_pool.capture = None
        g_pool.flip = session_settings.get("flip", camera_is_physically_flipped)
        g_pool.display_mode = session_settings.get("display_mode", "camera_image")
        g_pool.display_mode_info_text = {
            "camera_image": "Raw eye camera image. This uses the least amount of CPU power",
            "roi": "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            "algorithm": "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
        }

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def toggle_general_settings(collapsed):
            # this is the menu toggle logic.
            # Only one menu can be open.
            # If no menu is open the menubar should collapse.
            g_pool.menubar.collapsed = collapsed
            for m in g_pool.menubar.elements:
                m.collapsed = True
            general_settings.collapsed = collapsed

        # Initialize glfw
        glfw.init()
        glfw.window_hint(glfw.SCALE_TO_MONITOR, glfw.TRUE)
        if hide_ui:
            glfw.window_hint(glfw.VISIBLE, 0)  # hide window
        title = "Pupil Capture - eye {}".format(eye_id)

        # Pupil Cam1 uses 4:3 resolutions. Pupil Cam2 and Cam3 use 1:1 resolutions.
        # As all Pupil Core and VR/AR add-ons are shipped with Pupil Cam2 and Cam3
        # cameras, we adjust the default eye window size to a 1:1 content aspect ratio.
        # The size of 500 was chosen s.t. the menu still fits.
        default_window_size = 500 + icon_bar_width, 500
        width, height = session_settings.get("window_size", default_window_size)

        main_window = glfw.create_window(width, height, title, None, None)

        window_position_manager = gl_utils.WindowPositionManager()
        window_pos = window_position_manager.new_window_position(
            window=main_window,
            default_position=window_position_default,
            previous_position=session_settings.get("window_position", None),
        )
        glfw.set_window_pos(main_window, window_pos[0], window_pos[1])

        glfw.make_context_current(main_window)
        cygl.utils.init()

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(np.ones((1, 1), dtype=np.uint8) + 125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.menubar = ui.Scrolling_Menu(
            "Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
        )
        g_pool.iconbar = ui.Scrolling_Menu(
            "Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
        )
        g_pool.gui.append(g_pool.menubar)
        g_pool.gui.append(g_pool.iconbar)

        general_settings = ui.Growing_Menu("General", header_pos="headline")

        def set_window_size():
            # Get current capture frame size
            f_width, f_height = g_pool.capture.frame_size
            # Eye camera resolutions are too small to be used as default window sizes.
            # We use double their size instead.
            frame_scale_factor = 2
            f_width *= frame_scale_factor
            f_height *= frame_scale_factor

            # Get current display scale factor
            content_scale = gl_utils.get_content_scale(main_window)
            framebuffer_scale = gl_utils.get_framebuffer_scale(main_window)
            display_scale_factor = content_scale / framebuffer_scale

            # Scale the capture frame size by display scale factor
            f_width *= display_scale_factor
            f_height *= display_scale_factor

            # Increase the width to account for the added scaled icon bar width
            f_width += icon_bar_width * display_scale_factor

            # Set the newly calculated size (scaled capture frame size + scaled icon bar width)
            glfw.set_window_size(main_window, int(f_width), int(f_height))

        general_settings.append(ui.Button("Reset window size", set_window_size))
        general_settings.append(ui.Switch("flip", g_pool, label="Flip image display"))
        general_settings.append(
            ui.Selector(
                "display_mode",
                g_pool,
                setter=set_display_mode_info,
                selection=["camera_image", "roi", "algorithm"],
                labels=["Camera Image", "ROI", "Algorithm"],
                label="Mode",
            )
        )
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode]
        )

        general_settings.append(g_pool.display_mode_info)

        g_pool.menubar.append(general_settings)
        icon = ui.Icon(
            "collapsed",
            general_settings,
            label=chr(0xE8B8),
            on_val=False,
            off_val=True,
            setter=toggle_general_settings,
            label_font="pupil_icons",
        )
        icon.tooltip = "General Settings"
        g_pool.iconbar.append(icon)

        plugins_to_load = session_settings.get("loaded_plugins", default_plugins)
        if overwrite_cap_settings:
            # Ensure that overwrite_cap_settings takes preference over source plugins
            # with incorrect settings that were loaded from session settings.
            plugins_to_load.append(overwrite_cap_settings)

        # Add runtime plugins to the list of plugins to load with default arguments,
        # if not already restored from session settings
        plugins_to_load_names = set(name for name, _ in plugins_to_load)
        for runtime_detector in runtime_detectors:
            runtime_name = runtime_detector.__name__
            if runtime_name not in plugins_to_load_names:
                plugins_to_load.append((runtime_name, {}))

        g_pool.plugins = Plugin_List(g_pool, plugins_to_load)

        if not g_pool.capture:
            # Make sure we always have a capture running. Important if there was no
            # capture stored in session settings.
            g_pool.plugins.add(
                g_pool.plugin_by_name[default_capture_name], default_capture_settings
            )

        toggle_general_settings(True)

        g_pool.writer = None
        g_pool.rec_path = None

        # Register callbacks main_window
        glfw.set_framebuffer_size_callback(main_window, on_resize)
        glfw.set_window_iconify_callback(main_window, on_iconify)
        glfw.set_key_callback(main_window, on_window_key)
        glfw.set_char_callback(main_window, on_window_char)
        glfw.set_mouse_button_callback(main_window, on_window_mouse_button)
        glfw.set_cursor_pos_callback(main_window, on_pos)
        glfw.set_scroll_callback(main_window, on_scroll)
        glfw.set_drop_callback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get("ui_config", {})
        # If previously selected plugin was not loaded this time, we will have an
        # expanded menubar without any menu selected. We need to ensure the menubar is
        # collapsed in this case.
        if all(submenu.collapsed for submenu in g_pool.menubar.elements):
            g_pool.menubar.collapsed = True

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 50)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = "CPU %0.1f"

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 50)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.get_framebuffer_size(main_window))

        should_publish_frames = False
        frame_publish_format = "jpeg"
        frame_publish_format_recent_warning = False

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning("Process started.")

        frame = None

        if platform.system() == "Darwin":
            # On macOS, calls to glfw.swap_buffers() deliberately take longer in case of
            # occluded windows, based on the swap interval value. This causes an FPS drop
            # and leads to problems when recording. To side-step this behaviour, the swap
            # interval is set to zero.
            #
            # Read more about window occlusion on macOS here:
            # https://developer.apple.com/library/archive/documentation/Performance/Conceptual/power_efficiency_guidelines_osx/WorkWhenVisible.html
            glfw.swap_interval(0)

        # Event loop
        window_should_close = False
        while not window_should_close:

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification["subject"]
                if subject.startswith("eye_process.should_stop"):
                    if notification["eye_id"] == eye_id:
                        break
                elif subject == "recording.started":
                    if notification["record_eye"] and g_pool.capture.online:
                        g_pool.rec_path = notification["rec_path"]
                        raw_mode = notification["compression"]
                        start_time_synced = notification["start_time_synced"]
                        logger.info(f"Will save eye video to: {g_pool.rec_path}")
                        video_path = os.path.join(
                            g_pool.rec_path, "eye{}.mp4".format(eye_id)
                        )
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(video_path, start_time_synced)
                        elif hasattr(g_pool.capture._recent_frame, "h264_buffer"):
                            g_pool.writer = H264Writer(
                                video_path,
                                g_pool.capture.frame_size[0],
                                g_pool.capture.frame_size[1],
                                g_pool.capture.frame_rate,
                            )
                        else:
                            g_pool.writer = MPEG_Writer(video_path, start_time_synced)
                elif subject == "recording.stopped":
                    if g_pool.writer:
                        logger.info("Done recording.")
                        try:
                            g_pool.writer.release()
                        except RuntimeError:
                            logger.error("No eye video recorded")
                        else:
                            # TODO: wrap recording logic into plugin
                            g_pool.capture.intrinsics.save(
                                g_pool.rec_path, custom_name=f"eye{eye_id}"
                            )
                        finally:
                            g_pool.writer = None
                elif subject.startswith("meta.should_doc"):
                    ipc_socket.notify(
                        {
                            "subject": "meta.doc",
                            "actor": "eye{}".format(eye_id),
                            "doc": eye.__doc__,
                        }
                    )
                elif subject.startswith("frame_publishing.started"):
                    should_publish_frames = True
                    frame_publish_format = notification.get("format", "jpeg")
                elif subject.startswith("frame_publishing.stopped"):
                    should_publish_frames = False
                    frame_publish_format = "jpeg"
                elif (
                    subject.startswith("start_eye_plugin")
                    and notification["target"] == g_pool.process
                ):
                    try:
                        g_pool.plugins.add(
                            g_pool.plugin_by_name[notification["name"]],
                            notification.get("args", {}),
                        )
                    except KeyError as err:
                        logger.error(f"Attempt to load unknown plugin: {err}")
                elif (
                    subject.startswith("stop_eye_plugin")
                    and notification["target"] == g_pool.process
                ):
                    try:
                        plugin_to_stop = g_pool.plugin_by_name[notification["name"]]
                    except KeyError as err:
                        logger.error(f"Attempt to load unknown plugin: {err}")
                    else:
                        plugin_to_stop.alive = False
                        g_pool.plugins.clean()

                for plugin in g_pool.plugins:
                    plugin.on_notify(notification)

            event = {}
            for plugin in g_pool.plugins:
                plugin.recent_events(event)

            frame = event.get("frame")
            if frame:
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        assert data is not None
                    except (AttributeError, AssertionError, NameError):
                        if not frame_publish_format_recent_warning:
                            frame_publish_format_recent_warning = True
                            logger.warning(
                                '{}s are not compatible with format "{}"'.format(
                                    type(frame), frame_publish_format
                                )
                            )
                    else:
                        frame_publish_format_recent_warning = False
                        pupil_socket.send(
                            {
                                "topic": "frame.eye.{}".format(eye_id),
                                "width": frame.width,
                                "height": frame.height,
                                "index": frame.index,
                                "timestamp": frame.timestamp,
                                "format": frame_publish_format,
                                "__raw_data__": [data],
                            }
                        )

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1.0 / dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    try:
                        g_pool.writer.write_video_frame(frame)
                    except NonMonotonicTimestampError as e:
                        logger.error(
                            "Recorder received non-monotonic timestamp!"
                            " Stopping the recording!"
                        )
                        logger.debug(str(e))
                        ipc_socket.notify({"subject": "recording.should_stop"})
                        ipc_socket.notify(
                            {"subject": "recording.should_stop", "remote_notify": "all"}
                        )

                for result in event.get(EVENT_KEY, ()):
                    pupil_socket.send(result)

            # GL drawing
            if window_should_update():
                cpu_graph.update()
                if is_window_visible(main_window):
                    consume_events_and_render_buffer()
                glfw.poll_events()
                window_should_close = glfw.window_should_close(main_window)

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer.release()
            g_pool.writer = None

        session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
        # save session persistent settings
        session_settings["flip"] = g_pool.flip
        session_settings["display_mode"] = g_pool.display_mode
        session_settings["ui_config"] = g_pool.gui.configuration
        session_settings["version"] = str(g_pool.version)

        if not hide_ui:
            glfw.restore_window(main_window)  # need to do this for windows os
            session_settings["window_position"] = glfw.get_window_pos(main_window)
            session_window_size = glfw.get_window_size(main_window)
            if 0 not in session_window_size:
                f_width, f_height = session_window_size
                if platform.system() in ("Windows", "Linux"):
                    # Store unscaled window size as the operating system will scale the
                    # windows appropriately during launch on Windows and Linux.
                    f_width, f_height = (
                        f_width / content_scale,
                        f_height / content_scale,
                    )
                session_settings["window_size"] = int(f_width), int(f_height)

        session_settings.close()

        for plugin in g_pool.plugins:
            plugin.alive = False
        g_pool.plugins.clean()

    glfw.destroy_window(main_window)
    g_pool.gui.terminate()
    glfw.terminate()
    logger.info("Process shutting down.")
class Realsense_Source(Base_Source):
    """
    Camera Capture is a class that encapsulates pyrs.Device:
    """
    def __init__(
            self,
            g_pool,
            device_id=0,
            frame_size=(1920, 1080),
            frame_rate=30,
            depth_frame_size=(640, 480),
            depth_frame_rate=60,
            align_streams=False,
            preview_depth=False,
            device_options=(),
            record_depth=True,
            stream_preset=None,
    ):
        super().__init__(g_pool)
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.device = None
        self.service = pyrs.Service()
        self.align_streams = align_streams
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self.controls = None
        self.pitch = 0
        self.yaw = 0
        self.mouse_drag = False
        self.last_pos = (0, 0)
        self.depth_window = None
        self._needs_restart = False
        self.stream_preset = stream_preset
        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )

    def _initialize_device(
            self,
            device_id,
            color_frame_size,
            color_fps,
            depth_frame_size,
            depth_fps,
            device_options=(),
    ):
        devices = tuple(self.service.get_devices())
        color_frame_size = tuple(color_frame_size)
        depth_frame_size = tuple(depth_frame_size)

        self.streams = [ColorStream(), DepthStream(), PointStream()]
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if not devices:
            if not self._needs_restart:
                logger.error(
                    "Camera failed to initialize. No cameras connected.")
            self.device = None
            self.update_menu()
            return

        if self.device is not None:
            self.device.stop()  # only call Device.stop() on an existing device

        if device_id >= len(devices):
            logger.error(
                "Camera with id {} not found. Initializing default camera.".
                format(device_id))
            device_id = 0

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)

        # make sure that given frame sizes and rates are available
        color_modes = self._available_modes[rs_stream.RS_STREAM_COLOR]
        if color_frame_size not in color_modes:
            # automatically select highest resolution
            color_frame_size = sorted(color_modes.keys(), reverse=True)[0]

        if color_fps not in color_modes[color_frame_size]:
            # automatically select highest frame rate
            color_fps = color_modes[color_frame_size][0]

        depth_modes = self._available_modes[rs_stream.RS_STREAM_DEPTH]
        if self.align_streams:
            depth_frame_size = color_frame_size
        else:
            if depth_frame_size not in depth_modes:
                # automatically select highest resolution
                depth_frame_size = sorted(depth_modes.keys(), reverse=True)[0]

        if depth_fps not in depth_modes[depth_frame_size]:
            # automatically select highest frame rate
            depth_fps = depth_modes[depth_frame_size][0]

        colorstream = ColorStream(
            width=color_frame_size[0],
            height=color_frame_size[1],
            fps=color_fps,
            color_format="yuv",
            preset=self.stream_preset,
        )
        depthstream = DepthStream(
            width=depth_frame_size[0],
            height=depth_frame_size[1],
            fps=depth_fps,
            preset=self.stream_preset,
        )
        pointstream = PointStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1],
                                  fps=depth_fps)

        self.streams = [colorstream, depthstream, pointstream]
        if self.align_streams:
            dacstream = DACStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1],
                                  fps=depth_fps)
            dacstream.name = "depth"  # rename data accessor
            self.streams.append(dacstream)

        # update with correctly initialized streams
        # always initializes color + depth, adds rectified/aligned versions as necessary

        self.device = self.service.Device(device_id, streams=self.streams)
        self.controls = Realsense_Controls(self.device, device_options)
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name,
                                           self.frame_size)

        self.update_menu()
        self._needs_restart = False

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates
        """
        formats = {}
        # only lists modes for native streams (RS_STREAM_COLOR/RS_STREAM_DEPTH)
        for mode in self.service.get_device_modes(device_id):
            if mode.stream in (rs_stream.RS_STREAM_COLOR,
                               rs_stream.RS_STREAM_DEPTH):
                # check if frame size dict is available
                if mode.stream not in formats:
                    formats[mode.stream] = {}
                stream_obj = next(
                    (s for s in self.streams if s.stream == mode.stream))
                if mode.format == stream_obj.format:
                    size = mode.width, mode.height
                    # check if framerate list is already available
                    if size not in formats[mode.stream]:
                        formats[mode.stream][size] = []
                    formats[mode.stream][size].append(mode.fps)

        if self.align_streams:
            depth_sizes = formats[rs_stream.RS_STREAM_DEPTH].keys()
            color_sizes = formats[rs_stream.RS_STREAM_COLOR].keys()
            # common_sizes = depth_sizes & color_sizes
            discarded_sizes = depth_sizes ^ color_sizes
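            # illustrative example (made-up sizes): if depth offers {(640, 480), (480, 360)}
            # and color offers {(640, 480), (1920, 1080)}, the symmetric difference
            # {(480, 360), (1920, 1080)} is removed from both, leaving only (640, 480)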
            for size in discarded_sizes:
                for sizes in formats.values():
                    if size in sizes:
                        del sizes[size]

        return formats

    def cleanup(self):
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        if self.device is not None:
            self.device.stop()
        self.service.stop()

    def get_init_dict(self):
        return {
            "device_id": self.device.device_id if self.device is not None else 0,
            "frame_size": self.frame_size,
            "frame_rate": self.frame_rate,
            "depth_frame_size": self.depth_frame_size,
            "depth_frame_rate": self.depth_frame_rate,
            "preview_depth": self.preview_depth,
            "record_depth": self.record_depth,
            "align_streams": self.align_streams,
            "device_options": (
                self.controls.export_presets() if self.controls is not None else ()
            ),
            "stream_preset": self.stream_preset,
        }

    def get_frames(self):
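        # returns (color, depth); an entry is None when its device timestamp has
        # not advanced since the previous call, i.e. there is no new frame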
        if self.device:
            self.device.wait_for_frames()
            current_time = self.g_pool.get_timestamp()

            last_color_frame_ts = self.device.get_frame_timestamp(
                self.streams[0].stream)
            if self.last_color_frame_ts != last_color_frame_ts:
                self.last_color_frame_ts = last_color_frame_ts
                color = ColorFrame(self.device)
                color.timestamp = current_time
                color.index = self.color_frame_index
                self.color_frame_index += 1
            else:
                color = None

            last_depth_frame_ts = self.device.get_frame_timestamp(
                self.streams[1].stream)
            if self.last_depth_frame_ts != last_depth_frame_ts:
                self.last_depth_frame_ts = last_depth_frame_ts
                depth = DepthFrame(self.device)
                depth.timestamp = current_time
                depth.index = self.depth_frame_index
                self.depth_frame_index += 1
            else:
                depth = None

            return color, depth
        return None, None

    def recent_events(self, events):
        if self._needs_restart:
            self.restart_device()
            time.sleep(0.05)
        elif not self.online:
            time.sleep(0.05)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except (pyrs.RealsenseError, TimeoutError) as err:
            logger.warning(
                "Realsense failed to provide frames. Attempting to reinit.")
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            if color_frame and depth_frame:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        self.remove_menu()

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        try:
            del self.menu[:]
        except AttributeError:
            return

        from pyglui import ui

        if self.device is None:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        def align_and_restart(val):
            self.align_streams = val
            self.restart_device()

        self.menu.append(
            ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(
            ui.Switch("preview_depth", self, label="Preview Depth"))
        self.menu.append(
            ui.Switch("align_streams",
                      self,
                      label="Align Streams",
                      setter=align_and_restart))

        def toggle_depth_display():
            def on_depth_mouse_button(window, button, action, mods):
                if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
                    self.mouse_drag = True
                if (button == glfw.GLFW_MOUSE_BUTTON_LEFT
                        and action == glfw.GLFW_RELEASE):
                    self.mouse_drag = False

            if self.depth_window is None:
                self.pitch = 0
                self.yaw = 0

                win_size = glfw.glfwGetWindowSize(self.g_pool.main_window)
                self.depth_window = glfw.glfwCreateWindow(
                    win_size[0], win_size[1], "3D Point Cloud")
                glfw.glfwSetMouseButtonCallback(self.depth_window,
                                                on_depth_mouse_button)
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(self.depth_window)
                gl_utils.basic_gl_setup()
                gl_utils.make_coord_system_norm_based()

                # refresh speed settings
                glfw.glfwSwapInterval(0)

                glfw.glfwMakeContextCurrent(active_window)

        native_presets = [
            ("None", None),
            ("Best Quality", rs_preset.RS_PRESET_BEST_QUALITY),
            ("Largest image", rs_preset.RS_PRESET_LARGEST_IMAGE),
            ("Highest framerate", rs_preset.RS_PRESET_HIGHEST_FRAMERATE),
        ]

        def set_stream_preset(val):
            if self.stream_preset != val:
                self.stream_preset = val
                self.restart_device()

        self.menu.append(
            ui.Selector(
                "stream_preset",
                self,
                setter=set_stream_preset,
                labels=[preset[0] for preset in native_presets],
                selection=[preset[1] for preset in native_presets],
                label="Stream preset",
            ))
        color_sizes = sorted(self._available_modes[rs_stream.RS_STREAM_COLOR],
                             reverse=True)
        selector = ui.Selector(
            "frame_size",
            self,
            # setter=,
            selection=color_sizes,
            label="Resolution" if self.align_streams else "Color Resolution",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def color_fps_getter():
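            # only offer color frame rates that evenly divide the currently
            # selected depth frame rate, so the two streams keep an integer ratio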
            avail_fps = [
                fps for fps in self._available_modes[rs_stream.RS_STREAM_COLOR]
                [self.frame_size] if self.depth_frame_rate % fps == 0
            ]
            return avail_fps, [str(fps) for fps in avail_fps]

        selector = ui.Selector(
            "frame_rate",
            self,
            # setter=,
            selection_getter=color_fps_getter,
            label="Color Frame Rate",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        if not self.align_streams:
            depth_sizes = sorted(
                self._available_modes[rs_stream.RS_STREAM_DEPTH], reverse=True)
            selector = ui.Selector(
                "depth_frame_size",
                self,
                # setter=,
                selection=depth_sizes,
                label="Depth Resolution",
            )
            selector.read_only = self.stream_preset is not None
            self.menu.append(selector)

        def depth_fps_getter():
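            # conversely, only offer depth frame rates that are integer multiples
            # of the currently selected color frame rate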
            avail_fps = [
                fps for fps in self._available_modes[rs_stream.RS_STREAM_DEPTH]
                [self.depth_frame_size] if fps % self.frame_rate == 0
            ]
            return avail_fps, [str(fps) for fps in avail_fps]

        selector = ui.Selector(
            "depth_frame_rate",
            self,
            selection_getter=depth_fps_getter,
            label="Depth Frame Rate",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def reset_options():
            if self.device:
                try:
                    self.device.reset_device_options_to_default(
                        self.controls.keys())
                except pyrs.RealsenseError as err:
                    logger.info("Resetting some device options failed")
                    logger.debug("Reason: {}".format(err))
                finally:
                    self.controls.refresh()

        self.menu.append(ui.Button("Point Cloud Window", toggle_depth_display))
        sensor_control = ui.Growing_Menu(label="Sensor Settings")
        sensor_control.append(
            ui.Button("Reset device options to default", reset_options))
        for ctrl in sorted(self.controls.values(),
                           key=lambda x: x.range.option):
            # sensor_control.append(ui.Info_Text(ctrl.description))
            if (ctrl.range.min == 0.0 and ctrl.range.max == 1.0
                    and ctrl.range.step == 1.0):
                sensor_control.append(
                    ui.Switch("value",
                              ctrl,
                              label=ctrl.label,
                              off_val=0.0,
                              on_val=1.0))
            else:
                sensor_control.append(
                    ui.Slider(
                        "value",
                        ctrl,
                        label=ctrl.label,
                        min=ctrl.range.min,
                        max=ctrl.range.max,
                        step=ctrl.range.step,
                    ))
        self.menu.append(sensor_control)

    def gl_display(self):
        from math import floor

        if self.depth_window is not None and glfw.glfwWindowShouldClose(
                self.depth_window):
            glfw.glfwDestroyWindow(self.depth_window)
            self.depth_window = None

        if self.depth_window is not None and self._recent_depth_frame is not None:
            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(self.depth_window)

            win_size = glfw.glfwGetFramebufferSize(self.depth_window)
            gl_utils.adjust_gl_view(win_size[0], win_size[1])
            pos = glfw.glfwGetCursorPos(self.depth_window)
            if self.mouse_drag:
                self.pitch = np.clip(self.pitch + (pos[1] - self.last_pos[1]),
                                     -80, 80)
                self.yaw = np.clip(self.yaw - (pos[0] - self.last_pos[0]),
                                   -120, 120)
            self.last_pos = pos

            glClearColor(0, 0, 0, 0)
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            glMatrixMode(GL_PROJECTION)
            glLoadIdentity()
            gluPerspective(60, win_size[0] / win_size[1], 0.01, 20.0)
            glMatrixMode(GL_MODELVIEW)
            glLoadIdentity()
            gluLookAt(0, 0, 0, 0, 0, 1, 0, -1, 0)
            glTranslatef(0, 0, 0.5)
            glRotated(self.pitch, 1, 0, 0)
            glRotated(self.yaw, 0, 1, 0)
            glTranslatef(0, 0, -0.5)

            # glPointSize(2)
            glEnable(GL_DEPTH_TEST)
            extrinsics = self.device.get_device_extrinsics(
                rs_stream.RS_STREAM_DEPTH, rs_stream.RS_STREAM_COLOR)
            depth_frame = self._recent_depth_frame
            color_frame = self._recent_frame
            depth_scale = self.device.depth_scale

            glEnableClientState(GL_VERTEX_ARRAY)

            pointcloud = self.device.pointcloud
            glVertexPointer(3, GL_FLOAT, 0, pointcloud)
            glEnableClientState(GL_COLOR_ARRAY)
            depth_to_color = np.zeros(
                depth_frame.height * depth_frame.width * 3, np.uint8)
            rsutilwrapper.project_pointcloud_to_pixel(
                depth_to_color,
                self.device.depth_intrinsics,
                self.device.color_intrinsics,
                extrinsics,
                pointcloud,
                self._recent_frame.bgr,
            )
            glColorPointer(3, GL_UNSIGNED_BYTE, 0, depth_to_color)
            glDrawArrays(GL_POINTS, 0, depth_frame.width * depth_frame.height)
            gl_utils.glFlush()
            glDisable(GL_DEPTH_TEST)
            # gl_utils.make_coord_system_norm_based()
            glfw.glfwSwapBuffers(self.depth_window)
            glfw.glfwMakeContextCurrent(active_window)

        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(
                self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3))

    def restart_device(
        self,
        device_id=None,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        if device_id is None:
            if self.device is not None:
                device_id = self.device.device_id
            else:
                device_id = 0
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = self.controls.export_presets()
        if self.device is not None:
            self.device.stop()
            self.device = None
        self.service.stop()
        self.service.start()
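        # the actual re-initialization happens asynchronously: this notification is
        # picked up again in on_notify() below, which calls _initialize_device()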
        self.notify_all({
            "subject": "realsense_source.restart",
            "device_id": device_id,
            "color_frame_size": color_frame_size,
            "color_fps": color_fps,
            "depth_frame_size": depth_frame_size,
            "depth_fps": depth_fps,
            "device_options": device_options,
        })

    def on_click(self, pos, button, action):
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
            self.mouse_drag = True
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_RELEASE:
            self.mouse_drag = False

    def on_notify(self, notification):
        if notification["subject"] == "realsense_source.restart":
            kwargs = notification.copy()
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif notification["subject"] == "recording.started":
            self.start_depth_recording(notification["rec_path"],
                                       notification["start_time_synced"])
        elif notification["subject"] == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc, start_time_synced):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = MPEG_Writer(video_path, start_time_synced)

    def stop_depth_recording(self):
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def frame_size(self):
        stream = self.streams[0]
        return stream.width, stream.height

    @frame_size.setter
    def frame_size(self, new_size):
        if self.device is not None and new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        return self.streams[0].fps

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        stream = self.streams[1]
        return stream.width, stream.height

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if self.device is not None and new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        return self.streams[1].fps

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        return False

    @property
    def online(self):
        return self.device and self.device.is_streaming()

    @property
    def name(self):
        # not the same as `if self.device:`!
        if self.device is not None:
            return self.device.name
        else:
            return "Ghost capture"
Exemple #9
0
def eye(
    timebase,
    is_alive_flag,
    ipc_pub_url,
    ipc_sub_url,
    ipc_push_url,
    user_dir,
    version,
    eye_id,
    overwrite_cap_settings=None,
):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools

    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                        ipc_sub_url,
                                        topics=("notify", ))

    # logging setup
    import logging

    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.NOTSET)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    if is_alive_flag.value:
        # another eye process with this id is already running: abort the duplicate startup
        logger.warning("Aborting redundant eye process startup")
        return

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
        # general imports
        import traceback
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible, glViewport
        from ui_roi import UIRoi

        # monitoring
        import psutil

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, MPEG_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        from background_helper import IPC_Logging_Task_Proxy

        IPC_Logging_Task_Proxy.push_url = ipc_push_url

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D, Detector_Dummy

        pupil_detectors = {
            Detector_2D.__name__: Detector_2D,
            Detector_3D.__name__: Detector_3D,
            Detector_Dummy.__name__: Detector_Dummy,
        }

        # UI Platform tweaks
        if platform.system() == "Linux":
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id + 30)
        elif platform.system() == "Windows":
            scroll_factor = 10.0
            window_position_default = (600, 90 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        icon_bar_width = 50
        window_size = None
        camera_render_size = None
        hdpi_factor = 1.0

        # g_pool holds variables for this process
        g_pool = SimpleNamespace()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = "capture"
        g_pool.process = "eye{}".format(eye_id)
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            nonlocal window_size
            nonlocal camera_render_size
            nonlocal hdpi_factor

            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(window)
            hdpi_factor = glfw.getHDPIFactor(window)
            g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
            window_size = w, h
            camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
            g_pool.gui.update_window(w, h)
            g_pool.gui.collect_menus()
            for g in g_pool.graphs:
                g.scale = hdpi_factor
                g.adjust_window_size(w, h)
            adjust_gl_view(w, h)
            glfw.glfwMakeContextCurrent(active_window)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            x *= hdpi_factor
            y *= hdpi_factor
            g_pool.gui.update_mouse(x, y)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            paths = [paths[x].decode("utf-8") for x in range(count)]
            plugins = (g_pool.capture_manager, g_pool.capture)
            for plugin in plugins:
                if plugin.on_drop(paths):
                    break

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir,
                         "user_settings_eye{}".format(eye_id)))
        if VersionFormat(session_settings.get("version",
                                              "0.0")) != g_pool.version:
            logger.info(
                "Session setting are from a different version of this app. I will not use those."
            )
            session_settings.clear()

        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get("flip", False)
        g_pool.display_mode = session_settings.get("display_mode",
                                                   "camera_image")
        g_pool.display_mode_info_text = {
            "camera_image":
            "Raw eye camera image. This uses the least amount of CPU power",
            "roi":
            "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            "algorithm":
            "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
        }

        capture_manager_settings = session_settings.get(
            "capture_manager_settings", ("UVC_Manager", {}))

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__: c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](
            g_pool, **manager_settings)

        if eye_id == 0:
            cap_src = [
                "Pupil Cam3 ID0", "Pupil Cam2 ID0", "Pupil Cam1 ID0", "HD-6000"
            ]
        else:
            cap_src = ["Pupil Cam3 ID1", "Pupil Cam2 ID1", "Pupil Cam1 ID1"]

        # Initialize capture
        default_settings = (
            "UVC_Source",
            {
                "preferred_names": cap_src,
                "frame_size": (320, 240),
                "frame_rate": 120
            },
        )

        capture_source_settings = overwrite_cap_settings or session_settings.get(
            "capture_settings", default_settings)
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__: c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](
            g_pool, **source_settings)
        assert g_pool.capture

        g_pool.u_r = UIRoi(
            (g_pool.capture.frame_size[1], g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get("roi")
        if roi_user_settings and tuple(
                roi_user_settings[-1]) == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        pupil_detector_settings = session_settings.get(
            "pupil_detector_settings", None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            "last_pupil_detector", Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,
                                                    pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.deinit_ui()
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_ui()

        def toggle_general_settings(collapsed):
            # this is the menu toggle logic.
            # Only one menu can be open.
            # If no menu is open the menubar should collapse.
            g_pool.menubar.collapsed = collapsed
            for m in g_pool.menubar.elements:
                m.collapsed = True
            general_settings.collapsed = collapsed

        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)

        width, height = g_pool.capture.frame_size
        width *= 2
        height *= 2
        width += icon_bar_width
        width, height = session_settings.get("window_size", (width, height))

        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get("window_position",
                                          window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(
            np.ones((1, 1), dtype=np.uint8) + 125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
        g_pool.menubar = ui.Scrolling_Menu("Settings",
                                           pos=(-500, 0),
                                           size=(-icon_bar_width, 0),
                                           header_pos="left")
        g_pool.iconbar = ui.Scrolling_Menu("Icons",
                                           pos=(-icon_bar_width, 0),
                                           size=(0, 0),
                                           header_pos="hidden")
        g_pool.gui.append(g_pool.menubar)
        g_pool.gui.append(g_pool.iconbar)

        general_settings = ui.Growing_Menu("General", header_pos="headline")
        general_settings.append(
            ui.Selector(
                "gui_user_scale",
                g_pool,
                setter=set_scale,
                selection=[0.8, 0.9, 1.0, 1.1, 1.2],
                label="Interface Size",
            ))

        def set_window_size():
            f_width, f_height = g_pool.capture.frame_size
            f_width *= 2
            f_height *= 2
            f_width += int(icon_bar_width * g_pool.gui.scale)
            glfw.glfwSetWindowSize(main_window, f_width, f_height)

        def uroi_on_mouse_button(button, action, mods):
            if g_pool.display_mode == "roi":
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the ROI handled the interaction we don't want
                    # the GUI to react to it as well
                    return
                elif action == glfw.GLFW_PRESS:
                    x, y = glfw.glfwGetCursorPos(main_window)
                    # pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    x *= hdpi_factor
                    y *= hdpi_factor
                    pos = normalize((x, y), camera_render_size)
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    # Position in image pixels
                    pos = denormalize(pos, g_pool.capture.frame_size)
                    if g_pool.u_r.mouse_over_edit_pt(pos,
                                                     g_pool.u_r.handle_size,
                                                     g_pool.u_r.handle_size):
                        # if the ROI handled the interaction we don't want
                        # the GUI to react to it as well
                        return

        general_settings.append(ui.Button("Reset window size",
                                          set_window_size))
        general_settings.append(
            ui.Switch("flip", g_pool, label="Flip image display"))
        general_settings.append(
            ui.Selector(
                "display_mode",
                g_pool,
                setter=set_display_mode_info,
                selection=["camera_image", "roi", "algorithm"],
                labels=["Camera Image", "ROI", "Algorithm"],
                label="Mode",
            ))
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)

        detector_selector = ui.Selector(
            "pupil_detector",
            getter=lambda: g_pool.pupil_detector.__class__,
            setter=set_detector,
            selection=[Detector_Dummy, Detector_2D, Detector_3D],
            labels=["disabled", "C++ 2d detector", "C++ 3d detector"],
            label="Detection method",
        )
        general_settings.append(detector_selector)

        g_pool.menubar.append(general_settings)
        icon = ui.Icon(
            "collapsed",
            general_settings,
            label=chr(0xE8B8),
            on_val=False,
            off_val=True,
            setter=toggle_general_settings,
            label_font="pupil_icons",
        )
        icon.tooltip = "General Settings"
        g_pool.iconbar.append(icon)
        toggle_general_settings(False)

        g_pool.pupil_detector.init_ui()
        g_pool.capture.init_ui()
        g_pool.capture_manager.init_ui()
        g_pool.writer = None

        def replace_source(source_class_name, source_settings):
            g_pool.capture.deinit_ui()
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](
                g_pool, **source_settings)
            g_pool.capture.init_ui()
            if g_pool.writer:
                logger.info("Done recording.")
                try:
                    g_pool.writer.release()
                except RuntimeError:
                    logger.error("No eye video recorded")
                g_pool.writer = None

        g_pool.replace_source = replace_source  # for ndsi capture

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get("ui_config", {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 50)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = "CPU %0.1f"

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 50)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = "jpeg"
        frame_publish_format_recent_warning = False

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning("Process started.")

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification["subject"]
                if subject.startswith("eye_process.should_stop"):
                    if notification["eye_id"] == eye_id:
                        break
                elif subject == "set_detection_mapping_mode":
                    if notification["mode"] == "3d":
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    elif notification["mode"] == "2d":
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                    else:
                        if not isinstance(g_pool.pupil_detector,
                                          Detector_Dummy):
                            set_detector(Detector_Dummy)
                        detector_selector.read_only = True
                elif subject == "recording.started":
                    if notification["record_eye"] and g_pool.capture.online:
                        record_path = notification["rec_path"]
                        raw_mode = notification["compression"]
                        start_time_synced = notification["start_time_synced"]
                        logger.info(
                            "Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path,
                                                  "eye{}.mp4".format(eye_id))
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(
                                video_path, start_time_synced)
                        elif hasattr(g_pool.capture._recent_frame,
                                     "h264_buffer"):
                            g_pool.writer = H264Writer(
                                video_path,
                                g_pool.capture.frame_size[0],
                                g_pool.capture.frame_size[1],
                                g_pool.capture.frame_rate,
                            )
                        else:
                            g_pool.writer = MPEG_Writer(
                                video_path, start_time_synced)
                elif subject == "recording.stopped":
                    if g_pool.writer:
                        logger.info("Done recording.")
                        try:
                            g_pool.writer.release()
                        except RuntimeError:
                            logger.error("No eye video recorded")
                        g_pool.writer = None
                elif subject.startswith("meta.should_doc"):
                    ipc_socket.notify({
                        "subject": "meta.doc",
                        "actor": "eye{}".format(eye_id),
                        "doc": eye.__doc__,
                    })
                elif subject.startswith("frame_publishing.started"):
                    should_publish_frames = True
                    frame_publish_format = notification.get("format", "jpeg")
                elif subject.startswith("frame_publishing.stopped"):
                    should_publish_frames = False
                    frame_publish_format = "jpeg"
                elif (subject.startswith("start_eye_capture")
                      and notification["target"] == g_pool.process):
                    replace_source(notification["name"], notification["args"])
                elif notification["subject"].startswith(
                        "pupil_detector.set_property"):
                    target_process = notification.get("target", g_pool.process)
                    should_apply = target_process == g_pool.process

                    if should_apply:
                        try:
                            property_name = notification["name"]
                            property_value = notification["value"]
                            if "2d" in notification["subject"]:
                                g_pool.pupil_detector.set_2d_detector_property(
                                    property_name, property_value)
                            elif "3d" in notification["subject"]:
                                if not isinstance(g_pool.pupil_detector,
                                                  Detector_3D):
                                    raise ValueError(
                                        "3d properties are only available"
                                        " if 3d detector is active")
                                g_pool.pupil_detector.set_3d_detector_property(
                                    property_name, property_value)
                            elif property_name == "roi":
                                try:
                                    # Modify the ROI with the values sent over network
                                    minX, maxX, minY, maxY = property_value
                                    g_pool.u_r.set([
                                        max(g_pool.u_r.min_x, int(minX)),
                                        max(g_pool.u_r.min_y, int(minY)),
                                        min(g_pool.u_r.max_x, int(maxX)),
                                        min(g_pool.u_r.max_y, int(maxY)),
                                    ])
                                except ValueError as err:
                                    raise ValueError(
                                        "ROI needs to be list of 4 integers:"
                                        "(minX, maxX, minY, maxY)") from err
                            else:
                                raise KeyError(
                                    "Notification subject does not "
                                    "specifiy detector type nor modify ROI.")
                            logger.debug("`{}` property set to {}".format(
                                property_name, property_value))
                        except KeyError:
                            logger.error("Malformed notification received")
                            logger.debug(traceback.format_exc())
                        except (ValueError, TypeError):
                            logger.error("Invalid property or value")
                            logger.debug(traceback.format_exc())
                elif notification["subject"].startswith(
                        "pupil_detector.broadcast_properties"):
                    target_process = notification.get("target", g_pool.process)
                    should_respond = target_process == g_pool.process
                    if should_respond:
                        props = g_pool.pupil_detector.get_detector_properties()
                        properties_broadcast = {
                            "subject": "pupil_detector.properties.{}".format(eye_id),
                            **props,  # add properties to broadcast
                        }
                        ipc_socket.notify(properties_broadcast)
                g_pool.capture.on_notify(notification)
                g_pool.capture_manager.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get("frame")
            g_pool.capture_manager.recent_events(event)
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (
                        f_height,
                        f_width,
                ):
                    g_pool.pupil_detector.on_resolution_change(
                        (g_pool.u_r.array_shape[1], g_pool.u_r.array_shape[0]),
                        g_pool.capture.frame_size,
                    )
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        assert data is not None
                    except (AttributeError, AssertionError, NameError):
                        if not frame_publish_format_recent_warning:
                            frame_publish_format_recent_warning = True
                            logger.warning(
                                '{}s are not compatible with format "{}"'.
                                format(type(frame), frame_publish_format))
                    else:
                        frame_publish_format_recent_warning = False
                        pupil_socket.send({
                            "topic": "frame.eye.{}".format(eye_id),
                            "width": frame.width,
                            "height": frame.height,
                            "index": frame.index,
                            "timestamp": frame.timestamp,
                            "format": frame_publish_format,
                            "__raw_data__": [data],
                        })

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1.0 / dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result = g_pool.pupil_detector.detect(
                    frame, g_pool.u_r, g_pool.display_mode == "algorithm")
                if result is not None:
                    result["id"] = eye_id
                    result["topic"] = "pupil.{}".format(eye_id)
                    pupil_socket.send(result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == "algorithm":
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ("camera_image", "roi"):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    glViewport(0, 0, *camera_render_size)
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3),
                                                  g_pool.flip)
                    if frame and result:
                        if result["method"] == "3d c++":
                            eye_ball = result["projected_sphere"]
                            try:
                                pts = cv2.ellipse2Poly(
                                    (
                                        int(eye_ball["center"][0]),
                                        int(eye_ball["center"][1]),
                                    ),
                                    (
                                        int(eye_ball["axes"][0] / 2),
                                        int(eye_ball["axes"][1] / 2),
                                    ),
                                    int(eye_ball["angle"]),
                                    0,
                                    360,
                                    8,
                                )
                            except ValueError:
                                pass
                            else:
                                draw_polyline(
                                    pts,
                                    2,
                                    RGBA(0.0, 0.9, 0.1,
                                         result["model_confidence"]),
                                )
                        if result["confidence"] > 0:
                            if "ellipse" in result:
                                pts = cv2.ellipse2Poly(
                                    (
                                        int(result["ellipse"]["center"][0]),
                                        int(result["ellipse"]["center"][1]),
                                    ),
                                    (
                                        int(result["ellipse"]["axes"][0] / 2),
                                        int(result["ellipse"]["axes"][1] / 2),
                                    ),
                                    int(result["ellipse"]["angle"]),
                                    0,
                                    360,
                                    15,
                                )
                                confidence = result["confidence"] * 0.7
                                draw_polyline(pts, 1,
                                              RGBA(1.0, 0, 0, confidence))
                                draw_points(
                                    [result["ellipse"]["center"]],
                                    size=20,
                                    color=RGBA(1.0, 0.0, 0.0, confidence),
                                    sharpness=1.0,
                                )

                    glViewport(0, 0, *camera_render_size)
                    make_coord_system_pixel_based((f_height, f_width, 3),
                                                  g_pool.flip)
                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == "roi":
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    glViewport(0, 0, *window_size)
                    make_coord_system_pixel_based((*window_size[::-1], 3),
                                                  g_pool.flip)
                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    unused_elements = g_pool.gui.update()
                    for butt in unused_elements.buttons:
                        uroi_on_mouse_button(*butt)

                    make_coord_system_pixel_based((*window_size[::-1], 3),
                                                  g_pool.flip)

                    g_pool.pupil_detector.visualize(
                    )  # detector decides if we visualize or not

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer.release()
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # need to do this for windows os
        # save session persistent settings
        session_settings["gui_scale"] = g_pool.gui_user_scale
        session_settings["roi"] = g_pool.u_r.get()
        session_settings["flip"] = g_pool.flip
        session_settings["display_mode"] = g_pool.display_mode
        session_settings["ui_config"] = g_pool.gui.configuration
        session_settings["capture_settings"] = (
            g_pool.capture.class_name,
            g_pool.capture.get_init_dict(),
        )
        session_settings["capture_manager_settings"] = (
            g_pool.capture_manager.class_name,
            g_pool.capture_manager.get_init_dict(),
        )
        session_settings["window_position"] = glfw.glfwGetWindowPos(
            main_window)
        session_settings["version"] = str(g_pool.version)
        session_settings[
            "last_pupil_detector"] = g_pool.pupil_detector.__class__.__name__
        session_settings[
            "pupil_detector_settings"] = g_pool.pupil_detector.get_settings()

        session_window_size = glfw.glfwGetWindowSize(main_window)
        if 0 not in session_window_size:
            session_settings["window_size"] = session_window_size

        session_settings.close()

        g_pool.capture.deinit_ui()
        g_pool.capture_manager.deinit_ui()
        g_pool.pupil_detector.deinit_ui()

        g_pool.pupil_detector.cleanup()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()

        glfw.glfwDestroyWindow(main_window)
        g_pool.gui.terminate()
        glfw.glfwTerminate()
        logger.info("Process shutting down.")