Example #1
    def gl_display(self):
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(-self.h_pad,  (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        # if self.drag_mode:
        #     color1 = (0.,.8,.5,.5)
        #     color2 = (0.,.8,.5,1.)
        # else:
        color1 = (1,1,1,0.4)#(.25,.8,.8,.5)
        color2 = (1,1,1,1.)#(.25,.8,.8,1.)

        thickness = 10.
        draw_polyline(verts=[(0,0),(self.current_frame_index,0)],
            thickness=thickness,color=RGBA(*color1))
        draw_polyline(verts=[(self.current_frame_index,0),(self.frame_count,0)],
            thickness=thickness,color=RGBA(*color1))
        if not self.drag_mode:
            draw_points([(self.current_frame_index,0)],color=RGBA(*color1),size=30)
        draw_points([(self.current_frame_index,0)],color=RGBA(*color2),size=20)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
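
The timeline gl_display examples in this collection (Example #1 and several later ones) all repeat the same projection push/pop boilerplate. Below is a minimal sketch of factoring that pattern into a context manager, assuming PyOpenGL; `timeline_coord_system` is a hypothetical helper name, not part of the project.

from contextlib import contextmanager

from OpenGL.GL import (GL_MODELVIEW, GL_PROJECTION, glLoadIdentity,
                       glMatrixMode, glPopMatrix, glPushMatrix)
from OpenGL.GLU import gluOrtho2D


@contextmanager
def timeline_coord_system(frame_count, h_pad, v_pad):
    # Save both matrices, set an orthographic projection covering the cache
    # (0..frame_count horizontally, 0..1 vertically, plus padding), then restore.
    glMatrixMode(GL_PROJECTION)
    glPushMatrix()
    glLoadIdentity()
    gluOrtho2D(-h_pad, frame_count + h_pad, -v_pad, 1 + v_pad)
    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    glLoadIdentity()
    try:
        yield
    finally:
        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()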
Example #2
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        r = p_window_size[0] / 15.0
        # compensate for radius of marker
        gl.glOrtho(-r, p_window_size[0] + r, p_window_size[1] + r, -r, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        # hacky way of scaling and fitting in different window ratios/sizes
        grid = _make_grid() * min((p_window_size[0], p_window_size[1] * 5.5 / 4.0))
        # center the pattern
        grid -= np.mean(grid)
        grid += (p_window_size[0] / 2 - r, p_window_size[1] / 2 + r)

        draw_points(grid, size=r, color=RGBA(0.0, 0.0, 0.0, 1), sharpness=0.95)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.0))
            self.glfont.draw_text(
                p_window_size[0] / 2.0,
                p_window_size[1] / 4.0,
                "Touch {} more times to close window.".format(self.clicks_to_close),
            )

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        r = p_window_size[0]/15.
        # compensate for radius of marker
        gl.glOrtho(-r,p_window_size[0]+r,p_window_size[1]+r,-r ,-1,1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        #hacky way of scaling and fitting in different window ratios/sizes
        grid = _make_grid()*min((p_window_size[0],p_window_size[1]*5.5/4.))
        #center the pattern
        grid -= np.mean(grid)
        grid +=(p_window_size[0]/2-r,p_window_size[1]/2+r)

        draw_points(grid,size=r,color=RGBA(0.,0.,0.,1),sharpness=0.95)

        if self.clicks_to_close <5:
            self.glfont.set_size(int(p_window_size[0]/30.))
            self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch %s more times to close window.'%self.clicks_to_close)

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example #4
    def gl_display(self):
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(
            -self.h_pad, (self.frame_count) + self.h_pad, -self.v_pad, 1 + self.v_pad
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        if self.drag_mode:
            color1 = (0.0, 0.8, 0.5, 0.5)
            color2 = (0.0, 0.8, 0.5, 1.0)
        else:
            color1 = (0.25, 0.8, 0.8, 0.5)
            color2 = (0.25, 0.8, 0.8, 1.0)

        draw_polyline(verts=[(0, 0), (self.current_frame_index, 0)], color=RGBA(*color1))
        draw_polyline(verts=[(self.current_frame_index, 0), (self.frame_count, 0)], color=RGBA(0.5, 0.5, 0.5, 0.5))
        draw_points([(self.current_frame_index, 0)], color=RGBA(*color1), size=40)
        draw_points([(self.current_frame_index, 0)], color=RGBA(*color2), size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
    def gl_display(self):
        """
        This is where we can draw to any gl surface.
        By default this is the main window; below we change that.
        """

        # activate our window
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self.window)

        # start drawing things:
        gl_utils.clear_gl_screen()
        # set coordinate system to be between 0 and 1 of the extents of the window
        gl_utils.make_coord_system_norm_based()
        # draw the image
        draw_gl_texture(self.img)

        # make coordinate system identical to the img pixel coordinate system
        gl_utils.make_coord_system_pixel_based(self.img.shape)
        # draw some points on top of the image
        # notice how these show up in our window but not in the main window
        draw_points([(200, 400), (600, 400)], color=RGBA(0., 4., .8, .8), size=self.my_var)
        draw_polyline([(200, 400), (600, 400)], color=RGBA(0., 4., .8, .8), thickness=3)

        # since this is our own window we need to swap buffers in the plugin
        glfwSwapBuffers(self.window)

        # and finally reactivate the main window
        glfwMakeContextCurrent(active_window)
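
The plugin example above follows a fixed pattern: remember the active window, make the plugin's own window current, draw, swap buffers, and restore. A minimal sketch of that pattern as a context manager, assuming the same glfw wrapper module these examples import (i.e. that glfwGetCurrentContext, glfwMakeContextCurrent and glfwSwapBuffers are in scope); `plugin_window_context` is a hypothetical name.

from contextlib import contextmanager


@contextmanager
def plugin_window_context(window):
    previous = glfwGetCurrentContext()    # remember whichever context is active
    glfwMakeContextCurrent(window)        # draw into the plugin's own window
    try:
        yield window
    finally:
        glfwSwapBuffers(window)           # present what was drawn
        glfwMakeContextCurrent(previous)  # reactivate the previous (main) window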
Example #6
    def gl_display(self):

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        glOrtho(
            -self.h_pad, (self.frame_count) + self.h_pad, -self.v_pad,
            1 + self.v_pad, -1, 1
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color1 = RGBA(.1, .9, .2, 1.)
        color2 = RGBA(.1, .9, .2, 1.)

        if self.in_mark != 0 or self.out_mark != self.frame_count:
            draw_polyline([(self.in_mark, 0), (self.out_mark, 0)],
                          color=color1,
                          thickness=20.)
        draw_points([
            (self.in_mark, 0),
        ], color=color2, size=20)
        draw_points([
            (self.out_mark, 0),
        ], color=color2, size=20)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
    def gl_display(self):
        # normalize coordinate system; no need for this step in the utility functions
        with gl_utils.Coord_System(0, 1, 0, 1):
            ref_point_norm = [r['norm_pos'] for r in self.circle_marker_positions
                              if self.g_pool.capture.get_frame_index() == r['index']]
            cygl_utils.draw_points(ref_point_norm, size=35, color=cygl_utils.RGBA(0, .5, 0.5, .7))
            cygl_utils.draw_points(ref_point_norm, size=5, color=cygl_utils.RGBA(.0, .9, 0.0, 1.0))

            manual_refs_in_frame = [r for r in self.manual_ref_positions
                                    if self.g_pool.capture.get_frame_index() in r['index_range']]
            current = self.g_pool.capture.get_frame_index()
            for mr in manual_refs_in_frame:
                if mr['index'] == current:
                    cygl_utils.draw_points([mr['norm_pos']], size=35, color=cygl_utils.RGBA(.0, .0, 0.9, .8))
                    cygl_utils.draw_points([mr['norm_pos']], size=5, color=cygl_utils.RGBA(.0, .9, 0.0, 1.0))
                else:
                    distance = abs(current - mr['index'])
                    range_radius = (mr['index_range'][-1] - mr['index_range'][0]) // 2
                    # scale alpha [.1, .9] depending on distance to current frame
                    alpha = distance / range_radius
                    alpha = 0.1 * alpha + 0.9 * (1. - alpha)
                    # Use draw_progress instead of draw_circle. draw_circle breaks
                    # because of the normalized coord-system.
                    cygl_utils.draw_progress(mr['norm_pos'], 0., 0.999,
                                             inner_radius=20.,
                                             outer_radius=35.,
                                             color=cygl_utils.RGBA(.0, .0, 0.9, alpha))
                    cygl_utils.draw_points([mr['norm_pos']], size=5, color=cygl_utils.RGBA(.0, .9, 0.0, alpha))

        # calculate correct timeline height. Triggers timeline redraw only if changed
        self.timeline.content_height = max(0.001, self.timeline_line_height * len(self.sections))
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """
        if self.active or self.visualize:
            # Draw hand detection results
            for (x1, y1, x2, y2), fingertips in zip(self.hand_viz, self.finger_viz):
                pts = np.array([[x1, y1], [x1, y2], [x2, y2], [x2, y1], [x1, y1]], np.int32)
                cygl_utils.draw_polyline(pts, thickness=3 * self.g_pool.gui_user_scale, color=cygl_utils.RGBA(0., 1., 0., 1.))
                for tip in fingertips:
                    if tip is not None:
                        y, x = tip
                        cygl_utils.draw_progress((x, y), 0., 1.,
                                      inner_radius=25 * self.g_pool.gui_user_scale,
                                      outer_radius=35 * self.g_pool.gui_user_scale,
                                      color=cygl_utils.RGBA(1., 1., 1., 1.),
                                      sharpness=0.9)

                        cygl_utils.draw_points([(x, y)], size=10 * self.g_pool.gui_user_scale,
                                    color=cygl_utils.RGBA(1., 1., 1., 1.),
                                    sharpness=0.9)
Example #9
    def _draw_marker_toggles(self, surface):
        active_markers_by_type = {}
        inactive_markers_by_type = {}

        for marker in self.tracker.markers:
            marker_type = marker.marker_type
            if (
                marker_type == Surface_Marker_Type.SQUARE
                and marker.perimeter < self.tracker.marker_detector.marker_min_perimeter
            ):
                continue

            centroid = marker.centroid()
            if marker.uid in surface.registered_markers_dist.keys():
                active_markers = active_markers_by_type.get(marker_type, [])
                active_markers.append(centroid)
                active_markers_by_type[marker_type] = active_markers
            else:
                inactive_markers = inactive_markers_by_type.get(marker_type, [])
                inactive_markers.append(centroid)
                inactive_markers_by_type[marker_type] = inactive_markers

        for marker_type, inactive_markers in inactive_markers_by_type.items():
            color_rgb = SURFACE_MARKER_TOGGLE_INACTIVE_COLOR_RGB_BY_TYPE[marker_type]
            color_rgba = rgb_to_rgba(color_rgb, alpha=0.8)
            pyglui_utils.draw_points(
                inactive_markers, size=20, color=pyglui_utils.RGBA(*color_rgba)
            )

        for marker_type, active_markers in active_markers_by_type.items():
            color_rgb = SURFACE_MARKER_TOGGLE_ACTIVE_COLOR_RGB_BY_TYPE[marker_type]
            color_rgba = rgb_to_rgba(color_rgb, alpha=0.8)
            pyglui_utils.draw_points(
                active_markers, size=20, color=pyglui_utils.RGBA(*color_rgba)
            )
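
The dict.get(key, []) grouping above can equivalently be written with collections.defaultdict; a small sketch of just the grouping step, with placeholder marker types and centroids standing in for the tracker's markers.

from collections import defaultdict

markers_by_type = defaultdict(list)
# Placeholder (marker_type, centroid) pairs; the real code groups marker centroids
# by marker.marker_type.
for marker_type, centroid in [("square", (120.0, 80.0)), ("square", (400.0, 260.0))]:
    markers_by_type[marker_type].append(centroid)
# markers_by_type now maps each marker type to its list of centroids.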
Example #10
def draw_ellipse(ellipse: Dict,
                 rgba: Tuple,
                 thickness: float,
                 draw_center: bool = False):
    try:
        pts = cv2.ellipse2Poly(
            center=(int(ellipse["center"][0]), int(ellipse["center"][1])),
            axes=(int(ellipse["axes"][0] / 2), int(ellipse["axes"][1] / 2)),
            angle=int(ellipse["angle"]),
            arcStart=0,
            arcEnd=360,
            delta=8,
        )
    except Exception as e:
        # Known issues:
        #   - There are reports of negative eye_ball axes when drawing the 3D eyeball
        #     outline, which will raise cv2.error. TODO: Investigate cause in detectors.
        logger.debug("Error drawing ellipse! Skipping...\n"
                     f"ellipse: {ellipse}\n"
                     f"{type(e)}: {e}")

    draw_polyline(pts, thickness, RGBA(*rgba))
    if draw_center:
        draw_points(
            [ellipse["center"]],
            size=20,
            color=RGBA(*rgba),
            sharpness=1.0,
        )
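
A hedged usage sketch for draw_ellipse as defined above: the dict layout (center, axes, angle) mirrors what cv2.ellipse2Poly consumes, but the concrete values are illustrative and an active GL drawing context is assumed.

example_ellipse = {
    "center": (320.0, 240.0),  # pixel position of the ellipse center
    "axes": (60.0, 40.0),      # full axis lengths; draw_ellipse halves them for cv2
    "angle": 30.0,             # rotation in degrees
}
draw_ellipse(example_ellipse, rgba=(1.0, 0.0, 0.0, 0.7), thickness=1, draw_center=True)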
Example #11
    def gl_display(self):
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(
            -self.h_pad, (self.frame_count) + self.h_pad, -self.v_pad,
            1 + self.v_pad
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        if self.drag_mode:
            color1 = (0., .8, .5, .5)
            color2 = (0., .8, .5, 1.)
        else:
            color1 = (.25, .8, .8, .5)
            color2 = (.25, .8, .8, 1.)

        draw_polyline(verts=[(0, 0), (self.current_frame_index, 0)],
                      color=RGBA(*color1))
        draw_polyline(verts=[(self.current_frame_index, 0),
                             (self.frame_count, 0)],
                      color=RGBA(.5, .5, .5, .5))
        draw_points([(self.current_frame_index, 0)],
                    color=RGBA(*color1),
                    size=40)
        draw_points([(self.current_frame_index, 0)],
                    color=RGBA(*color2),
                    size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Example #12
def draw_pupil_outline(pupil_detection_result_2d):
    """Requires `"ellipse" in pupil_detection_result_2d`"""
    if pupil_detection_result_2d["confidence"] <= 0.0:
        return

    try:
        pts = cv2.ellipse2Poly(
            (
                int(pupil_detection_result_2d["ellipse"]["center"][0]),
                int(pupil_detection_result_2d["ellipse"]["center"][1]),
            ),
            (
                int(pupil_detection_result_2d["ellipse"]["axes"][0] / 2),
                int(pupil_detection_result_2d["ellipse"]["axes"][1] / 2),
            ),
            int(pupil_detection_result_2d["ellipse"]["angle"]),
            0,
            360,
            15,
        )
    except ValueError:
        # Happens when converting 'nan' to int
        # TODO: Investigate why results are sometimes 'nan'
        return

    confidence = pupil_detection_result_2d["confidence"] * 0.7
    draw_polyline(pts, 1, RGBA(1.0, 0, 0, confidence))
    draw_points(
        [pupil_detection_result_2d["ellipse"]["center"]],
        size=20,
        color=RGBA(1.0, 0.0, 0.0, confidence),
        sharpness=1.0,
    )
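
The TODO above notes that detector results are sometimes NaN. A small sketch of an explicit guard that could replace the try/except, assuming only the standard library; `ellipse_is_finite` is a hypothetical helper.

import math


def ellipse_is_finite(ellipse) -> bool:
    # True only if center, axes and angle are all finite (no NaN or inf).
    values = (*ellipse["center"], *ellipse["axes"], ellipse["angle"])
    return all(math.isfinite(float(v)) for v in values)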
Example #13
    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        if self.mode == "Show Markers and Surfaces":
            for m in self.markers:
                hat = np.array(
                    [[[0, 0], [0, 1], [.5, 1.3], [1, 1], [1, 0], [0, 0]]],
                    dtype=np.float32)
                hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
                if m['perimeter'] >= self.min_marker_perimeter and m[
                        'id_confidence'] > self.min_id_confidence:
                    draw_polyline(hat.reshape((6, 2)),
                                  color=RGBA(0.1, 1., 1., .5))
                    draw_polyline(hat.reshape((6, 2)),
                                  color=RGBA(0.1, 1., 1., .3),
                                  line_type=GL_POLYGON)
                else:
                    draw_polyline(hat.reshape((6, 2)),
                                  color=RGBA(0.1, 1., 1., .5))

            for s in self.surfaces:
                if s not in self.edit_surfaces and s is not self.marker_edit_surface:
                    s.gl_draw_frame(self.img_shape)

            for s in self.edit_surfaces:
                s.gl_draw_frame(self.img_shape,
                                highlight=True,
                                surface_mode=True)
                s.gl_draw_corners()

            if self.marker_edit_surface:
                inc = []
                exc = []
                for m in self.markers:
                    if m['perimeter'] >= self.min_marker_perimeter:
                        if m['id'] in self.marker_edit_surface.markers:
                            inc.append(m['centroid'])
                        else:
                            exc.append(m['centroid'])
                draw_points(exc, size=20, color=RGBA(1., 0.5, 0.5, .8))
                draw_points(inc, size=20, color=RGBA(0.5, 1., 0.5, .8))
                self.marker_edit_surface.gl_draw_frame(self.img_shape,
                                                       color=(0.0, 0.9, 0.6,
                                                              1.0),
                                                       highlight=True,
                                                       marker_mode=True)

        elif self.mode == 'Show Heatmaps':
            for s in self.surfaces:
                if self.g_pool.app != 'player':
                    s.generate_heatmap()
                s.gl_display_heatmap()

        for s in self.surfaces:
            if self.locate_3d:
                s.gl_display_in_window_3d(self.g_pool.image_tex)
            else:
                s.gl_display_in_window(self.g_pool.image_tex)
 def _draw_current_reference(self, current_reference):
     with self._frame_coordinate_system:
         cygl_utils.draw_points(
             [current_reference.screen_pos],
             size=35,
             color=cygl_utils.RGBA(0, 0.5, 0.5, 0.7),
         )
         self._draw_inner_dot(current_reference)
Example #16
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = glfwGetFramebufferSize(
            self._window)[0] / glfwGetWindowSize(self._window)[0]
        r = 110 * self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]

        pad = .7 * r
        screen_pos = map_value(
            self.display_pos[0],
            out_range=(pad, p_window_size[0] - pad)), map_value(
                self.display_pos[1], out_range=(p_window_size[1] - pad, pad))
        alpha = interp_fn(
            self.screen_marker_state, 0., 1.,
            float(self.sample_duration + self.lead_in + self.lead_out),
            float(self.lead_in), float(self.sample_duration + self.lead_in))

        draw_concentric_circles(screen_pos, r, 4, alpha)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos],
                        size=10 * self.marker_scale,
                        color=RGBA(0., .8, 0., alpha),
                        sharpness=0.5)
        else:
            draw_points([screen_pos],
                        size=10 * self.marker_scale,
                        color=RGBA(0.8, 0., 0., alpha),
                        sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.))
            self.glfont.draw_text(
                p_window_size[0] / 2., p_window_size[1] / 4.,
                'Touch {} more times to cancel {}.'.format(
                    self.clicks_to_close, self.mode_pretty))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example #17
    def draw_pupil_data(self, key, width, height, scale):
        right = self.cache[key]["right"]
        left = self.cache[key]["left"]

        with gl_utils.Coord_System(*self.cache[key]["xlim"], *self.cache[key]["ylim"]):
            cygl_utils.draw_points(
                right, size=2.0 * scale, color=COLOR_LEGEND_EYE_RIGHT
            )
            cygl_utils.draw_points(left, size=2.0 * scale, color=COLOR_LEGEND_EYE_LEFT)
Example #19
    def _draw_surface_corner_handles(self, surface):
        img_corners = surface.map_from_surf(
            self.norm_corners.copy(),
            self.tracker.camera_model,
            compensate_distortion=False,
        )

        pyglui_utils.draw_points(
            img_corners, size=20, color=pyglui_utils.RGBA(*self.color_primary, 0.5)
        )
Example #20
    def _draw_surface_corner_handles(self, surface):
        img_corners = surface.map_from_surf(
            self.norm_corners.copy(),
            self.tracker.camera_model,
            compensate_distortion=False,
        )

        handle_color_rgba = rgb_to_rgba(self.color_primary_rgb, alpha=0.5)
        pyglui_utils.draw_points(
            img_corners, size=20, color=pyglui_utils.RGBA(*handle_color_rgba)
        )
Example #21
 def draw_data(self, width, height, scale, channel):
     with gl_utils.Coord_System(*self.cache["xlim"], *self.cache["ylim"]):
         channels = np.shape(self.cache["data"])[1]
         for i in range(channels):
             channel_data = np.reshape(self.cache["data"][:, channel], (-1)).tolist()
             whole_data = tuple(zip(channel_data, self.timestamps))
             if len(channel_data) == 1:
                 return
             cygl_utils.draw_points(
                 whole_data, size=2 * scale, color=COLOR_LEGEND_WORLD
             )
    def draw_pupil_data(self, key, width, height, scale):
        right = self.cache[key]['right']
        left = self.cache[key]['left']

        with gl_utils.Coord_System(*self.cache[key]['xlim'],
                                   *self.cache[key]['ylim']):
            cygl_utils.draw_points(right,
                                   size=2. * scale,
                                   color=COLOR_LEGEND_EYE_RIGHT)
            cygl_utils.draw_points(left,
                                   size=2. * scale,
                                   color=COLOR_LEGEND_EYE_LEFT)
Example #23
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = glfwGetFramebufferSize(self._window)[0] / glfwGetWindowSize(self._window)[0]
        r = 110 * self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]

        pad = 0.6 * r
        screen_pos = (
            map_value(self.display_pos[0], out_range=(pad, p_window_size[0] - pad)),
            map_value(self.display_pos[1], out_range=(p_window_size[1] - pad, pad)),
        )
        alpha = interp_fn(
            self.screen_marker_state,
            0.0,
            1.0,
            float(self.sample_duration + self.lead_in + self.lead_out),
            float(self.lead_in),
            float(self.sample_duration + self.lead_in),
        )

        draw_concentric_circles(screen_pos, r, 6, alpha)
        # some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos], size=5, color=RGBA(0.0, 0.8, 0.0, alpha), sharpness=0.5)
        else:
            draw_points([screen_pos], size=5, color=RGBA(0.8, 0.0, 0.0, alpha), sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.0))
            self.glfont.draw_text(
                p_window_size[0] / 2.0,
                p_window_size[1] / 4.0,
                "Touch %s more times to cancel calibration." % self.clicks_to_close,
            )

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
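
A short worked example of the map_value helper defined in the calibration examples: mapping a normalized display position (0..1) into padded pixel coordinates. The window size, marker radius, and padding are illustrative.

def map_value(value, in_range=(0, 1), out_range=(0, 1)):
    ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
    return (value - in_range[0]) * ratio + out_range[0]


pad = 0.6 * 110                                  # assume r = 110 px
x = map_value(0.5, out_range=(pad, 1280 - pad))  # -> 640.0, the horizontal center
y = map_value(0.0, out_range=(720 - pad, pad))   # -> 654.0; the flipped range maps norm y=0 to the bottom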
Example #24
 def draw_recent_pupil_positions(self):
     try:
         for gp in self.surface.gaze_history:
             pyglui_utils.draw_points(
                 [gp["norm_pos"]],
                 color=pyglui_utils.RGBA(0.0, 0.8, 0.5, 0.8),
                 size=80,
             )
     except AttributeError:
         # If gaze_history does not exist, we are in the Surface_Tracker_Offline.
         # In this case gaze visualizations will be drawn directly onto the scene
         # image and thus propagate to the surface crop automatically.
         pass
Example #26
 def _draw_surface_menu_buttons(
     self, surface, surface_edit_anchor, marker_edit_anchor
 ):
     # Buttons
     pyglui_utils.draw_points(
         [marker_edit_anchor], color=pyglui_utils.RGBA(*self.color_primary)
     )
     if surface in self._edit_surf_markers:
         pyglui_utils.draw_points(
             [marker_edit_anchor],
             size=13,
             color=pyglui_utils.RGBA(*self.color_secondary),
         )
     pyglui_utils.draw_points(
         [surface_edit_anchor], color=pyglui_utils.RGBA(*self.color_primary)
     )
     if surface in self._edit_surf_corners:
         pyglui_utils.draw_points(
             [surface_edit_anchor],
             size=13,
             color=pyglui_utils.RGBA(*self.color_secondary),
         )
     # Text
     self._draw_text(
         (surface_edit_anchor[0] + 15, surface_edit_anchor[1] + 6),
         "edit surface",
         self.color_secondary,
     )
     self._draw_text(
         (marker_edit_anchor[0] + 15, marker_edit_anchor[1] + 6),
         "add/remove markers",
         self.color_secondary,
     )
Example #27
    def gl_display(self):
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()

        glOrtho(-self.h_pad,  (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad,-1,1) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color1 = RGBA(.1,.9,.2,.5)
        color2 = RGBA(.1,.9,.9,.5)

        if self.in_mark != 0 or self.out_mark != self.frame_count:
            draw_polyline( [(self.in_mark,0),(self.out_mark,0)],color=color1,thickness=2)
 
        draw_points([(self.in_mark,0),],color=color1,size=10)
        draw_points([(self.out_mark,0),],color=color1,size=10)

        if self.sections:
            for s in self.sections:
                if self.sections.index(s) != self.focus:
                    draw_polyline( [(s[0],0),(s[1],0)],color=RGBA(.1,.9,.9,.2),thickness=2)
                for mark in s:
                    draw_points([(mark,0),],color=color2,size=5)

        if self.mid_sections:
            for m in self.mid_sections:
                draw_points([(m,0),],color=RGBA(.1,.9,.9,.1),size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Example #28
 def _draw_surface_menu_buttons(self, surface, surface_edit_anchor,
                                marker_edit_anchor):
     # Buttons
     edit_button_color_rgba = rgb_to_rgba(self.color_primary_rgb)
     edit_anchor_color_rgba = rgb_to_rgba(self.color_secondary_rgb)
     text_color_rgba = rgb_to_rgba(self.color_secondary_rgb)
     pyglui_utils.draw_points(
         [marker_edit_anchor],
         color=pyglui_utils.RGBA(*edit_button_color_rgba))
     if surface in self._edit_surf_markers:
         pyglui_utils.draw_points(
             [marker_edit_anchor],
             size=13,
             color=pyglui_utils.RGBA(*edit_anchor_color_rgba),
         )
     pyglui_utils.draw_points(
         [surface_edit_anchor],
         color=pyglui_utils.RGBA(*edit_button_color_rgba))
     if surface in self._edit_surf_corners:
         pyglui_utils.draw_points(
             [surface_edit_anchor],
             size=13,
             color=pyglui_utils.RGBA(*edit_anchor_color_rgba),
         )
     # Text
     self._draw_text(
         (surface_edit_anchor[0] + 15, surface_edit_anchor[1] + 6),
         "edit surface",
         text_color_rgba,
     )
     self._draw_text(
         (marker_edit_anchor[0] + 15, marker_edit_anchor[1] + 6),
         "add/remove markers",
         text_color_rgba,
     )
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = getHDPIFactor(self._window)
        r = self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]

        pad = 90 * r
        screen_pos = map_value(
            self.display_pos[0],
            out_range=(pad, p_window_size[0] - pad)), map_value(
                self.display_pos[1], out_range=(p_window_size[1] - pad, pad))
        alpha = 1.0  #interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))

        r2 = 2 * r
        draw_points([screen_pos],
                    size=60 * r2,
                    color=RGBA(0., 0., 0., alpha),
                    sharpness=0.9)
        draw_points([screen_pos],
                    size=38 * r2,
                    color=RGBA(1., 1., 1., alpha),
                    sharpness=0.8)
        draw_points([screen_pos],
                    size=19 * r2,
                    color=RGBA(0., 0., 0., alpha),
                    sharpness=0.55)

        # some feedback on the detection state
        color = RGBA(0., .8, 0., alpha) if len(
            self.markers) and self.on_position else RGBA(0.8, 0., 0., alpha)
        draw_points([screen_pos], size=3 * r2, color=color, sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.))
            self.glfont.draw_text(
                p_window_size[0] / 2., p_window_size[1] / 4.,
                'Touch {} more times to cancel calibration.'.format(
                    self.clicks_to_close))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example #30
 def gl_draw_frame(self,
                   img_size,
                   color=(1.0, 0.2, 0.6, 1.0),
                   highlight=False,
                   surface_mode=False,
                   marker_mode=False):
     """
     draw surface and markers
     """
     if self.detected:
         r, g, b, a = color
         frame = np.array([[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]],
                          dtype=np.float32)
         hat = np.array([[[.3, .7], [.7, .7], [.5, .9], [.3, .7]]],
                        dtype=np.float32)
         hat = cv2.perspectiveTransform(hat, self.m_to_screen)
         frame = cv2.perspectiveTransform(frame, self.m_to_screen)
         alpha = min(1, self.build_up_status / self.required_build_up)
         if highlight:
             draw_polyline_norm(frame.reshape((5, 2)),
                                1,
                                RGBA(r, g, b, a * .1),
                                line_type=GL_POLYGON)
         draw_polyline_norm(frame.reshape((5, 2)), 1,
                            RGBA(r, g, b, a * alpha))
         draw_polyline_norm(hat.reshape((4, 2)), 1,
                            RGBA(r, g, b, a * alpha))
         text_anchor = frame.reshape((5, -1))[2]
         text_anchor[1] = 1 - text_anchor[1]
         text_anchor *= img_size[1], img_size[0]
         text_anchor = text_anchor[0], text_anchor[1] - 75
         surface_edit_anchor = text_anchor[0], text_anchor[1] + 25
         marker_edit_anchor = text_anchor[0], text_anchor[1] + 50
         if marker_mode:
             draw_points([marker_edit_anchor], color=RGBA(0, .8, .7))
         else:
             draw_points([marker_edit_anchor])
         if surface_mode:
             draw_points([surface_edit_anchor], color=RGBA(0, .8, .7))
         else:
             draw_points([surface_edit_anchor])
         self.glfont.set_blur(3.9)
         self.glfont.set_color_float((0, 0, 0, .8))
         self.glfont.draw_text(text_anchor[0] + 15, text_anchor[1] + 6,
                               self.marker_status())
         self.glfont.draw_text(surface_edit_anchor[0] + 15,
                               surface_edit_anchor[1] + 6, 'edit surface')
         self.glfont.draw_text(marker_edit_anchor[0] + 15,
                               marker_edit_anchor[1] + 6,
                               'add/remove markers')
         self.glfont.set_blur(0.0)
         self.glfont.set_color_float((0.1, 8., 8., .9))
         self.glfont.draw_text(text_anchor[0] + 15, text_anchor[1] + 6,
                               self.marker_status())
         self.glfont.draw_text(surface_edit_anchor[0] + 15,
                               surface_edit_anchor[1] + 6, 'edit surface')
         self.glfont.draw_text(marker_edit_anchor[0] + 15,
                               marker_edit_anchor[1] + 6,
                               'add/remove markers')
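
A brief sketch of the cv2.perspectiveTransform step used in gl_draw_frame above: the unit-square surface outline is mapped into screen space with a 3x3 homography. The identity homography here is only a placeholder for the surface's m_to_screen.

import cv2
import numpy as np

# Unit-square outline shaped (1, N, 2), as cv2.perspectiveTransform expects.
frame = np.array([[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]], dtype=np.float32)
m_to_screen = np.eye(3, dtype=np.float32)  # placeholder homography (identity)
frame_in_screen = cv2.perspectiveTransform(frame, m_to_screen).reshape((5, 2))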
Example #31
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        # Set the projection matrix using glOrtho to include padding for the marker of radius r
        #
        ############################
        #            r             #
        # 0,0##################w,0 #
        # #                      # #
        # #                      # #
        #r#                      #r#
        # #                      # #
        # #                      # #
        # 0,h##################w,h #
        #            r             #
        ############################


        hdpi_factor = glfwGetFramebufferSize(self._window)[0]/glfwGetWindowSize(self._window)[0]
        r = 110*self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        # compensate for radius of marker
        gl.glOrtho(-r*.6,p_window_size[0]+r*.6,p_window_size[1]+r*.7,-r*.7 ,-1,1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        screen_pos = denormalize(self.display_pos,p_window_size,flip_y=True)
        alpha = interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))

        draw_concentric_circles(screen_pos,r,6,alpha)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos],size=5,color=RGBA(0.,1.,0.,alpha),sharpness=0.95)
        else:
            draw_points([screen_pos],size=5,color=RGBA(1.,0.,0.,alpha),sharpness=0.95)

        if self.clicks_to_close <5:
            self.glfont.set_size(int(p_window_size[0]/30.))
            self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch %s more times to cancel calibration.'%self.clicks_to_close)

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example #32
    def draw_data(self, width, height, scale):

        if len(self.x[self.channel]) == 0:
            return

        x = self.x[self.channel] - self.x[self.channel][0]
        xlim = [min(x), max(x) + 0.01]
        ylim = [min(self.y[self.channel]), max(self.y[self.channel]) + 0.00001]

        with gl_utils.Coord_System(*xlim, *ylim):
            whole_data = tuple(
                zip(self.x[self.channel] - self.x[self.channel][0],
                    self.y[self.channel]))
            cygl_utils.draw_points(whole_data,
                                   size=2 * scale,
                                   color=COLOR_LEGEND_WORLD)
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)
        clear_gl_screen()

        gl.glColor3f(.80, .80, .8)
        self.draw_rect(0, 0, 2000, 2000)
        self.draw_markers()


        hdpi_factor = glfwGetFramebufferSize(self._window)[0]/glfwGetWindowSize(self._window)[0]
        r = 110*self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value,in_range=(0,1),out_range=(0,1)):
            ratio = (out_range[1]-out_range[0])/(in_range[1]-in_range[0])
            return (value-in_range[0])*ratio+out_range[0]

        #pad = 0.1*1920
        pad = .7*r
        screen_pos = map_value(self.display_pos[0],out_range=(pad,p_window_size[0]-pad)),map_value(self.display_pos[1],out_range=(p_window_size[1]-pad,pad))
        alpha = interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))
        if self.screen_marker_state < self.sample_duration+self.lead_in+self.lead_out:
            draw_concentric_circles(screen_pos,r,6,alpha)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos],size=10*self.marker_scale,color=RGBA(0.,.8,0.,alpha),sharpness=0.5)
        elif self.screen_marker_state < self.sample_duration+self.lead_in+self.lead_out:
            draw_points([screen_pos],size=10*self.marker_scale,color=RGBA(0.8,0.,0.,alpha),sharpness=0.5)

        #if self.clicks_to_close <5:
        #    self.glfont.set_size(int(p_window_size[0]/30.))
        #   self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch {} more times to cancel calibration.'.format(self.clicks_to_close))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example #34
 def _draw_grouped(self, data, keys, y_limits, width, height, scale):
     ts_min = self.g_pool.timestamps[0]
     ts_max = self.g_pool.timestamps[-1]
     data_raw = data[keys]
     sub_samples = np.linspace(
         0,
         self.data_len - 1,
         min(self.NUMBER_SAMPLES_TIMELINE, self.data_len),
         dtype=int,
     )
     with gl_utils.Coord_System(ts_min, ts_max, *y_limits):
         for key in keys:
             data_keyed = data_raw[key]
             if data_keyed.shape[0] == 0:
                 continue
             points = list(zip(self.data_ts[sub_samples], data_keyed[sub_samples]))
             cygl_utils.draw_points(points, size=1.5 * scale, color=self.CMAP[key])
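
A minimal sketch of the subsampling step in _draw_grouped above: at most NUMBER_SAMPLES_TIMELINE evenly spaced indices are selected so the timeline never draws more points than it can usefully display. The constant and data length are illustrative.

import numpy as np

NUMBER_SAMPLES_TIMELINE = 4000  # illustrative value
data_len = 120000               # illustrative value
sub_samples = np.linspace(0, data_len - 1, min(NUMBER_SAMPLES_TIMELINE, data_len), dtype=int)
# data_ts[sub_samples] and data_keyed[sub_samples] then index the reduced point set.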
Example #35
    def gl_draw_frame(self,img_size,color = (1.0,0.2,0.6,1.0),highlight=False,surface_mode=False,marker_mode=False):
        """
        draw surface and markers
        """
        if self.detected:
            r,g,b,a = color
            frame = np.array([[[0,0],[1,0],[1,1],[0,1],[0,0]]],dtype=np.float32)
            hat = np.array([[[.3,.7],[.7,.7],[.5,.9],[.3,.7]]],dtype=np.float32)
            hat = cv2.perspectiveTransform(hat,self.m_to_screen)
            frame = cv2.perspectiveTransform(frame,self.m_to_screen)
            alpha = min(1,self.build_up_status/self.required_build_up)
            if highlight:
                draw_polyline_norm(frame.reshape((5,2)),1,RGBA(r,g,b,a*.1),line_type=GL_POLYGON)
            draw_polyline_norm(frame.reshape((5,2)),1,RGBA(r,g,b,a*alpha))
            draw_polyline_norm(hat.reshape((4,2)),1,RGBA(r,g,b,a*alpha))
            text_anchor = frame.reshape((5,-1))[2]
            text_anchor[1] = 1-text_anchor[1]
            text_anchor *=img_size[1],img_size[0]
            text_anchor = text_anchor[0],text_anchor[1]-75
            surface_edit_anchor = text_anchor[0],text_anchor[1]+25
            marker_edit_anchor = text_anchor[0],text_anchor[1]+50
            if self.defined:
                if marker_mode:
                    draw_points([marker_edit_anchor],color=RGBA(0,.8,.7))
                else:
                    draw_points([marker_edit_anchor])
                if surface_mode:
                    draw_points([surface_edit_anchor],color=RGBA(0,.8,.7))
                else:
                    draw_points([surface_edit_anchor])

                self.glfont.set_blur(3.9)
                self.glfont.set_color_float((0,0,0,.8))
                self.glfont.draw_text(text_anchor[0]+15,text_anchor[1]+6,self.marker_status())
                self.glfont.draw_text(surface_edit_anchor[0]+15,surface_edit_anchor[1]+6,'edit surface')
                self.glfont.draw_text(marker_edit_anchor[0]+15,marker_edit_anchor[1]+6,'add/remove markers')
                self.glfont.set_blur(0.0)
                self.glfont.set_color_float((0.1,8.,8.,.9))
                self.glfont.draw_text(text_anchor[0]+15,text_anchor[1]+6,self.marker_status())
                self.glfont.draw_text(surface_edit_anchor[0]+15,surface_edit_anchor[1]+6,'edit surface')
                self.glfont.draw_text(marker_edit_anchor[0]+15,marker_edit_anchor[1]+6,'add/remove markers')
            else:
                progress = (self.build_up_status/float(self.required_build_up))*100
                progress_text = '%.0f%%'%progress
                self.glfont.set_blur(3.9)
                self.glfont.set_color_float((0,0,0,.8))
                self.glfont.draw_text(text_anchor[0]+15,text_anchor[1]+6,self.marker_status())
                self.glfont.draw_text(surface_edit_anchor[0]+15,surface_edit_anchor[1]+6,'Learning affiliated markers...')
                self.glfont.draw_text(marker_edit_anchor[0]+15,marker_edit_anchor[1]+6,progress_text)
                self.glfont.set_blur(0.0)
                self.glfont.set_color_float((0.1,8.,8.,.9))
                self.glfont.draw_text(text_anchor[0]+15,text_anchor[1]+6,self.marker_status())
                self.glfont.draw_text(surface_edit_anchor[0]+15,surface_edit_anchor[1]+6,'Learning affiliated markers...')
                self.glfont.draw_text(marker_edit_anchor[0]+15,marker_edit_anchor[1]+6,progress_text)
Example #36
    def gl_display(self):
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(
            -self.h_pad, (self.frame_count) + self.h_pad, -self.v_pad,
            1 + self.v_pad
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        # custom events
        for e in self.custom_events:
            draw_polyline([(e, .06), (e, .005)], color=RGBA(.8, .8, .8, .8))

        size = len(self.custom_events)
        if size > 1:
            for i, e in enumerate(self.custom_events):
                draw_points([(e, .03)], size=5, color=RGBA(.1, .5, .5, 1.))

            i = 0
            while True:
                if i == 0:
                    draw_polyline([(self.custom_events[i], .03),
                                   (self.custom_events[i + 1], 0.03)],
                                  color=RGBA(.8, .8, .8, .8))
                elif (i > 0) and (i < (size - 1)):
                    draw_polyline([(self.custom_events[i] + 1, .03),
                                   (self.custom_events[i + 1], 0.03)],
                                  color=RGBA(.8, .8, .8, .8))

                if 'chain' in self.mode:
                    i += 1
                elif 'in out pairs' in self.mode:
                    i += 2

                if i > (size - 1):
                    break

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Example #37
    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        if self.mode == "Show Markers and Surfaces":
            for m in self.markers:
                hat = np.array([[[0, 0], [0, 1], [0.5, 1.3], [1, 1], [1, 0], [0, 0]]], dtype=np.float32)
                hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
                if m["perimeter"] >= self.min_marker_perimeter:
                    draw_polyline(hat.reshape((6, 2)), color=RGBA(0.1, 1.0, 1.0, 0.5))
                    draw_polyline(hat.reshape((6, 2)), color=RGBA(0.1, 1.0, 1.0, 0.3), line_type=GL_POLYGON)
                else:
                    draw_polyline(hat.reshape((6, 2)), color=RGBA(0.1, 1.0, 1.0, 0.5))

            for s in self.surfaces:
                if s not in self.edit_surfaces and s is not self.marker_edit_surface:
                    s.gl_draw_frame(self.img_shape)

            for s in self.edit_surfaces:
                s.gl_draw_frame(self.img_shape, highlight=True, surface_mode=True)
                s.gl_draw_corners()

            if self.marker_edit_surface:
                inc = []
                exc = []
                for m in self.markers:
                    if m["perimeter"] >= self.min_marker_perimeter:
                        if m["id"] in self.marker_edit_surface.markers:
                            inc.append(m["centroid"])
                        else:
                            exc.append(m["centroid"])
                draw_points(exc, size=20, color=RGBA(1.0, 0.5, 0.5, 0.8))
                draw_points(inc, size=20, color=RGBA(0.5, 1.0, 0.5, 0.8))
                self.marker_edit_surface.gl_draw_frame(
                    self.img_shape, color=(0.0, 0.9, 0.6, 1.0), highlight=True, marker_mode=True
                )

        for s in self.surfaces:
            if self.locate_3d:
                s.gl_display_in_window_3d(self.g_pool.image_tex, self.camera_calibration)
            else:
                s.gl_display_in_window(self.g_pool.image_tex)
Example #38
    def _draw_marker_toggles(self, surface):
        active_markers = []
        inactive_markers = []
        for marker in self.tracker.markers:
            if marker.perimeter < self.tracker.marker_min_perimeter:
                continue

            centroid = np.mean(marker.verts_px, axis=0)
            centroid = (centroid[0, 0], centroid[0, 1])
            if marker.id in surface.registered_markers_dist.keys():
                active_markers.append(centroid)
            else:
                inactive_markers.append(centroid)

        pyglui_utils.draw_points(
            inactive_markers, size=20, color=pyglui_utils.RGBA(*self.color_primary, 0.8)
        )
        pyglui_utils.draw_points(
            active_markers, size=20, color=pyglui_utils.RGBA(*self.color_tertiary, 0.8)
        )
Example #39
    def gl_display(self):

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        glOrtho(-self.h_pad,  (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad,-1,1) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color1 = RGBA(.1,.9,.2,1.)
        color2 = RGBA(.1,.9,.2,1.)

        if self.in_mark != 0 or self.out_mark != self.frame_count:
            draw_polyline( [(self.in_mark,0),(self.out_mark,0)],color=color1,thickness=20.)
        draw_points([(self.in_mark,0),],color=color2,size=20)
        draw_points([(self.out_mark,0),],color=color2,size=20)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = glfwGetFramebufferSize(self._window)[0]/glfwGetWindowSize(self._window)[0]
        r = self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value,in_range=(0,1),out_range=(0,1)):
            ratio = (out_range[1]-out_range[0])/(in_range[1]-in_range[0])
            return (value-in_range[0])*ratio+out_range[0]

        pad = 90 * r
        screen_pos = map_value(self.display_pos[0],out_range=(pad,p_window_size[0]-pad)),map_value(self.display_pos[1],out_range=(p_window_size[1]-pad,pad))
        alpha = interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))

        r2 = 2 * r
        draw_points([screen_pos], size=60*r2, color=RGBA(0., 0., 0., alpha), sharpness=0.9)
        draw_points([screen_pos], size=38*r2, color=RGBA(1., 1., 1., alpha), sharpness=0.8)
        draw_points([screen_pos], size=19*r2, color=RGBA(0., 0., 0., alpha), sharpness=0.55)

        # some feedback on the detection state and button pressing
        if self.detected and self.on_position and self.space_key_was_pressed:
            color = RGBA(.8,.8,0., alpha)
        else:
            if self.detected:
                color = RGBA(0.,.8,0., alpha)
            else:
                color = RGBA(.8,0.,0., alpha)
        draw_points([screen_pos],size=3*r2,color=color,sharpness=0.5)

        if self.clicks_to_close <5:
            self.glfont.set_size(int(p_window_size[0]/30.))
            self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch {} more times to cancel {}.'.format(self.clicks_to_close, self.mode_pretty))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example #41
    def _draw_marker_toggles(self, surface):
        active_markers = []
        inactive_markers = []
        for marker in self.tracker.markers:
            if marker.perimeter < self.tracker.marker_min_perimeter:
                continue

            centroid = np.mean(marker.verts_px, axis=0)
            centroid = (centroid[0, 0], centroid[0, 1])
            if marker.id in surface.registered_markers_dist.keys():
                active_markers.append(centroid)
            else:
                inactive_markers.append(centroid)

        pyglui_utils.draw_points(inactive_markers,
                                 size=20,
                                 color=pyglui_utils.RGBA(
                                     *self.color_primary, 0.8))
        pyglui_utils.draw_points(active_markers,
                                 size=20,
                                 color=pyglui_utils.RGBA(
                                     *self.color_tertiary, 0.8))
Example #42
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """
        if self.active or self.visualize:
            # Draw hand detection results
            for (x1, y1, x2, y2), fingertips in zip(self.hand_viz, self.finger_viz):
                pts = np.array(
                    [[x1, y1], [x1, y2], [x2, y2], [x2, y1], [x1, y1]], np.int32
                )
                cygl_utils.draw_polyline(
                    pts,
                    thickness=3 * self.g_pool.gui_user_scale,
                    color=cygl_utils.RGBA(0.0, 1.0, 0.0, 1.0),
                )
                for tip in fingertips:
                    if tip is not None:
                        y, x = tip
                        cygl_utils.draw_progress(
                            (x, y),
                            0.0,
                            1.0,
                            inner_radius=25 * self.g_pool.gui_user_scale,
                            outer_radius=35 * self.g_pool.gui_user_scale,
                            color=cygl_utils.RGBA(1.0, 1.0, 1.0, 1.0),
                            sharpness=0.9,
                        )

                        cygl_utils.draw_points(
                            [(x, y)],
                            size=10 * self.g_pool.gui_user_scale,
                            color=cygl_utils.RGBA(1.0, 1.0, 1.0, 1.0),
                            sharpness=0.9,
                        )
Example #43
def draw_ellipse(
    ellipse: Dict, rgba: Tuple, thickness: float, draw_center: bool = False
):
    try:
        pts = cv2.ellipse2Poly(
            center=(int(ellipse["center"][0]), int(ellipse["center"][1])),
            axes=(int(ellipse["axes"][0] / 2), int(ellipse["axes"][1] / 2)),
            angle=int(ellipse["angle"]),
            arcStart=0,
            arcEnd=360,
            delta=8,
        )
    except ValueError:
        # Happens when converting 'nan' to int
        # TODO: Investigate why results are sometimes 'nan'
        logger.debug(f"WARN: trying to draw ellipse with 'NaN' data: {ellipse}")
        return

    draw_polyline(pts, thickness, RGBA(*rgba))
    if draw_center:
        draw_points(
            [ellipse["center"]], size=20, color=RGBA(*rgba), sharpness=1.0,
        )
Example #44
    def gl_display(self):
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(-self.h_pad,  (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        # custom events
        for e in self.custom_events:
            draw_polyline([(e,.06),(e,.005)], color = RGBA(.8, .8, .8, .8))

        size = len(self.custom_events)
        if size > 1:
            for i, e in enumerate(self.custom_events):
                draw_points([(e, .03)], size = 5, color = RGBA(.1, .5, .5, 1.)) 

            i = 0
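            # connect the event markers: every consecutive pair in 'chain' mode,
            # (in, out) pairs (stepping by two) in 'in out pairs' mode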
            while True:
                if i == 0:
                    draw_polyline([(self.custom_events[i],.03),(self.custom_events[i+1],0.03)], color = RGBA(.8, .8, .8, .8))
                elif (i > 0) and (i < (size-1)):
                    draw_polyline([(self.custom_events[i] +1,.03),(self.custom_events[i+1],0.03)], color = RGBA(.8, .8, .8, .8))

                if 'chain' in self.mode:
                    i += 1
                elif 'in out pairs' in self.mode:
                    i += 2

                if i > (size-1):
                    break

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Example #45
0
 def draw_fps(self, width, height, scale):
     with gl_utils.Coord_System(*self.cache["xlim"], *self.cache["ylim"]):
         if self.show_world_fps:
             cygl_utils.draw_points(self.cache["world"],
                                    size=2 * scale,
                                    color=COLOR_LEGEND_WORLD)
         if self.show_eye_fps:
             cygl_utils.draw_points(self.cache["eye0"],
                                    size=2 * scale,
                                    color=COLOR_LEGEND_EYE_RIGHT)
             cygl_utils.draw_points(self.cache["eye1"],
                                    size=2 * scale,
                                    color=COLOR_LEGEND_EYE_LEFT)
Example #46
0
 def draw_fps(self, width, height, scale):
     with gl_utils.Coord_System(*self.cache["xlim"], *self.cache["ylim"]):
         if self.show_world_fps:
             cygl_utils.draw_points(
                 self.cache["world"], size=2 * scale, color=COLOR_LEGEND_WORLD
             )
         if self.show_eye_fps:
             cygl_utils.draw_points(
                 self.cache["eye0"], size=2 * scale, color=COLOR_LEGEND_EYE_RIGHT
             )
             cygl_utils.draw_points(
                 self.cache["eye1"], size=2 * scale, color=COLOR_LEGEND_EYE_LEFT
             )
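Both draw_fps() variants above read from a pre-built `self.cache`; a rough sketch of what such a cache could look like follows. The layout is an assumption inferred from how the cache is used (lists of (x, y) points plus axis limits for gl_utils.Coord_System), not taken from the source.

cache = {
    # each series: (x, y) points in the coordinate system set up by Coord_System
    "world": [(t, 30.0) for t in range(300)],
    "eye0": [(t, 120.0) for t in range(300)],
    "eye1": [(t, 120.0) for t in range(300)],
    "xlim": (0, 300),  # horizontal limits passed to Coord_System
    "ylim": (0, 200),  # vertical limits (fps range)
}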
Example #47
0
    def __draw_circle_marker(self, position: T.Optional[T.Tuple[float, float]],
                             is_valid: bool, alpha: float):
        if position is None:
            return

        r2 = 2 * self.__marker_radius
        screen_point = self.__marker_position_on_screen(position)

        if is_valid:
            marker_circle_rgb_feedback = self._MARKER_CIRCLE_RGB_FEEDBACK_VALID
        else:
            marker_circle_rgb_feedback = self._MARKER_CIRCLE_RGB_FEEDBACK_INVALID
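        # four concentric dots drawn at screen_point, later calls rendering on top:
        # outer ring, middle, inner, and a feedback dot whose color depends on `is_valid`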

        draw_points(
            [screen_point],
            size=self._MARKER_CIRCLE_SIZE_OUTER * r2,
            color=RGBA(*self._MARKER_CIRCLE_RGB_OUTER, alpha),
            sharpness=self._MARKER_CIRCLE_SHARPNESS_OUTER,
        )
        draw_points(
            [screen_point],
            size=self._MARKER_CIRCLE_SIZE_MIDDLE * r2,
            color=RGBA(*self._MARKER_CIRCLE_RGB_MIDDLE, alpha),
            sharpness=self._MARKER_CIRCLE_SHARPNESS_MIDDLE,
        )
        draw_points(
            [screen_point],
            size=self._MARKER_CIRCLE_SIZE_INNER * r2,
            color=RGBA(*self._MARKER_CIRCLE_RGB_INNER, alpha),
            sharpness=self._MARKER_CIRCLE_SHARPNESS_INNER,
        )
        draw_points(
            [screen_point],
            size=self._MARKER_CIRCLE_SIZE_FEEDBACK * r2,
            color=RGBA(*marker_circle_rgb_feedback, alpha),
            sharpness=self._MARKER_CIRCLE_SHARPNESS_FEEDBACK,
        )
Example #48
0
	def update_window(self, g_pool, result ):

		if not result:
			return

		if not self.window:
			return

		self.begin_update_window()

		self.image_width , self.image_height = g_pool.capture.frame_size

		latest_circle = result['circle']
		predicted_circle = result['predicted_circle']
		edges =  result['edges']
		sphere_models = result['models']

		self.clear_gl_screen()
		self.trackball.push()

		# 2. in pixel space draw video frame
		glLoadMatrixf(self.get_image_space_matrix(15))
		g_pool.image_tex.draw( quad=((0,self.image_height),(self.image_width,self.image_height),(self.image_width,0),(0,0)) ,alpha=0.5)

		glLoadMatrixf(self.get_adjusted_pixel_space_matrix(15))
		self.draw_frustum( self.image_width, self.image_height, self.focal_length )

		glLoadMatrixf(self.get_anthropomorphic_matrix())
		model_count = 0;
		sphere_color = RGBA( 0,147/255.,147/255.,0.2)
		initial_sphere_color = RGBA( 0,147/255.,147/255.,0.2)

		alternative_sphere_color = RGBA( 1,0.5,0.5,0.05)
		alternative_initial_sphere_color = RGBA( 1,0.5,0.5,0.05)

		for model in sphere_models:
			bin_positions = model['bin_positions']
			sphere = model['sphere']
			initial_sphere = model['initial_sphere']

			if model_count == 0:
				# self.draw_sphere(initial_sphere[0],initial_sphere[1], color = sphere_color )
				self.draw_sphere(sphere[0],sphere[1],  color = initial_sphere_color )
				glutils.draw_points(bin_positions, 3 , RGBA(0.6,0.0,0.6,0.5) )

			else:
				#self.draw_sphere(initial_sphere[0],initial_sphere[1], color = alternative_sphere_color )
				self.draw_sphere(sphere[0],sphere[1],  color = alternative_initial_sphere_color )

			model_count += 1


		self.draw_circle( latest_circle[0], latest_circle[1], latest_circle[2], RGBA(0.0,1.0,1.0,0.4))
		# self.draw_circle( predicted_circle[0], predicted_circle[1], predicted_circle[2], RGBA(1.0,0.0,0.0,0.4))

		glutils.draw_points(edges, 2 , RGBA(1.0,0.0,0.6,0.5) )

		glLoadMatrixf(self.get_anthropomorphic_matrix())
		self.draw_coordinate_system(4)

		self.trackball.pop()

		self.draw_debug_info(result)

		self.end_update_window()

		return True
Example #49
0
 def _draw_inner_dot(reference_location):
     cygl_utils.draw_points(
         [reference_location.screen_pos],
         size=5,
         color=cygl_utils.RGBA(0.0, 0.9, 0.0, 1.0),
     )
Example #50
0
def eye(
    timebase,
    is_alive_flag,
    ipc_pub_url,
    ipc_sub_url,
    ipc_push_url,
    user_dir,
    version,
    eye_id,
    overwrite_cap_settings=None,
):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise the world process and each eye process would also load each other's imports.
    import zmq
    import zmq_tools

    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))

    # logging setup
    import logging

    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.NOTSET)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    if is_alive_flag.value:
        # indicates to the eye process that this is a duplicated startup
        logger.warning("Aborting redundant eye process startup")
        return

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
        # general imports
        import traceback
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible, glViewport
        from ui_roi import UIRoi

        # monitoring
        import psutil

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, AV_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        from background_helper import IPC_Logging_Task_Proxy

        IPC_Logging_Task_Proxy.push_url = ipc_push_url

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D, Detector_Dummy

        pupil_detectors = {
            Detector_2D.__name__: Detector_2D,
            Detector_3D.__name__: Detector_3D,
            Detector_Dummy.__name__: Detector_Dummy,
        }

        # UI Platform tweaks
        if platform.system() == "Linux":
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id + 30)
        elif platform.system() == "Windows":
            scroll_factor = 10.0
            window_position_default = (600, 90 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        icon_bar_width = 50
        window_size = None
        camera_render_size = None
        hdpi_factor = 1.0

        # g_pool holds variables for this process
        g_pool = SimpleNamespace()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = "capture"
        g_pool.process = "eye{}".format(eye_id)
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            nonlocal window_size
            nonlocal camera_render_size
            nonlocal hdpi_factor

            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(window)
            hdpi_factor = glfw.getHDPIFactor(window)
            g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
            window_size = w, h
            camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
            g_pool.gui.update_window(w, h)
            g_pool.gui.collect_menus()
            for g in g_pool.graphs:
                g.scale = hdpi_factor
                g.adjust_window_size(w, h)
            adjust_gl_view(w, h)
            glfw.glfwMakeContextCurrent(active_window)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            x *= hdpi_factor
            y *= hdpi_factor
            g_pool.gui.update_mouse(x, y)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            paths = [paths[x].decode("utf-8") for x in range(count)]
            plugins = (g_pool.capture_manager, g_pool.capture)
            # call `on_drop` callbacks until a plugin indicates
            # that it has consumed the event (by returning True)
            any(p.on_drop(paths) for p in plugins)

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir, "user_settings_eye{}".format(eye_id))
        )
        if VersionFormat(session_settings.get("version", "0.0")) != g_pool.version:
            logger.info(
                "Session setting are from a different version of this app. I will not use those."
            )
            session_settings.clear()

        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get("flip", False)
        g_pool.display_mode = session_settings.get("display_mode", "camera_image")
        g_pool.display_mode_info_text = {
            "camera_image": "Raw eye camera image. This uses the least amount of CPU power",
            "roi": "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            "algorithm": "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
        }

        capture_manager_settings = session_settings.get(
            "capture_manager_settings", ("UVC_Manager", {})
        )

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__: c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](
            g_pool, **manager_settings
        )

        if eye_id == 0:
            cap_src = ["Pupil Cam3 ID0", "Pupil Cam2 ID0", "Pupil Cam1 ID0", "HD-6000"]
        else:
            cap_src = ["Pupil Cam3 ID1", "Pupil Cam2 ID1", "Pupil Cam1 ID1"]

        # Initialize capture
        default_settings = (
            "UVC_Source",
            {"preferred_names": cap_src, "frame_size": (320, 240), "frame_rate": 120},
        )

        capture_source_settings = overwrite_cap_settings or session_settings.get(
            "capture_settings", default_settings
        )
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__: c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](
            g_pool, **source_settings
        )
        assert g_pool.capture

        g_pool.u_r = UIRoi((g_pool.capture.frame_size[1], g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get("roi")
        if roi_user_settings and tuple(roi_user_settings[-1]) == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        pupil_detector_settings = session_settings.get("pupil_detector_settings", None)
        last_pupil_detector = pupil_detectors[
            session_settings.get("last_pupil_detector", Detector_2D.__name__)
        ]
        g_pool.pupil_detector = last_pupil_detector(g_pool, pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.deinit_ui()
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_ui()

        def toggle_general_settings(collapsed):
            # this is the menu toggle logic.
            # Only one menu can be open.
            # If no menu is open the menubar should collapse.
            g_pool.menubar.collapsed = collapsed
            for m in g_pool.menubar.elements:
                m.collapsed = True
            general_settings.collapsed = collapsed

        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)

        width, height = g_pool.capture.frame_size
        width *= 2
        height *= 2
        width += icon_bar_width
        width, height = session_settings.get("window_size", (width, height))

        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get("window_position", window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(np.ones((1, 1), dtype=np.uint8) + 125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
        g_pool.menubar = ui.Scrolling_Menu(
            "Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
        )
        g_pool.iconbar = ui.Scrolling_Menu(
            "Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
        )
        g_pool.gui.append(g_pool.menubar)
        g_pool.gui.append(g_pool.iconbar)

        general_settings = ui.Growing_Menu("General", header_pos="headline")
        general_settings.append(
            ui.Selector(
                "gui_user_scale",
                g_pool,
                setter=set_scale,
                selection=[0.8, 0.9, 1.0, 1.1, 1.2],
                label="Interface Size",
            )
        )

        def set_window_size():
            f_width, f_height = g_pool.capture.frame_size
            f_width *= 2
            f_height *= 2
            f_width += int(icon_bar_width * g_pool.gui.scale)
            glfw.glfwSetWindowSize(main_window, f_width, f_height)

        def uroi_on_mouse_button(button, action, mods):
            if g_pool.display_mode == "roi":
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the roi interacts we don't want
                    # the gui to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    x, y = glfw.glfwGetCursorPos(main_window)
                    # pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    x *= hdpi_factor
                    y *= hdpi_factor
                    pos = normalize((x, y), camera_render_size)
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    # Position in img pixels
                    pos = denormalize(
                        pos, g_pool.capture.frame_size
                    )  # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(
                        pos, g_pool.u_r.handle_size, g_pool.u_r.handle_size
                    ):
                        # if the roi interacts we don't want
                        # the gui to interact as well
                        return

        general_settings.append(ui.Button("Reset window size", set_window_size))
        general_settings.append(ui.Switch("flip", g_pool, label="Flip image display"))
        general_settings.append(
            ui.Selector(
                "display_mode",
                g_pool,
                setter=set_display_mode_info,
                selection=["camera_image", "roi", "algorithm"],
                labels=["Camera Image", "ROI", "Algorithm"],
                label="Mode",
            )
        )
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode]
        )

        general_settings.append(g_pool.display_mode_info)

        detector_selector = ui.Selector(
            "pupil_detector",
            getter=lambda: g_pool.pupil_detector.__class__,
            setter=set_detector,
            selection=[Detector_Dummy, Detector_2D, Detector_3D],
            labels=["disabled", "C++ 2d detector", "C++ 3d detector"],
            label="Detection method",
        )
        general_settings.append(detector_selector)

        g_pool.menubar.append(general_settings)
        icon = ui.Icon(
            "collapsed",
            general_settings,
            label=chr(0xE8B8),
            on_val=False,
            off_val=True,
            setter=toggle_general_settings,
            label_font="pupil_icons",
        )
        icon.tooltip = "General Settings"
        g_pool.iconbar.append(icon)
        toggle_general_settings(False)

        g_pool.pupil_detector.init_ui()
        g_pool.capture.init_ui()
        g_pool.capture_manager.init_ui()
        g_pool.writer = None

        def replace_source(source_class_name, source_settings):
            g_pool.capture.deinit_ui()
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](
                g_pool, **source_settings
            )
            g_pool.capture.init_ui()
            if g_pool.writer:
                logger.info("Done recording.")
                try:
                    g_pool.writer.release()
                except RuntimeError:
                    logger.error("No eye video recorded")
                g_pool.writer = None

        g_pool.replace_source = replace_source  # for ndsi capture

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get("ui_config", {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 50)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = "CPU %0.1f"

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 50)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = "jpeg"
        frame_publish_format_recent_warning = False

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning("Process started.")

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification["subject"]
                if subject.startswith("eye_process.should_stop"):
                    if notification["eye_id"] == eye_id:
                        break
                elif subject == "set_detection_mapping_mode":
                    if notification["mode"] == "3d":
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    elif notification["mode"] == "2d":
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                    else:
                        if not isinstance(g_pool.pupil_detector, Detector_Dummy):
                            set_detector(Detector_Dummy)
                        detector_selector.read_only = True
                elif subject == "recording.started":
                    if notification["record_eye"] and g_pool.capture.online:
                        record_path = notification["rec_path"]
                        raw_mode = notification["compression"]
                        logger.info("Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(
                            record_path, "eye{}.mp4".format(eye_id)
                        )
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(
                                video_path, g_pool.capture.frame_rate
                            )
                        elif hasattr(g_pool.capture._recent_frame, "h264_buffer"):
                            g_pool.writer = H264Writer(
                                video_path,
                                g_pool.capture.frame_size[0],
                                g_pool.capture.frame_size[1],
                                g_pool.capture.frame_rate,
                            )
                        else:
                            g_pool.writer = AV_Writer(
                                video_path, g_pool.capture.frame_rate
                            )
                elif subject == "recording.stopped":
                    if g_pool.writer:
                        logger.info("Done recording.")
                        try:
                            g_pool.writer.release()
                        except RuntimeError:
                            logger.error("No eye video recorded")
                        g_pool.writer = None
                elif subject.startswith("meta.should_doc"):
                    ipc_socket.notify(
                        {
                            "subject": "meta.doc",
                            "actor": "eye{}".format(eye_id),
                            "doc": eye.__doc__,
                        }
                    )
                elif subject.startswith("frame_publishing.started"):
                    should_publish_frames = True
                    frame_publish_format = notification.get("format", "jpeg")
                elif subject.startswith("frame_publishing.stopped"):
                    should_publish_frames = False
                    frame_publish_format = "jpeg"
                elif (
                    subject.startswith("start_eye_capture")
                    and notification["target"] == g_pool.process
                ):
                    replace_source(notification["name"], notification["args"])
                elif notification["subject"].startswith("pupil_detector.set_property"):
                    target_process = notification.get("target", g_pool.process)
                    should_apply = target_process == g_pool.process

                    if should_apply:
                        try:
                            property_name = notification["name"]
                            property_value = notification["value"]
                            if "2d" in notification["subject"]:
                                g_pool.pupil_detector.set_2d_detector_property(
                                    property_name, property_value
                                )
                            elif "3d" in notification["subject"]:
                                if not isinstance(g_pool.pupil_detector, Detector_3D):
                                    raise ValueError(
                                        "3d properties are only available"
                                        " if 3d detector is active"
                                    )
                                g_pool.pupil_detector.set_3d_detector_property(
                                    property_name, property_value
                                )
                            else:
                                raise KeyError(
                                    "Notification subject does not "
                                    "specifiy detector type."
                                )
                            logger.debug(
                                "`{}` property set to {}".format(
                                    property_name, property_value
                                )
                            )
                        except KeyError:
                            logger.error("Malformed notification received")
                            logger.debug(traceback.format_exc())
                        except (ValueError, TypeError):
                            logger.error("Invalid property or value")
                            logger.debug(traceback.format_exc())
                elif notification["subject"].startswith(
                    "pupil_detector.broadcast_properties"
                ):
                    target_process = notification.get("target", g_pool.process)
                    should_respond = target_process == g_pool.process
                    if should_respond:
                        props = g_pool.pupil_detector.get_detector_properties()
                        properties_broadcast = {
                            "subject": "pupil_detector.properties.{}".format(eye_id),
                            **props,  # add properties to broadcast
                        }
                        ipc_socket.notify(properties_broadcast)
                g_pool.capture.on_notify(notification)
                g_pool.capture_manager.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get("frame")
            g_pool.capture_manager.recent_events(event)
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (
                    f_height,
                    f_width,
                ):
                    g_pool.pupil_detector.on_resolution_change(
                        (g_pool.u_r.array_shape[1], g_pool.u_r.array_shape[0]),
                        g_pool.capture.frame_size,
                    )
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        assert data is not None
                    except (AttributeError, AssertionError, NameError):
                        if not frame_publish_format_recent_warning:
                            frame_publish_format_recent_warning = True
                            logger.warning(
                                '{}s are not compatible with format "{}"'.format(
                                    type(frame), frame_publish_format
                                )
                            )
                    else:
                        frame_publish_format_recent_warning = False
                        pupil_socket.send(
                            {
                                "topic": "frame.eye.{}".format(eye_id),
                                "width": frame.width,
                                "height": frame.height,
                                "index": frame.index,
                                "timestamp": frame.timestamp,
                                "format": frame_publish_format,
                                "__raw_data__": [data],
                            }
                        )

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1.0 / dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result = g_pool.pupil_detector.detect(
                    frame, g_pool.u_r, g_pool.display_mode == "algorithm"
                )
                if result is not None:
                    result["id"] = eye_id
                    result["topic"] = "pupil.{}".format(eye_id)
                    pupil_socket.send(result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == "algorithm":
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ("camera_image", "roi"):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    glViewport(0, 0, *camera_render_size)
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
                    if frame and result:
                        if result["method"] == "3d c++":
                            eye_ball = result["projected_sphere"]
                            try:
                                pts = cv2.ellipse2Poly(
                                    (
                                        int(eye_ball["center"][0]),
                                        int(eye_ball["center"][1]),
                                    ),
                                    (
                                        int(eye_ball["axes"][0] / 2),
                                        int(eye_ball["axes"][1] / 2),
                                    ),
                                    int(eye_ball["angle"]),
                                    0,
                                    360,
                                    8,
                                )
                            except ValueError:
                                # NaN values in the projected sphere cannot be converted to int
                                pass
                            else:
                                draw_polyline(
                                    pts,
                                    2,
                                    RGBA(0.0, 0.9, 0.1, result["model_confidence"]),
                                )
                        if result["confidence"] > 0:
                            if "ellipse" in result:
                                pts = cv2.ellipse2Poly(
                                    (
                                        int(result["ellipse"]["center"][0]),
                                        int(result["ellipse"]["center"][1]),
                                    ),
                                    (
                                        int(result["ellipse"]["axes"][0] / 2),
                                        int(result["ellipse"]["axes"][1] / 2),
                                    ),
                                    int(result["ellipse"]["angle"]),
                                    0,
                                    360,
                                    15,
                                )
                                confidence = result["confidence"] * 0.7
                                draw_polyline(pts, 1, RGBA(1.0, 0, 0, confidence))
                                draw_points(
                                    [result["ellipse"]["center"]],
                                    size=20,
                                    color=RGBA(1.0, 0.0, 0.0, confidence),
                                    sharpness=1.0,
                                )

                    glViewport(0, 0, *camera_render_size)
                    make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == "roi":
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    glViewport(0, 0, *window_size)
                    make_coord_system_pixel_based((*window_size[::-1], 3), g_pool.flip)
                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    unused_elements = g_pool.gui.update()
                    for butt in unused_elements.buttons:
                        uroi_on_mouse_button(*butt)

                    make_coord_system_pixel_based((*window_size[::-1], 3), g_pool.flip)

                    g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # need to do this for windows os
        # save session persistent settings
        session_settings["gui_scale"] = g_pool.gui_user_scale
        session_settings["roi"] = g_pool.u_r.get()
        session_settings["flip"] = g_pool.flip
        session_settings["display_mode"] = g_pool.display_mode
        session_settings["ui_config"] = g_pool.gui.configuration
        session_settings["capture_settings"] = (
            g_pool.capture.class_name,
            g_pool.capture.get_init_dict(),
        )
        session_settings["capture_manager_settings"] = (
            g_pool.capture_manager.class_name,
            g_pool.capture_manager.get_init_dict(),
        )
        session_settings["window_position"] = glfw.glfwGetWindowPos(main_window)
        session_settings["version"] = str(g_pool.version)
        session_settings[
            "last_pupil_detector"
        ] = g_pool.pupil_detector.__class__.__name__
        session_settings[
            "pupil_detector_settings"
        ] = g_pool.pupil_detector.get_settings()

        session_window_size = glfw.glfwGetWindowSize(main_window)
        if 0 not in session_window_size:
            session_settings["window_size"] = session_window_size

        session_settings.close()

        g_pool.capture.deinit_ui()
        g_pool.capture_manager.deinit_ui()
        g_pool.pupil_detector.deinit_ui()

        g_pool.pupil_detector.cleanup()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()

        glfw.glfwDestroyWindow(main_window)
        g_pool.gui.terminate()
        glfw.glfwTerminate()
        logger.info("Process shutting down.")
Example #51
0
def eye(pupil_queue, timebase, pipe_to_world, is_alive_flag, user_dir, version, eye_id, cap_src):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """
    is_alive = Is_Alive_Manager(is_alive_flag)
    with is_alive:
        import logging
        # Set up root logger for this process before doing imports of logged modules.
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        # remove inherited handlers
        logger.handlers = []
        # create file handler which logs even debug messages
        fh = logging.FileHandler(os.path.join(user_dir,'eye%s.log'%eye_id),mode='w')
        # fh.setLevel(logging.DEBUG)
        # create console handler with a higher log level
        ch = logging.StreamHandler()
        ch.setLevel(logger.level+10)
        # create formatter and add it to the handlers
        formatter = logging.Formatter('Eye'+str(eye_id)+' Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        formatter = logging.Formatter('EYE'+str(eye_id)+' Process [%(levelname)s] %(name)s : %(message)s')
        ch.setFormatter(formatter)
        # add the handlers to the logger
        logger.addHandler(fh)
        logger.addHandler(ch)
        #silence noisy modules
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        # create logger for the context of this function
        logger = logging.getLogger(__name__)


        # We defer the imports because of multiprocessing.
        # Otherwise the world process and each eye process would also load each other's imports.

        #general imports
        import numpy as np
        import cv2

        #display
        import glfw
        from pyglui import ui,graph,cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline, Named_Texture, Sphere
        import OpenGL.GL as gl
        from gl_utils import basic_gl_setup,adjust_gl_view, clear_gl_screen ,make_coord_system_pixel_based,make_coord_system_norm_based, make_coord_system_eye_camera_based
        from ui_roi import UIRoi
        #monitoring
        import psutil
        import math


        # helpers/utils
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, Roi, timer
        from video_capture import autoCreateCapture, FileCaptureError, EndofVideoFileError, CameraCaptureError
        from av_writer import JPEG_Writer,AV_Writer

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__:Detector_2D,Detector_3D.__name__:Detector_3D}



        #UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600,300*eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 1.0
            window_position_default = (600,31+300*eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600,300*eye_id)


        #g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.pupil_queue = pupil_queue
        g_pool.timebase = timebase


        # Callback functions
        def on_resize(window,w, h):
            if not g_pool.iconified:
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                g_pool.gui.update_window(w,h)
                graph.adjust_size(w,h)
                adjust_gl_view(w,h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key,scancode,action,mods)

        def on_char(window,char):
            g_pool.gui.update_char(char)

        def on_iconify(window,iconified):
            g_pool.iconified = iconified

        def on_button(window,button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    return  # if the roi interacts we don't want the gui to interact as well
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos,glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1-pos[0],1-pos[1]
                    pos = denormalize(pos,(frame.width,frame.height)) # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos,g_pool.u_r.handle_size+40,g_pool.u_r.handle_size+40):
                        return  # if the roi interacts we don't want the gui to interact as well

            g_pool.gui.update_button(button,action,mods)



        def on_pos(window,x, y):
            hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0]/glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.update_mouse(x*hdpi_factor,y*hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x,y),glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,(frame.width,frame.height) )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)

        def on_scroll(window,x,y):
            g_pool.gui.update_scroll(x,y*scroll_factor)


        # load session persistent settings
        session_settings = Persistent_Dict(os.path.join(g_pool.user_dir,'user_settings_eye%s'%eye_id))
        if session_settings.get("version",VersionFormat('0.0')) < g_pool.version:
            logger.info("Session setting are from older version of this app. I will not use those.")
            session_settings.clear()
        # Initialize capture
        cap = autoCreateCapture(cap_src, timebase=g_pool.timebase)
        default_settings = {'frame_size':(640,480),'frame_rate':60}
        previous_settings = session_settings.get('capture_settings',None)
        if previous_settings and previous_settings['name'] == cap.name:
            cap.settings = previous_settings
        else:
            cap.settings = default_settings


        # Test capture
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Could not retrieve image from capture")
            cap.close()
            return

        #signal world that we are ready to go
        # pipe_to_world.send('eye%s process ready'%eye_id)

        # any object we attach to the g_pool object *from now on* will only be visible to this process!
        # vars should be declared here to make them visible to the code reader.
        g_pool.iconified = False
        g_pool.capture = cap
        g_pool.flip = session_settings.get('flip',False)
        g_pool.display_mode = session_settings.get('display_mode','camera_image')
        g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                    'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                    'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}


        g_pool.u_r = UIRoi(frame.img.shape)
        g_pool.u_r.set(session_settings.get('roi',g_pool.u_r.get()))


        def on_frame_size_change(new_size):
            g_pool.u_r = UIRoi((new_size[1],new_size[0]))

        cap.on_frame_size_change = on_frame_size_change

        writer = None

        pupil_detector_settings = session_settings.get('pupil_detector_settings',None)
        last_pupil_detector = pupil_detectors[session_settings.get('last_pupil_detector',Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,pupil_detector_settings)

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui.scale = new_scale
            g_pool.gui.collect_menus()


        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]


        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)


        # Initialize glfw
        glfw.glfwInit()
        title = "eye %s"%eye_id
        width,height = session_settings.get('window_size',(frame.width, frame.height))
        main_window = glfw.glfwCreateWindow(width,height, title, None, None)
        window_pos = session_settings.get('window_position',window_position_default)
        glfw.glfwSetWindowPos(main_window,window_pos[0],window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_frame(frame)
        glfw.glfwSwapInterval(0)

        sphere  = Sphere(20)

        #setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui.scale = session_settings.get('gui_scale',1)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",pos=(-300,0),size=(0,0),header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(ui.Slider('scale',g_pool.gui, setter=set_scale,step = .05,min=1.,max=2.5,label='Interface Size'))
        general_settings.append(ui.Button('Reset window size',lambda: glfw.glfwSetWindowSize(main_window,frame.width,frame.height)) )
        general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
        general_settings.append(ui.Selector('display_mode',g_pool,setter=set_display_mode_info,selection=['camera_image','roi','algorithm'], labels=['Camera Image', 'ROI', 'Algorithm'], label="Mode") )
        g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])
        general_settings.append(g_pool.display_mode_info)
        g_pool.sidebar.append(general_settings)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector('pupil_detector',getter = lambda: g_pool.pupil_detector.__class__ ,setter=set_detector,selection=[Detector_2D, Detector_3D],labels=['C++ 2d detector', 'C++ 3d detector'], label="Detection method")
        general_settings.append(detector_selector)

        # let detector add its GUI
        g_pool.pupil_detector.init_gui(g_pool.sidebar)
        # let the camera add its GUI
        g_pool.capture.init_gui(g_pool.sidebar)


        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window,on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window,on_iconify)
        glfw.glfwSetKeyCallback(main_window,on_key)
        glfw.glfwSetCharCallback(main_window,on_char)
        glfw.glfwSetMouseButtonCallback(main_window,on_button)
        glfw.glfwSetCursorPosCallback(main_window,on_pos)
        glfw.glfwSetScrollCallback(main_window,on_scroll)

        #set the last saved window size
        on_resize(main_window, *glfw.glfwGetWindowSize(main_window))


        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config',{})


        #set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = frame.timestamp

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20,130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140,130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"


        #create a timer to control window update frequency
        window_update_timer = timer(1/60.)
        def window_should_update():
            return next(window_update_timer)


        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if pipe_to_world.poll():
                cmd = pipe_to_world.recv()
                if cmd == 'Exit':
                    break
                elif cmd == "Ping":
                    pipe_to_world.send("Pong")
                    command = None
                else:
                    command,payload = cmd
                if command == 'Set_Detection_Mapping_Mode':
                    if payload == '3d':
                        if not isinstance(g_pool.pupil_detector,Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only  = True
                    else:
                        set_detector(Detector_2D)
                        detector_selector.read_only = False

            else:
                command = None



            # Get an image from the grabber
            try:
                frame = cap.get_frame()
            except CameraCaptureError:
                logger.error("Capture from Camera Failed. Stopping.")
                break
            except EndofVideoFileError:
                logger.warning("Video File is done. Stopping")
                cap.seek_to_frame(0)
                frame = cap.get_frame()


            # update performance graphs
            t = frame.timestamp
            dt,ts = t-ts,t
            try:
                fps_graph.add(1./dt)
            except ZeroDivisionError:
                pass
            cpu_graph.update()


            ###  RECORDING of Eye Video (on demand) ###
            # Setup variables and lists for recording
            if 'Rec_Start' == command:
                record_path,raw_mode = payload
                logger.info("Will save eye video to: %s"%record_path)
                timestamps_path = os.path.join(record_path, "eye%s_timestamps.npy"%eye_id)
                if raw_mode and frame.jpeg_buffer:
                    video_path = os.path.join(record_path, "eye%s.mp4"%eye_id)
                    writer = JPEG_Writer(video_path,cap.frame_rate)
                else:
                    video_path = os.path.join(record_path, "eye%s.mp4"%eye_id)
                    writer = AV_Writer(video_path,cap.frame_rate)
                timestamps = []
            elif 'Rec_Stop' == command:
                logger.info("Done recording.")
                writer.release()
                writer = None
                np.save(timestamps_path,np.asarray(timestamps))
                del timestamps

            if writer:
                writer.write_video_frame(frame)
                timestamps.append(frame.timestamp)


            # pupil ellipse detection
            result = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
            result['id'] = eye_id
            # stream the result
            g_pool.pupil_queue.put(result)

            # GL drawing
            if window_should_update():
                if not g_pool.iconified:
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    # switch to work in normalized coordinate space
                    if g_pool.display_mode == 'algorithm':
                        g_pool.image_tex.update_from_ndarray(frame.img)
                    elif g_pool.display_mode in ('camera_image','roi'):
                        g_pool.image_tex.update_from_ndarray(frame.gray)
                    else:
                        pass

                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    window_size =  glfw.glfwGetWindowSize(main_window)
                    make_coord_system_pixel_based((frame.height,frame.width,3),g_pool.flip)

                    if result['method'] == '3d c++':

                        eye_ball = result['projected_sphere']
                        try:
                            pts = cv2.ellipse2Poly( (int(eye_ball['center'][0]),int(eye_ball['center'][1])),
                                                (int(eye_ball['axes'][0]/2),int(eye_ball['axes'][1]/2)),
                                                int(eye_ball['angle']),0,360,8)
                        except ValueError:
                            # NaN values in the projected sphere cannot be converted to int
                            pass
                        else:
                            draw_polyline(pts,2,RGBA(0.,.9,.1,result['model_confidence']) )

                    if result['confidence'] >0:
                        if 'ellipse' in result:
                            pts = cv2.ellipse2Poly( (int(result['ellipse']['center'][0]),int(result['ellipse']['center'][1])),
                                            (int(result['ellipse']['axes'][0]/2),int(result['ellipse']['axes'][1]/2)),
                                            int(result['ellipse']['angle']),0,360,15)
                            confidence = result['confidence'] * 0.7 #scale it a little
                            draw_polyline(pts,1,RGBA(1.,0,0,confidence))
                            draw_points([result['ellipse']['center']],size=20,color=RGBA(1.,0.,0.,confidence),sharpness=1.)

                    # render graphs
                    graph.push_view()
                    fps_graph.draw()
                    cpu_graph.draw()
                    graph.pop_view()

                    # render GUI
                    g_pool.gui.update()

                    #render the ROI
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw(g_pool.gui.scale)

                    #update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize() #detector decides if we visualize or not


        # END while running

        # in case eye recording was still running: save & close
        if writer:
            logger.info("Done recording eye.")
            writer = None
            np.save(timestamps_path,np.asarray(timestamps))

        glfw.glfwRestoreWindow(main_window) #need to do this for windows os
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui.scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.settings
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = g_pool.version
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        cap.close()


        logger.debug("Process done")
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = getHDPIFactor(self._window)
        r = self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]
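        # Worked example (illustrative values only): with a 1000 px wide
        # framebuffer and pad = 90, display_pos[0] == 0.5 maps to
        # (0.5 - 0) * (910 - 90) / (1 - 0) + 90 = 500 px, i.e. the centre.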

        pad = 90 * r
        screen_pos = (
            map_value(self.display_pos[0], out_range=(pad, p_window_size[0] - pad)),
            map_value(self.display_pos[1], out_range=(p_window_size[1] - pad, pad)),
        )
        # interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))
        alpha = 1.0

        r2 = 2 * r
        draw_points(
            [screen_pos], size=60 * r2, color=RGBA(0.0, 0.0, 0.0, alpha), sharpness=0.9
        )
        draw_points(
            [screen_pos], size=38 * r2, color=RGBA(1.0, 1.0, 1.0, alpha), sharpness=0.8
        )
        draw_points(
            [screen_pos], size=19 * r2, color=RGBA(0.0, 0.0, 0.0, alpha), sharpness=0.55
        )

        # some feedback on the detection state
        color = (
            RGBA(0.0, 0.8, 0.0, alpha)
            if len(self.markers) and self.on_position
            else RGBA(0.8, 0.0, 0.0, alpha)
        )
        draw_points([screen_pos], size=3 * r2, color=color, sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.0))
            self.glfont.draw_text(
                p_window_size[0] / 2.0,
                p_window_size[1] / 4.0,
                "Touch {} more times to cancel calibration.".format(
                    self.clicks_to_close
                ),
            )

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
def draw_marker(pos,r,alpha):
    black = RGBA(0.,0.,0.,alpha)
    white = RGBA(1.,1.,1.,alpha)
    for r,c in zip((r,0.8*r,0.6*r,.4*r,.2*r),(black,white,black,white,black)):
        draw_points([pos],size=r,color=c,sharpness=0.95)
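
# A minimal usage sketch for draw_marker (an illustration under assumptions:
# it presumes an active GL context with a pixel-based orthographic projection,
# as set up in the gl_display_in_window methods above).
def draw_calibration_markers(window_size):
    center = (window_size[0] / 2.0, window_size[1] / 2.0)
    draw_marker(center, 40, 1.0)      # fully opaque marker at the window centre
    draw_marker((100, 100), 40, 0.3)  # faded marker, e.g. during lead-in/out
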
Example #54
0
File: eye.py Project: sleip87/pupil
def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url, ipc_push_url,
        user_dir, version, eye_id, overwrite_cap_settings=None):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id):

        # logging setup
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.setLevel(logging.INFO)
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        # general imports
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible
        from ui_roi import UIRoi
        # monitoring
        import psutil

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, AV_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__: Detector_2D,
                           Detector_3D.__name__: Detector_3D}

        # UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 10.0
            window_position_default = (600,31+ 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        # g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.process = 'eye{}'.format(eye_id)
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value
        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            if is_window_visible(window):
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0] / glfw.glfwGetWindowSize(window)[0])
                g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
                g_pool.gui.update_window(w, h)
                g_pool.gui.collect_menus()
                for g in g_pool.graphs:
                    g.scale = hdpi_factor
                    g.adjust_window_size(w, h)
                adjust_gl_view(w, h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the ROI interacts we don't want
                    # the GUI to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    pos = denormalize(pos, g_pool.capture.frame_size)  # position in image pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos, g_pool.u_r.handle_size + 40,g_pool.u_r.handle_size + 40):
                        # if the ROI interacts we don't want
                        # the GUI to interact as well
                        return

            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            hdpi_factor = glfw.glfwGetFramebufferSize(
                window)[0] / glfw.glfwGetWindowSize(window)[0]
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,g_pool.capture.frame_size )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            paths = [paths[x].decode('utf-8') for x in range(count)]
            g_pool.capture_manager.on_drop(paths)
            g_pool.capture.on_drop(paths)

        # load session persistent settings
        session_settings = Persistent_Dict(os.path.join(g_pool.user_dir, 'user_settings_eye{}'.format(eye_id)))
        if VersionFormat(session_settings.get("version", '0.0')) != g_pool.version:
            logger.info("Session setting are from a different version of this app. I will not use those.")
            session_settings.clear()


        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get('flip', False)
        g_pool.display_mode = session_settings.get(
            'display_mode', 'camera_image')
        g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                         'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                         'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}


        capture_manager_settings = session_settings.get(
            'capture_manager_settings', ('UVC_Manager',{}))

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__:c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](g_pool,**manager_settings)


        if eye_id == 0:
            cap_src = ["Pupil Cam1 ID0","HD-6000","Integrated Camera","HD USB Camera","USB 2.0 Camera"]
        else:
            cap_src = ["Pupil Cam1 ID1","HD-6000","Integrated Camera"]

        # Initialize capture
        default_settings = ('UVC_Source',{
                            'preferred_names'  : cap_src,
                            'frame_size': (640,480),
                            'frame_rate': 90
                            })

        capture_source_settings = overwrite_cap_settings or session_settings.get('capture_settings', default_settings)
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__:c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
        assert g_pool.capture

        g_pool.u_r = UIRoi((g_pool.capture.frame_size[1],g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get('roi')
        if roi_user_settings and tuple(roi_user_settings[-1]) == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        pupil_detector_settings = session_settings.get(
            'pupil_detector_settings', None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            'last_pupil_detector', Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(
            g_pool, pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)

        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)
        width, height = session_settings.get(
            'window_size', g_pool.capture.frame_size)
        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get(
            'window_position', window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(np.ones((1,1),dtype=np.uint8)+125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get('gui_scale', 1.)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                           pos=(-300, 0),
                                           size=(0, 0),
                                           header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(ui.Selector('gui_user_scale', g_pool,
                                          setter=set_scale,
                                          selection=[.8, .9, 1., 1.1, 1.2],
                                          label='Interface Size'))
        general_settings.append(ui.Button('Reset window size',lambda: glfw.glfwSetWindowSize(main_window,*g_pool.capture.frame_size)) )
        general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
        general_settings.append(ui.Selector('display_mode',
                                            g_pool,
                                            setter=set_display_mode_info,
                                            selection=['camera_image','roi','algorithm'],
                                            labels=['Camera Image', 'ROI', 'Algorithm'],
                                            label="Mode")
                                            )
        g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector('pupil_detector',
                                        getter=lambda: g_pool.pupil_detector.__class__,
                                        setter=set_detector, selection=[
                                            Detector_2D, Detector_3D],
                                        labels=['C++ 2d detector',
                                                'C++ 3d detector'],
                                        label="Detection method")
        general_settings.append(detector_selector)

        g_pool.capture_selector_menu = ui.Growing_Menu('Capture Selection')
        g_pool.capture_source_menu = ui.Growing_Menu('Capture Source')
        g_pool.capture_source_menu.collapsed = True
        g_pool.capture.init_gui()

        g_pool.sidebar.append(general_settings)
        g_pool.sidebar.append(g_pool.capture_selector_menu)
        g_pool.sidebar.append(g_pool.capture_source_menu)

        g_pool.pupil_detector.init_gui(g_pool.sidebar)

        g_pool.capture_manager.init_gui()
        g_pool.writer = None

        def replace_source(source_class_name,source_settings):
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
            g_pool.capture.init_gui()
            if g_pool.writer:
                logger.info("Done recording.")
                g_pool.writer.release()
                g_pool.writer = None

        g_pool.replace_source = replace_source # for ndsi capture

        def replace_manager(manager_class):
            g_pool.capture_manager.cleanup()
            g_pool.capture_manager = manager_class(g_pool)
            g_pool.capture_manager.init_gui()

        # We add the capture selection menu after a manager has been added:
        g_pool.capture_selector_menu.insert(0,ui.Selector(
                                                'capture_manager',g_pool,
                                                setter    = replace_manager,
                                                getter    = lambda: g_pool.capture_manager.__class__,
                                                selection = manager_classes,
                                                labels    = [b.gui_name for b in manager_classes],
                                                label     = 'Manager'
                                            ))


        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config', {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = 'jpeg'

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)
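        # Note: window_should_update() polls the 1/60 s timer above, so the GL
        # redraw below runs at roughly display rate rather than once per
        # captured frame (descriptive note; exact behaviour depends on methods.timer).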

        logger.warning('Process started.')

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification['subject']
                if subject.startswith('eye_process.should_stop'):
                    if notification['eye_id'] == eye_id:
                        break
                elif subject == 'set_detection_mapping_mode':
                    if notification['mode'] == '3d':
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    else:
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                elif subject == 'recording.started':
                    if notification['record_eye'] and g_pool.capture.online:
                        record_path = notification['rec_path']
                        raw_mode = notification['compression']
                        logger.info("Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path, "eye{}.mp4".format(eye_id))
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(video_path, g_pool.capture.frame_rate)
                        elif hasattr(g_pool.capture._recent_frame, 'h264_buffer'):
                            g_pool.writer = H264Writer(video_path,
                                                       g_pool.capture.frame_size[0],
                                                       g_pool.capture.frame_size[1],
                                                       g_pool.capture.frame_rate)
                        else:
                            g_pool.writer = AV_Writer(video_path, g_pool.capture.frame_rate)
                elif subject == 'recording.stopped':
                    if g_pool.writer:
                        logger.info("Done recording.")
                        g_pool.writer.release()
                        g_pool.writer = None
                elif subject.startswith('meta.should_doc'):
                    ipc_socket.notify({
                        'subject': 'meta.doc',
                        'actor': 'eye{}'.format(eye_id),
                        'doc': eye.__doc__
                    })
                elif subject.startswith('frame_publishing.started'):
                    should_publish_frames = True
                    frame_publish_format = notification.get('format', 'jpeg')
                elif subject.startswith('frame_publishing.stopped'):
                    should_publish_frames = False
                    frame_publish_format = 'jpeg'
                elif subject.startswith('start_eye_capture') and notification['target'] == g_pool.process:
                    replace_source(notification['name'],notification['args'])

                g_pool.capture.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get('frame')
            g_pool.capture_manager.recent_events(event)
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (f_height, f_width):
                    g_pool.pupil_detector.on_resolution_change((g_pool.u_r.array_shape[1], g_pool.u_r.array_shape[0]), g_pool.capture.frame_size)
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        else:
                            raise AttributeError()
                    except AttributeError:
                        pass
                    else:
                        pupil_socket.send('frame.eye.%s'%eye_id,{
                            'width': frame.width,
                            'height': frame.height,
                            'index': frame.index,
                            'timestamp': frame.timestamp,
                            'format': frame_publish_format,
                            '__raw_data__': [data]
                        })

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1./dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
                result['id'] = eye_id

                # stream the result
                pupil_socket.send('pupil.%s'%eye_id,result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == 'algorithm':
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ('camera_image', 'roi'):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()
                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
                    if frame:
                        if result['method'] == '3d c++':
                            eye_ball = result['projected_sphere']
                            try:
                                pts = cv2.ellipse2Poly(
                                    (int(eye_ball['center'][0]),
                                     int(eye_ball['center'][1])),
                                    (int(eye_ball['axes'][0] / 2),
                                     int(eye_ball['axes'][1] / 2)),
                                    int(eye_ball['angle']), 0, 360, 8)
                            except ValueError as e:
                                pass
                            else:
                                draw_polyline(pts, 2, RGBA(0., .9, .1, result['model_confidence']))
                        if result['confidence'] > 0:
                            if 'ellipse' in result:
                                pts = cv2.ellipse2Poly(
                                    (int(result['ellipse']['center'][0]),
                                     int(result['ellipse']['center'][1])),
                                    (int(result['ellipse']['axes'][0] / 2),
                                     int(result['ellipse']['axes'][1] / 2)),
                                    int(result['ellipse']['angle']), 0, 360, 15)
                                confidence = result['confidence'] * 0.7
                                draw_polyline(pts, 1, RGBA(1., 0, 0, confidence))
                                draw_points([result['ellipse']['center']],
                                            size=20,
                                            color=RGBA(1., 0., 0., confidence),
                                            sharpness=1.)

                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    g_pool.gui.update()

                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # need to do this for windows os
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui_user_scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.class_name, g_pool.capture.get_init_dict()
        session_settings['capture_manager_settings'] = g_pool.capture_manager.class_name, g_pool.capture_manager.get_init_dict()
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = str(g_pool.version)
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.capture.deinit_gui()
        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()
        logger.info("Process shutting down.")
Example #55
0
def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url,ipc_push_url, user_dir, version, eye_id,overwrite_cap_settings=None):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """


    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx,ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx,ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,ipc_sub_url,topics=("notify",))

    with Is_Alive_Manager(is_alive_flag,ipc_socket,eye_id):

        #logging setup
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx,ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        #general imports
        import numpy as np
        import cv2

        #display
        import glfw
        from pyglui import ui,graph,cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline, Named_Texture, Sphere
        import OpenGL.GL as gl
        from gl_utils import basic_gl_setup,adjust_gl_view, clear_gl_screen ,make_coord_system_pixel_based,make_coord_system_norm_based, make_coord_system_eye_camera_based,is_window_visible
        from ui_roi import UIRoi
        #monitoring
        import psutil
        import math


        # helpers/utils
        from uvc import get_time_monotonic, StreamError
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, Roi, timer
        from av_writer import JPEG_Writer,AV_Writer

        from video_capture import InitialisationError,StreamError, Fake_Source,EndofVideoFileError, source_classes, manager_classes
        source_by_name = {src.class_name():src for src in source_classes}

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__:Detector_2D,Detector_3D.__name__:Detector_3D}



        #UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600,300*eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 1.0
            window_position_default = (600,31+300*eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600,300*eye_id)


        #g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic()-g_pool.timebase.value
        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window,w, h):
            if is_window_visible(window):
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                g_pool.gui.update_window(w,h)
                graph.adjust_size(w,h)
                adjust_gl_view(w,h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key,scancode,action,mods)

        def on_char(window,char):
            g_pool.gui.update_char(char)

        def on_iconify(window,iconified):
            g_pool.iconified = iconified

        def on_button(window,button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    return # if the ROI interacts we don't want the GUI to interact as well
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos,glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1-pos[0],1-pos[1]
                    pos = denormalize(pos,(frame.width,frame.height)) # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos,g_pool.u_r.handle_size+40,g_pool.u_r.handle_size+40):
                        return # if the ROI interacts we don't want the GUI to interact as well

            g_pool.gui.update_button(button,action,mods)



        def on_pos(window,x, y):
            hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0]/glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.update_mouse(x*hdpi_factor,y*hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x,y),glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,(frame.width,frame.height) )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)

        def on_scroll(window,x,y):
            g_pool.gui.update_scroll(x,y*scroll_factor)

        # load session persistent settings
        session_settings = Persistent_Dict(os.path.join(g_pool.user_dir,'user_settings_eye%s'%eye_id))
        if session_settings.get("version",VersionFormat('0.0')) < g_pool.version:
            logger.info("Session setting are from older version of this app. I will not use those.")
            session_settings.clear()

        capture_manager_settings = session_settings.get(
            'capture_manager_settings', ('UVC_Manager',{}))

        if eye_id == 0:
            cap_src = ["Pupil Cam1 ID0","HD-6000","Integrated Camera","HD USB Camera","USB 2.0 Camera"]
        else:
            cap_src = ["Pupil Cam1 ID1","HD-6000","Integrated Camera"]

        # Initialize capture
        default_settings = {
            'source_class_name': 'UVC_Source',
            'preferred_names'  : cap_src,
            'frame_size': (640,480),
            'frame_rate': 90
        }
        settings = overwrite_cap_settings or session_settings.get('capture_settings', default_settings)
        try:
            cap = source_by_name[settings['source_class_name']](g_pool, **settings)
        except (KeyError,InitialisationError) as e:
            if isinstance(e,KeyError):
                logger.warning('Incompatible capture setting encountered. Falling back to fake source.')
            cap = Fake_Source(g_pool, **settings)

        g_pool.iconified = False
        g_pool.capture = cap
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get('flip',False)
        g_pool.display_mode = session_settings.get('display_mode','camera_image')
        g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                    'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                    'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}



        g_pool.u_r = UIRoi((g_pool.capture.frame_size[1],g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get('roi')
        if roi_user_settings and roi_user_settings[-1] == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        writer = None

        pupil_detector_settings = session_settings.get('pupil_detector_settings',None)
        last_pupil_detector = pupil_detectors[session_settings.get('last_pupil_detector',Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,pupil_detector_settings)

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui.scale = new_scale
            g_pool.gui.collect_menus()


        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]


        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)


        # Initialize glfw
        glfw.glfwInit()
        title = "eye %s"%eye_id
        width,height = session_settings.get('window_size',g_pool.capture.frame_size)
        main_window = glfw.glfwCreateWindow(width,height, title, None, None)
        window_pos = session_settings.get('window_position',window_position_default)
        glfw.glfwSetWindowPos(main_window,window_pos[0],window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        glfw.glfwSwapInterval(0)

        #setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui.scale = session_settings.get('gui_scale',1)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",pos=(-300,0),size=(0,0),header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(ui.Slider('scale',g_pool.gui, setter=set_scale,step = .05,min=1.,max=2.5,label='Interface Size'))
        general_settings.append(ui.Button('Reset window size',lambda: glfw.glfwSetWindowSize(main_window,frame.width,frame.height)) )
        general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
        general_settings.append(ui.Selector('display_mode',g_pool,setter=set_display_mode_info,selection=['camera_image','roi','algorithm'], labels=['Camera Image', 'ROI', 'Algorithm'], label="Mode") )
        g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])
        general_settings.append(g_pool.display_mode_info)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector('pupil_detector',getter = lambda: g_pool.pupil_detector.__class__ ,setter=set_detector,selection=[Detector_2D, Detector_3D],labels=['C++ 2d detector', 'C++ 3d detector'], label="Detection method")
        general_settings.append(detector_selector)

        g_pool.capture_selector_menu = ui.Growing_Menu('Capture Selection')
        g_pool.capture_source_menu = ui.Growing_Menu('Capture Source')
        g_pool.capture.init_gui()

        g_pool.sidebar.append(general_settings)
        g_pool.sidebar.append(g_pool.capture_selector_menu)
        g_pool.sidebar.append(g_pool.capture_source_menu)

        g_pool.pupil_detector.init_gui(g_pool.sidebar)

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__:c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](g_pool,**manager_settings)
        g_pool.capture_manager.init_gui()

        def open_manager(manager_class):
            g_pool.capture_manager.cleanup()
            g_pool.capture_manager = manager_class(g_pool)
            g_pool.capture_manager.init_gui()

        # We add the capture selection menu after a manager has been added:
        g_pool.capture_selector_menu.insert(0,ui.Selector(
            'capture_manager',g_pool,
            setter    = open_manager,
            getter    = lambda: g_pool.capture_manager.__class__,
            selection = manager_classes,
            labels    = [b.gui_name for b in manager_classes],
            label     = 'Manager'
        ))

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window,on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window,on_iconify)
        glfw.glfwSetKeyCallback(main_window,on_key)
        glfw.glfwSetCharCallback(main_window,on_char)
        glfw.glfwSetMouseButtonCallback(main_window,on_button)
        glfw.glfwSetCursorPosCallback(main_window,on_pos)
        glfw.glfwSetScrollCallback(main_window,on_scroll)

        #set the last saved window size
        on_resize(main_window, *glfw.glfwGetWindowSize(main_window))


        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config',{})


        #set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20,130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140,130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"

        should_publish_frames = False
        frame_publish_format = 'jpeg'

        #create a timer to control window update frequency
        window_update_timer = timer(1/60.)
        def window_should_update():
            return next(window_update_timer)

        logger.warning('Process started.')

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t,notification = notify_sub.recv()
                subject = notification['subject']
                if subject == 'eye_process.should_stop':
                    if notification['eye_id'] == eye_id:
                        break
                elif subject == 'set_detection_mapping_mode':
                    if notification['mode'] == '3d':
                        if not isinstance(g_pool.pupil_detector,Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only  = True
                    else:
                        if not isinstance(g_pool.pupil_detector,Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                elif subject == 'recording.started':
                    if notification['record_eye']:
                        record_path = notification['rec_path']
                        raw_mode = notification['compression']
                        logger.info("Will save eye video to: %s"%record_path)
                        timestamps_path = os.path.join(record_path, "eye%s_timestamps.npy"%eye_id)
                        if raw_mode and frame.jpeg_buffer:
                            video_path = os.path.join(record_path, "eye%s.mp4"%eye_id)
                            writer = JPEG_Writer(video_path,g_pool.capture.frame_rate)
                        else:
                            video_path = os.path.join(record_path, "eye%s.mp4"%eye_id)
                            writer = AV_Writer(video_path,g_pool.capture.frame_rate)
                        timestamps = []
                elif subject == 'recording.stopped':
                    if writer:
                        logger.info("Done recording.")
                        writer.release()
                        writer = None
                        np.save(timestamps_path,np.asarray(timestamps))
                        del timestamps
                elif subject.startswith('meta.should_doc'):
                    ipc_socket.notify({
                        'subject':'meta.doc',
                        'actor':'eye%i'%eye_id,
                        'doc':eye.__doc__
                        })
                elif subject.startswith('frame_publishing.started'):
                    should_publish_frames = True
                    frame_publish_format = notification.get('format','jpeg')
                elif subject.startswith('frame_publishing.stopped'):
                    should_publish_frames = False
                    frame_publish_format = 'jpeg'
                else:
                    g_pool.capture_manager.on_notify(notification)

            # Get an image from the grabber
            try:
                frame = g_pool.capture.get_frame()
            except StreamError as e:
                logger.error("Error getting frame. Stopping eye process.")
                logger.debug("Caught error: %s"%e)
                break
            except EndofVideoFileError:
                logger.warning("Video File is done. Stopping")
                g_pool.capture.seek_to_frame(0)
                frame = g_pool.capture.get_frame()

            g_pool.u_r = UIRoi((frame.height,frame.width))
            g_pool.capture_manager.update(frame, {})

            if should_publish_frames and frame.jpeg_buffer:
                if   frame_publish_format == "jpeg":
                    data = frame.jpeg_buffer
                elif frame_publish_format == "yuv":
                    data = frame.yuv_buffer
                elif frame_publish_format == "bgr":
                    data = frame.bgr
                elif frame_publish_format == "gray":
                    data = frame.gray
                pupil_socket.send('frame.eye.%s'%eye_id,{
                    'width': frame.width,
                    'height': frame.height,
                    'index': frame.index,
                    'timestamp': frame.timestamp,
                    'format': frame_publish_format,
                    '__raw_data__': [data]
                })


            # update performance graphs
            t = frame.timestamp
            dt,ts = t-ts,t
            try:
                fps_graph.add(1./dt)
            except ZeroDivisionError:
                pass
            cpu_graph.update()



            if writer:
                writer.write_video_frame(frame)
                timestamps.append(frame.timestamp)


            # pupil ellipse detection
            result = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
            result['id'] = eye_id
            # stream the result
            pupil_socket.send('pupil.%s'%eye_id,result)

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    # switch to work in normalized coordinate space
                    if g_pool.display_mode == 'algorithm':
                        g_pool.image_tex.update_from_ndarray(frame.img)
                    elif g_pool.display_mode in ('camera_image','roi'):
                        g_pool.image_tex.update_from_ndarray(frame.gray)
                    else:
                        pass

                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    window_size =  glfw.glfwGetWindowSize(main_window)
                    make_coord_system_pixel_based((frame.height,frame.width,3),g_pool.flip)
                    g_pool.capture.gl_display()

                    if result['method'] == '3d c++':

                        eye_ball = result['projected_sphere']
                        try:
                            pts = cv2.ellipse2Poly( (int(eye_ball['center'][0]),int(eye_ball['center'][1])),
                                                (int(eye_ball['axes'][0]/2),int(eye_ball['axes'][1]/2)),
                                                int(eye_ball['angle']),0,360,8)
                        except ValueError as e:
                            pass
                        else:
                            draw_polyline(pts,2,RGBA(0.,.9,.1,result['model_confidence']) )

                    if result['confidence'] >0:
                        if 'ellipse' in result:
                            pts = cv2.ellipse2Poly( (int(result['ellipse']['center'][0]),int(result['ellipse']['center'][1])),
                                            (int(result['ellipse']['axes'][0]/2),int(result['ellipse']['axes'][1]/2)),
                                            int(result['ellipse']['angle']),0,360,15)
                            confidence = result['confidence'] * 0.7 #scale it a little
                            draw_polyline(pts,1,RGBA(1.,0,0,confidence))
                            draw_points([result['ellipse']['center']],size=20,color=RGBA(1.,0.,0.,confidence),sharpness=1.)

                    # render graphs
                    graph.push_view()
                    fps_graph.draw()
                    cpu_graph.draw()
                    graph.pop_view()

                    # render GUI
                    g_pool.gui.update()

                    #render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    #update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize() #detector decides if we visualize or not


        # END while running

        # in case eye recording was still running: save & close
        if writer:
            logger.info("Done recording eye.")
            writer = None
            np.save(timestamps_path,np.asarray(timestamps))

        glfw.glfwRestoreWindow(main_window) #need to do this for windows os
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui.scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.settings
        session_settings['capture_manager_settings'] = g_pool.capture_manager.class_name, g_pool.capture_manager.get_init_dict()
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = g_pool.version
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.capture.deinit_gui()
        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()
        logger.info("Process shutting down.")
	def update_window(self, g_pool , gaze_points0 , sphere0 , gaze_points1 = [] , sphere1 = None, intersection_points = []  ):

		if not self.window:
			return

		self.begin_update_window() #sets context

		self.clear_gl_screen()
		self.trackball.push()

		glMatrixMode( GL_MODELVIEW )

		# draw things in world camera coordinate system
		glPushMatrix()
		glLoadIdentity()

		calibration_points_line_color = RGBA(0.5,0.5,0.5,0.1);
		error_line_color = RGBA(1.0,0.0,0.0,0.5)

		self.draw_coordinate_system(200)
		if self.world_camera_width != 0:
			self.draw_frustum( self.world_camera_width/ 10.0 , self.world_camera_height/ 10.0 , self.world_camera_focal / 10.0)

		for p in self.cal_observed_points_3d:
			glutils.draw_polyline( [ (0,0,0), p]  , 1 , calibration_points_line_color, line_type = GL_LINES)
			# draw error lines from observed calibration points to ref points
		for(cal_point,ref_point) in zip(self.cal_ref_points_3d, self.cal_observed_points_3d):
				glutils.draw_polyline( [ cal_point, ref_point]  , 1 , error_line_color, line_type = GL_LINES)

		#calibration points
		glutils.draw_points( self.cal_ref_points_3d , 4 , RGBA( 0, 1, 1, 1 ) )


		glPopMatrix()

		if sphere0:
			# eye camera
			glPushMatrix()
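			# glLoadMatrixf expects column-major data, so the row-major numpy pose matrix is transposed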
			glLoadMatrixf( self.eye_camera_to_world_matrix0.T )

			self.draw_coordinate_system(60)
			self.draw_frustum( self.image_width / 10.0, self.image_height / 10.0, self.focal_length /10.)
			glPopMatrix()

			#everything else is in world coordinates

			#eye
			sphere_center0 = list(sphere0['center'])
			sphere_radius0 = sphere0['radius']
			self.draw_sphere(sphere_center0,sphere_radius0,  color = RGBA(1,1,0,1))

			# gaze lines
			for p in self.cal_gaze_points0_3d:
				glutils.draw_polyline( [ sphere_center0, p]  , 1 , calibration_points_line_color, line_type = GL_LINES)

			#calibration points
			# glutils.draw_points( self.cal_gaze_points0_3d , 4 , RGBA( 1, 0, 1, 1 ) )

			#current gaze points
			glutils.draw_points( gaze_points0 , 2 , RGBA( 1, 0, 0, 1 ) )
			for p in gaze_points0:
				glutils.draw_polyline( [sphere_center0, p]  , 1 , RGBA(0,0,0,1), line_type = GL_LINES)

			# draw error lines from eye gaze points to ref points
			for(cal_gaze_point,ref_point) in zip(self.cal_gaze_points0_3d, self.cal_ref_points_3d):
				glutils.draw_polyline( [ cal_gaze_point, ref_point]  , 1 , error_line_color, line_type = GL_LINES)

		#second eye
		if sphere1:
			# eye camera
			glPushMatrix()
			glLoadMatrixf( self.eye_camera_to_world_matrix1.T )

			self.draw_coordinate_system(60)
			self.draw_frustum( self.image_width / 10.0, self.image_height / 10.0, self.focal_length /10.)
			glPopMatrix()

			#everything else is in world coordinates

			#eye
			sphere_center1 = list(sphere1['center'])
			sphere_radius1 = sphere1['radius']
			self.draw_sphere(sphere_center1,sphere_radius1,  color = RGBA(1,1,0,1))

			# gaze lines
			for p in self.cal_gaze_points1_3d:
				glutils.draw_polyline( [ sphere_center1, p]  , 4 , calibration_points_line_color, line_type = GL_LINES)

			#calibration points
			glutils.draw_points( self.cal_gaze_points1_3d , 4 , RGBA( 1, 0, 1, 1 ) )

			#current gaze points
			glutils.draw_points( gaze_points1 , 2 , RGBA( 1, 0, 0, 1 ) )
			for p in gaze_points1:
				glutils.draw_polyline( [sphere_center1, p]  , 1 , RGBA(0,0,0,1), line_type = GL_LINES)

			# draw error lines from eye gaze points to ref points
			for(cal_gaze_point,ref_point) in zip(self.cal_gaze_points1_3d, self.cal_ref_points_3d):
				glutils.draw_polyline( [ cal_gaze_point, ref_point]  , 1 , error_line_color, line_type = GL_LINES)

		self.trackball.pop()

		self.end_update_window() #swap buffers, handle context
Example #57
0
def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url, ipc_push_url,
        user_dir, version, eye_id, glint_queue, glint_vector_queue, overwrite_cap_settings=None):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing:
    # otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id):

        # logging setup
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.setLevel(logging.INFO)
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        # general imports
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible
        from ui_roi import UIRoi
        # monitoring
        import psutil
        import math
        from time import time
        import json


        # helpers/utils
        from file_methods import Persistent_Dict, load_object
        from uvc import get_time_monotonic
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, AV_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
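        # map detector class names to classes so the last-used detector can be restored from session settings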
        pupil_detectors = {Detector_2D.__name__: Detector_2D,
                           Detector_3D.__name__: Detector_3D}

        # UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 10.0
            window_position_default = (600,31+ 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        # g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.process = 'eye{}'.format(eye_id)
        g_pool.timebase = timebase
        g_pool.glints = glint_queue
        g_pool.glint_pupil_vectors = glint_vector_queue

        glint_detector = Glint_Detector(g_pool)
        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
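            # timestamps are relative to the shared timebase so all processes use a common clock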
            return get_time_monotonic() - g_pool.timebase.value
        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            if is_window_visible(window):
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0] / glfw.glfwGetWindowSize(window)[0])
                g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
                g_pool.gui.update_window(w, h)
                g_pool.gui.collect_menus()
                for g in g_pool.graphs:
                    g.scale = hdpi_factor
                    g.adjust_window_size(w, h)
                adjust_gl_view(w, h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_button(window, button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the ROI is being edited we don't want
                    # the GUI to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    pos = denormalize(pos, g_pool.capture.frame_size)  # position in image pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos, g_pool.u_r.handle_size + 40,g_pool.u_r.handle_size + 40):
                        # if the ROI is being edited we don't want
                        # the GUI to interact as well
                        return

            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            hdpi_factor = glfw.glfwGetFramebufferSize(
                window)[0] / glfw.glfwGetWindowSize(window)[0]
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,g_pool.capture.frame_size )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)


        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)



        # load session persistent settings
        session_settings = Persistent_Dict(os.path.join(g_pool.user_dir, 'user_settings_eye{}'.format(eye_id)))
        if VersionFormat(session_settings.get("version", '0.0')) != g_pool.version:
            logger.info("Session setting are from a different version of this app. I will not use those.")
            session_settings.clear()


        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get('flip', False)
        g_pool.display_mode = session_settings.get(
            'display_mode', 'camera_image')
        g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                         'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                         'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}


        capture_manager_settings = session_settings.get(
            'capture_manager_settings', ('UVC_Manager',{}))

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__:c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](g_pool,**manager_settings)


        g_pool.pupil_settings = "default"

        if eye_id == 0:
            cap_src = ["Pupil Cam1 ID0","HD-6000","Integrated Camera","HD USB Camera","USB 2.0 Camera"]
        else:
            cap_src = ["Pupil Cam1 ID1","HD-6000","Integrated Camera"]

        # Initialize capture
        default_settings = ('UVC_Source',{
                            'preferred_names'  : cap_src,
                            'frame_size': (640,480),
                            'frame_rate': 90
                            })

        capture_source_settings = overwrite_cap_settings or session_settings.get('capture_settings', default_settings)
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__:c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
        assert g_pool.capture

        g_pool.u_r = UIRoi((g_pool.capture.frame_size[1],g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get('roi')
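        # only restore the ROI if it was saved for the current frame shape (stored as the last element)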
        if roi_user_settings and roi_user_settings[-1] == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)


        pupil_detector_settings = session_settings.get(
            'pupil_detector_settings', None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            'last_pupil_detector', Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(
            g_pool, pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)

        def convert_keys_to_string(dictionary):
            """Recursively converts dictionary keys to strings."""
            if not isinstance(dictionary, dict):
                return dictionary
            return dict((str(k.decode('utf8')), convert_keys_to_string(v))
                for k, v in dictionary.items())

        def set_pupil_settings(new_settings):
            g_pool.pupil_settings = new_settings

            if new_settings == "default":
                return

            path = os.path.join(g_pool.user_dir, 'pupil_settings_' + new_settings + '.json')
            try:
                with open(path, 'r') as fp:
                    pupil_settings_new = json.load(fp)
            except (IOError, ValueError):
                logger.error("Pupil settings file '%s' does not exist or is not valid JSON" % path)
                return
            # pupil_settings_new = convert_keys_to_string(pupil_settings_new)
            pupil_settings_new['2D_Settings'] = dict(pupil_settings_new)
            pupil_settings = g_pool.pupil_detector.get_settings()
            controls = g_pool.capture.uvc_capture.controls
            controls_dict = {c.display_name: c for c in controls}
            try:
                g_pool.capture.frame_rate = pupil_settings_new['frame_rate']
                g_pool.capture.frame_size = pupil_settings_new['frame_size']
            except KeyError:
                logger.info("No frame rate / frame size in camera settings")
            for key in controls_dict:
                try:
                    controls_dict[key].value = pupil_settings_new[key]
                except KeyError:
                    logger.info("No key with the name '%s' in camera settings" % key)
            for key in pupil_settings.keys():
                try:
                    if isinstance(pupil_settings[key], dict):
                        for sec_key in pupil_settings[key].keys():
                            pupil_settings[key][sec_key] = pupil_settings_new[key][sec_key]
                    else:
                        pupil_settings[key] = pupil_settings_new[key]
                except KeyError:
                    logger.info("No key with the name '%s' in pupil settings" % key)

        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)
        width, height = session_settings.get(
            'window_size', g_pool.capture.frame_size)
        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get(
            'window_position', window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(np.ones((1,1),dtype=np.uint8)+125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get('gui_scale', 1.)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                           pos=(-300, 0),
                                           size=(0, 0),
                                           header_pos='left')
        general_settings = ui.Growing_Menu('General')

        general_settings.append(ui.Selector('pupil_settings',g_pool,setter=set_pupil_settings,selection=['default','indoors','outdoors_sunny', 'outdoors_cloudy', 'vanilla'], labels=['Default', 'Indoors', 'Outdoors Sunny', 'Outdoors Cloudy', 'Vanilla'], label="Pupil settings") )
        general_settings.append(ui.Selector('gui_user_scale', g_pool,
                                          setter=set_scale,
                                          selection=[.8, .9, 1., 1.1, 1.2],
                                          label='Interface Size'))
        general_settings.append(ui.Button('Reset window size',lambda: glfw.glfwSetWindowSize(main_window,*g_pool.capture.frame_size)) )
        general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
        general_settings.append(ui.Selector('display_mode',
                                            g_pool,
                                            setter=set_display_mode_info,
                                            selection=['camera_image','roi','algorithm'],
                                            labels=['Camera Image', 'ROI', 'Algorithm'],
                                            label="Mode")
                                            )
        g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector('pupil_detector',
                                        getter=lambda: g_pool.pupil_detector.__class__,
                                        setter=set_detector, selection=[
                                            Detector_2D, Detector_3D],
                                        labels=['C++ 2d detector',
                                                'C++ 3d detector'],
                                        label="Detection method")
        general_settings.append(detector_selector)

        g_pool.capture_selector_menu = ui.Growing_Menu('Capture Selection')
        g_pool.capture_source_menu = ui.Growing_Menu('Capture Source')
        g_pool.capture_source_menu.collapsed = True
        g_pool.capture.init_gui()

        g_pool.sidebar.append(general_settings)
        g_pool.sidebar.append(g_pool.capture_selector_menu)
        g_pool.sidebar.append(g_pool.capture_source_menu)

        g_pool.pupil_detector.init_gui(g_pool.sidebar)

        g_pool.capture_manager.init_gui()
        g_pool.writer = None

        def replace_source(source_class_name,source_settings):
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
            g_pool.capture.init_gui()
            if g_pool.writer:
                logger.info("Done recording.")
                g_pool.writer.release()
                g_pool.writer = None

        g_pool.replace_source = replace_source # for ndsi capture

        def replace_manager(manager_class):
            g_pool.capture_manager.cleanup()
            g_pool.capture_manager = manager_class(g_pool)
            g_pool.capture_manager.init_gui()

        #We add the capture selection menu, after a manager has been added:
        g_pool.capture_selector_menu.insert(0,ui.Selector(
                                                'capture_manager',g_pool,
                                                setter    = replace_manager,
                                                getter    = lambda: g_pool.capture_manager.__class__,
                                                selection = manager_classes,
                                                labels    = [b.gui_name for b in manager_classes],
                                                label     = 'Manager'
                                            ))


        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_key)
        glfw.glfwSetCharCallback(main_window, on_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config', {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = 'jpeg'

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning('Process started.')

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification['subject']
                if subject == 'eye_process.should_stop':
                    if notification['eye_id'] == eye_id:
                        break
                elif subject == 'set_detection_mapping_mode':
                    if notification['mode'] == '3d':
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    else:
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                elif subject == 'recording.started':
                    if notification['record_eye'] and g_pool.capture.online:
                        record_path = notification['rec_path']
                        raw_mode = notification['compression']
                        logger.info("Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path, "eye{}.mp4".format(eye_id))
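                        # choose a writer based on what the capture provides:
                        # JPEG frames, an H.264 buffer, or raw frames re-encoded by AV_Writer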
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(video_path, g_pool.capture.frame_rate)
                        elif hasattr(g_pool.capture._recent_frame, 'h264_buffer'):
                            g_pool.writer = H264Writer(video_path,
                                                       g_pool.capture.frame_size[0],
                                                       g_pool.capture.frame_size[1],
                                                       g_pool.capture.frame_rate)
                        else:
                            g_pool.writer = AV_Writer(video_path, g_pool.capture.frame_rate)
                elif subject == 'recording.stopped':
                    if g_pool.writer:
                        logger.info("Done recording.")
                        g_pool.writer.release()
                        g_pool.writer = None
                elif subject.startswith('meta.should_doc'):
                    ipc_socket.notify({
                        'subject': 'meta.doc',
                        'actor': 'eye{}'.format(eye_id),
                        'doc': eye.__doc__
                    })
                elif subject.startswith('frame_publishing.started'):
                    should_publish_frames = True
                    frame_publish_format = notification.get('format', 'jpeg')
                elif subject.startswith('frame_publishing.stopped'):
                    should_publish_frames = False
                    frame_publish_format = 'jpeg'
                elif subject.startswith('start_eye_capture') and notification['target'] == g_pool.process:
                    replace_source(notification['name'],notification['args'])

                g_pool.capture.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get('frame')
            g_pool.capture_manager.recent_events(event)
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (f_height, f_width):
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames and frame.jpeg_buffer:
                    if   frame_publish_format == "jpeg":
                        data = frame.jpeg_buffer
                    elif frame_publish_format == "yuv":
                        data = frame.yuv_buffer
                    elif frame_publish_format == "bgr":
                        data = frame.bgr
                    elif frame_publish_format == "gray":
                        data = frame.gray
                    pupil_socket.send('frame.eye.%s'%eye_id,{
                        'width': frame.width,
                        'height': frame.height,
                        'index': frame.index,
                        'timestamp': frame.timestamp,
                        'format': frame_publish_format,
                        '__raw_data__': [data]
                    })

                t = frame.timestamp
                tUnix = time()
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1./dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result, roi = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
                result['id'] = eye_id
                result['unix_ts'] = tUnix

                #glint detection
                #glints = [[0,0,0,0,0,0], [0,0,0,0,0,1]] #glint_detector.glint(frame, eye_id, u_roi=g_pool.u_r, pupil=result, roi=roi)
                #result['glints'] = glints

                #g_pool.glints.put(glints)


                #save glint-pupil vector results
                #if glints[0][3]:
                #    glint_pupil_vector = {'timestamp': glints[0][0], 'x': result['norm_pos'][0]-glints[0][3], 'y': result['norm_pos'][1]-glints[0][4], 'pupil_confidence': result['confidence'], 'glint_found': True, 'id': eye_id, 'x2': result['norm_pos'][0]-glints[1][3], 'y2': result['norm_pos'][1]-glints[1][4]}
                #else:
                #     glint_pupil_vector = {'timestamp': glints[0][0], 'x': result['norm_pos'][0]-glints[0][3], 'y': result['norm_pos'][1]-glints[0][4], 'pupil_confidence': result['confidence'], 'glint_found': False, 'id': eye_id, 'x2': result['norm_pos'][0]-glints[1][3], 'y2': result['norm_pos'][1]-glints[1][4]}
                #g_pool.glint_pupil_vectors.put(glint_pupil_vector)

                # stream the result
                pupil_socket.send('pupil.%s'%eye_id,result)

            cpu_graph.update()




            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == 'algorithm':
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ('camera_image', 'roi'):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    #if frame:
                    #    glints = np.array(result['glints'])
                    #    if len(glints)>0 and glints[0][3]:
                    #        if glints[1][3]:
                    #            cygl_draw_points(glints[:,1:3], size=20,color=cygl_rgba(0.,0.,1.,.5),sharpness=1.)
                    #        elif result['confidence'] > 0.75:
                    #            cygl_draw_points(glints[:,1:3], size=20,color=cygl_rgba(0.,0.,1.,.5),sharpness=1.)

                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
                    if frame:
                        if result['method'] == '3d c++':
                            eye_ball = result['projected_sphere']
                            try:
                                pts = cv2.ellipse2Poly(
                                    (int(eye_ball['center'][0]),
                                     int(eye_ball['center'][1])),
                                    (int(eye_ball['axes'][0] / 2),
                                     int(eye_ball['axes'][1] / 2)),
                                    int(eye_ball['angle']), 0, 360, 8)
                            except ValueError:
                                # projected sphere values can be NaN; int() then raises ValueError
                                pass
                            else:
                                draw_polyline(pts, 2, RGBA(0., .9, .1, result['model_confidence']))
                        if result['confidence'] > 0:
                            if 'ellipse' in result:
                                pts = cv2.ellipse2Poly(
                                    (int(result['ellipse']['center'][0]),
                                     int(result['ellipse']['center'][1])),
                                    (int(result['ellipse']['axes'][0] / 2),
                                     int(result['ellipse']['axes'][1] / 2)),
                                    int(result['ellipse']['angle']), 0, 360, 15)
                                confidence = result['confidence'] * 0.7
                                draw_polyline(pts, 1, RGBA(1., 0, 0, confidence))
                                draw_points([result['ellipse']['center']],
                                            size=20,
                                            color=RGBA(1., 0., 0., confidence),
                                            sharpness=1.)
                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    g_pool.gui.update()

                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # need to do this for windows os
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui_user_scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.class_name, g_pool.capture.get_init_dict()
        session_settings['capture_manager_settings'] = g_pool.capture_manager.class_name, g_pool.capture_manager.get_init_dict()
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = str(g_pool.version)
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.capture.deinit_gui()
        g_pool.pupil_detector.cleanup()
        glint_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()
        logger.info("Process shutting down.")