Example No. 1
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        r = p_window_size[0] / 15.0
        # compensate for radius of marker
        gl.glOrtho(-r, p_window_size[0] + r, p_window_size[1] + r, -r, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        # hacky way of scaling and fitting in different window ratios/sizes
        grid = _make_grid() * min((p_window_size[0], p_window_size[1] * 5.5 / 4.0))
        # center the pattern
        grid -= np.mean(grid)
        grid += (p_window_size[0] / 2 - r, p_window_size[1] / 2 + r)

        draw_points(grid, size=r, color=RGBA(0.0, 0.0, 0.0, 1), sharpness=0.95)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.0))
            self.glfont.draw_text(
                p_window_size[0] / 2.0,
                p_window_size[1] / 4.0,
                "Touch {} more times to close window.".format(self.clicks_to_close),
            )

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
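The grid drawn above comes from a `_make_grid` helper that is not shown in this snippet. As a rough illustration only, a hypothetical stand-in that returns a flat (N, 2) array of normalized grid coordinates (which the caller then scales, centres and offsets, exactly as the lines above do) could look like this:

    import numpy as np

    def _make_grid_sketch(cols=11, rows=11):
        """Hypothetical stand-in for _make_grid: a flat (N, 2) array of
        normalized grid points; the caller scales and centres the result."""
        xs, ys = np.meshgrid(np.linspace(0.0, 1.0, cols), np.linspace(0.0, 1.0, rows))
        return np.stack([xs.ravel(), ys.ravel()], axis=1)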
Example No. 2
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = glfwGetFramebufferSize(
            self._window)[0] / glfwGetWindowSize(self._window)[0]
        r = self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]

        pad = 90 * r
        screen_pos = (
            map_value(self.display_pos[0], out_range=(pad, p_window_size[0] - pad)),
            map_value(self.display_pos[1], out_range=(p_window_size[1] - pad, pad)),
        )
        alpha = 1.0  #interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))

        r2 = 2 * r
        draw_points([screen_pos],
                    size=60 * r2,
                    color=RGBA(0., 0., 0., alpha),
                    sharpness=0.9)
        draw_points([screen_pos],
                    size=38 * r2,
                    color=RGBA(1., 1., 1., alpha),
                    sharpness=0.8)
        draw_points([screen_pos],
                    size=19 * r2,
                    color=RGBA(0., 0., 0., alpha),
                    sharpness=0.55)

        # some feedback on the detection state
        color = (
            RGBA(0., .8, 0., alpha)
            if len(self.markers) and self.on_position
            else RGBA(0.8, 0., 0., alpha)
        )
        draw_points([screen_pos], size=3 * r2, color=color, sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.))
            self.glfont.draw_text(
                p_window_size[0] / 2., p_window_size[1] / 4.,
                'Touch {} more times to cancel calibration.'.format(
                    self.clicks_to_close))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
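`map_value` above is a plain linear remap from one range to another; the flipped y output range is what puts normalized coordinates into the window's top-left pixel origin. A standalone sketch with made-up numbers (a hypothetical 1920x1080 framebuffer and an hdpi/marker factor of 1.0) shows the behaviour:

    def map_value(value, in_range=(0, 1), out_range=(0, 1)):
        ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
        return (value - in_range[0]) * ratio + out_range[0]

    p_window_size = (1920, 1080)  # hypothetical framebuffer size
    pad = 90 * 1.0                # marker_scale * hdpi_factor assumed to be 1.0
    # x = 0 lands on the left padding edge, x = 1 on the right padding edge
    assert map_value(0.0, out_range=(pad, p_window_size[0] - pad)) == 90.0
    assert map_value(1.0, out_range=(pad, p_window_size[0] - pad)) == 1830.0
    # the flipped y range maps 0.5 to the vertical centre of the window
    assert map_value(0.5, out_range=(p_window_size[1] - pad, pad)) == 540.0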
Example No. 3
    def gl_display_in_window(self,world_tex):
        """
        here we map a selected surface onto a separate window.
        """
        if self._window and self.detected:
            active_window = glfwGetCurrentContext()
            glfwMakeContextCurrent(self._window)
            clear_gl_screen()

            # cv uses 3x3, gl uses 4x4 transformation matrices
            m = cvmat_to_glmat(self.m_from_screen)

            glMatrixMode(GL_PROJECTION)
            glPushMatrix()
            glLoadIdentity()
            glOrtho(0, 1, 0, 1,-1,1) # gl coord convention

            glMatrixMode(GL_MODELVIEW)
            glPushMatrix()
            #apply m to our quad - this will stretch the quad such that the ref surface will span the window extent
            glLoadMatrixf(m)

            world_tex.draw()

            glMatrixMode(GL_PROJECTION)
            glPopMatrix()
            glMatrixMode(GL_MODELVIEW)
            glPopMatrix()

            # now let's get recent pupil positions on this surface:
            for gp in self.gaze_on_srf:
                draw_points_norm([gp['norm_pos']],color=RGBA(0.0,0.8,0.5,0.8), size=80)

            glfwSwapBuffers(self._window)
            glfwMakeContextCurrent(active_window)
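`cvmat_to_glmat` comes from the project's gl_utils and converts the 3x3 OpenCV homography `m_from_screen` into the 4x4 matrix that `glLoadMatrixf` expects. As a sketch of the idea (an assumption about the helper, not its verbatim implementation), the homography is embedded so that x, y and the homogeneous coordinate pick up the projective terms while z passes through, and the result is flattened in column-major order:

    import numpy as np

    def cvmat_to_glmat_sketch(h):
        """Embed a 3x3 homography into a 4x4 OpenGL matrix (illustrative sketch)."""
        h = np.asarray(h, dtype=np.float32)
        m = np.eye(4, dtype=np.float32)
        m[0, 0], m[0, 1], m[0, 3] = h[0, 0], h[0, 1], h[0, 2]  # x row
        m[1, 0], m[1, 1], m[1, 3] = h[1, 0], h[1, 1], h[1, 2]  # y row
        m[3, 0], m[3, 1], m[3, 3] = h[2, 0], h[2, 1], h[2, 2]  # homogeneous row
        # glLoadMatrixf expects the 16 floats in column-major order
        return m.T.flatten()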
Example No. 4
    def gl_display(self):
        """
        This is where we can draw to any gl surface
        by default this is the main window, below we change that
        """

        # activate our window
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self.window)

        # start drawing things:
        gl_utils.clear_gl_screen()
        # set coordinate system to be between 0 and 1 of the extents of the window
        gl_utils.make_coord_system_norm_based()
        # draw the image
        draw_gl_texture(self.img)

        # make the coordinate system identical to the img pixel coordinate system
        gl_utils.make_coord_system_pixel_based(self.img.shape)
        # draw some points on top of the image
        # notice how these show up in our window but not in the main window
        draw_points([(200, 400), (600, 400)], color=RGBA(0., 4., .8, .8), size=self.my_var)
        draw_polyline([(200, 400), (600, 400)], color=RGBA(0., 4., .8, .8), thickness=3)

        # since this is our own window we need to swap buffers in the plugin
        glfwSwapBuffers(self.window)

        # and finally reactivate the main window
        glfwMakeContextCurrent(active_window)
Example No. 5
    def _draw_frame(window, path, frames, index: int, show_help: bool):
        frames_data = [frame.load(path) for frame in frames[index]]

        for frame, frame_meta in zip(frames_data, frames[index]):
            PreviewWindow._draw_text(
                frame,
                "Preview {}/{} (eye{})".format(index + 1, len(frames),
                                               frame_meta.eye_id),
                (15, frame.shape[0] - 30),
            )
            #PreviewWindow._draw_text(
            #    frame,
            #    "Confidence: {}".format(frame_meta.confidence),
            #    (15, frame.shape[0] - 30),
            #)

        frame = frames_data[0] if len(frames_data) == 1 else np.hstack(
            frames_data)

        # Present usage hints at first load
        if show_help:
            PreviewWindow._draw_text(
                frame,
                "Usage: Use the arrow keys for navigating between frames.",
                (15, 40),
            )

        with PreviewWindow.WindowContextManager(window):
            clear_gl_screen()
            make_coord_system_norm_based()
            draw_gl_texture(frame, interpolation=False)
            glfw.glfwSwapBuffers(window)
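`PreviewWindow.WindowContextManager` is not shown here; judging from how it is used, it saves the currently active GLFW context, makes the preview window current for the duration of the `with` block, and restores the previous context afterwards. A minimal sketch of such a helper, assuming the same `glfw.glfw*` binding style used in this snippet:

    class WindowContextManager:
        """Illustrative sketch: temporarily make `window` the current GL context."""

        def __init__(self, window):
            self._window = window
            self._previous = None

        def __enter__(self):
            self._previous = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(self._window)
            return self._window

        def __exit__(self, exc_type, exc_value, traceback):
            glfw.glfwMakeContextCurrent(self._previous)
            return False  # never swallow exceptions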
Example No. 6
    def gl_display_in_window(self,world_tex_id):
        """
        here we map a selected surface onto a separate window.
        """
        if self._window and self.detected:
            active_window = glfwGetCurrentContext()
            glfwMakeContextCurrent(self._window)
            clear_gl_screen()

            # cv uses 3x3, gl uses 4x4 transformation matrices
            m = cvmat_to_glmat(self.m_from_screen)

            glMatrixMode(GL_PROJECTION)
            glPushMatrix()
            glLoadIdentity()
            glOrtho(0, 1, 0, 1,-1,1) # gl coord convention

            glMatrixMode(GL_MODELVIEW)
            glPushMatrix()
            #apply m to our quad - this will stretch the quad such that the ref surface will span the window extent
            glLoadMatrixf(m)

            draw_named_texture(world_tex_id)
            glMatrixMode(GL_PROJECTION)
            glPopMatrix()
            glMatrixMode(GL_MODELVIEW)
            glPopMatrix()

            # now let's get recent pupil positions on this surface:
            draw_points_norm(self.gaze_on_srf,color=RGBA(0.,8.,.5,.8), size=80)

            glfwSwapBuffers(self._window)
            glfwMakeContextCurrent(active_window)
        if self.window_should_close:
            self.close_window()

Example No. 7
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        r = p_window_size[0]/15.
        # compensate for radius of marker
        gl.glOrtho(-r,p_window_size[0]+r,p_window_size[1]+r,-r ,-1,1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        #hacky way of scaling and fitting in different window ratios/sizes
        grid = _make_grid()*min((p_window_size[0],p_window_size[1]*5.5/4.))
        #center the pattern
        grid -= np.mean(grid)
        grid +=(p_window_size[0]/2-r,p_window_size[1]/2+r)

        draw_points(grid,size=r,color=RGBA(0.,0.,0.,1),sharpness=0.95)

        if self.clicks_to_close <5:
            self.glfont.set_size(int(p_window_size[0]/30.))
            self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch %s more times to close window.'%self.clicks_to_close)

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 8
 def gl_display_in_window(self, img):
     active_window = glfwGetCurrentContext()
     glfwMakeContextCurrent(self._window)
     clear_gl_screen()
     # gl stuff that will show on your plugin window goes here
     draw_gl_texture(img, interpolation=False)
     glfwSwapBuffers(self._window)
     glfwMakeContextCurrent(active_window)
Example No. 9
 def gl_display_in_window(self,img):
     active_window = glfwGetCurrentContext()
     glfwMakeContextCurrent(self._window)
     clear_gl_screen()
     # gl stuff that will show on your plugin window goes here
     draw_gl_texture(img,interpolation=False)
     glfwSwapBuffers(self._window)
     glfwMakeContextCurrent(active_window)
Example No. 10
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        # Set the projection matrix using glOrtho to include padding for the marker of radius r
        #
        ############################
        #            r             #
        # 0,0##################w,0 #
        # #                      # #
        # #                      # #
        #r#                      #r#
        # #                      # #
        # #                      # #
        # 0,h##################w,h #
        #            r             #
        ############################

        hdpi_factor = glfwGetFramebufferSize(
            self._window)[0] / glfwGetWindowSize(self._window)[0]
        r = 110 * self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        # compensate for radius of marker
        gl.glOrtho(-r * .6, p_window_size[0] + r * .6,
                   p_window_size[1] + r * .7, -r * .7, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        screen_pos = denormalize(self.display_pos, p_window_size, flip_y=True)

        draw_marker(screen_pos, r, self.pattern_alpha)
        #some feedback on the detection state

        if self.detected and self.on_position:
            cygl_draw_points([screen_pos],
                             size=5,
                             color=cygl_rgba(0., 1., 0., self.pattern_alpha),
                             sharpness=0.95)
        else:
            cygl_draw_points([screen_pos],
                             size=5,
                             color=cygl_rgba(1., 0., 0., self.pattern_alpha),
                             sharpness=0.95)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.))
            self.glfont.draw_text(
                p_window_size[0] / 2., p_window_size[1] / 4.,
                'Touch %s more times to cancel calibration.' %
                self.clicks_to_close)

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
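`denormalize` is imported from the project's methods module; the behaviour relied on above is simply scaling a normalized (0..1) position to pixel coordinates, with an optional vertical flip because the ortho projection puts the origin in the top-left corner. A sketch under that assumption:

    def denormalize_sketch(pos, size, flip_y=False):
        """Scale a normalized (0..1) position to pixel coordinates (sketch)."""
        x = pos[0] * size[0]
        y = pos[1] * size[1]
        if flip_y:
            y = size[1] - y
        return x, y

    # e.g. the centre of a hypothetical 1280x720 window:
    assert denormalize_sketch((0.5, 0.5), (1280, 720), flip_y=True) == (640.0, 360.0)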
Example No. 11
    def gl_display_frozen_scene(self):
        gl_utils.clear_gl_screen()

        gl_utils.make_coord_system_norm_based()

        self.frozen_scene_tex.draw()

        gl_utils.make_coord_system_pixel_based(
            (self.g_pool.capture.frame_size[1], self.g_pool.capture.frame_size[0], 3)
        )
Example No. 12
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        # gl stuff that will show on your plugin window goes here

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 13
    def gl_display_frozen_scene(self):
        gl_utils.clear_gl_screen()

        gl_utils.make_coord_system_norm_based()

        self.frozen_scene_tex.draw()

        gl_utils.make_coord_system_pixel_based(
            (self.g_pool.capture.frame_size[1],
             self.g_pool.capture.frame_size[0], 3))
Example No. 14
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = glfwGetFramebufferSize(
            self._window)[0] / glfwGetWindowSize(self._window)[0]
        r = 110 * self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]

        pad = .6 * r
        screen_pos = (
            map_value(self.display_pos[0], out_range=(pad, p_window_size[0] - pad)),
            map_value(self.display_pos[1], out_range=(p_window_size[1] - pad, pad)),
        )
        alpha = interp_fn(
            self.screen_marker_state, 0., 1.,
            float(self.sample_duration + self.lead_in + self.lead_out),
            float(self.lead_in), float(self.sample_duration + self.lead_in))

        draw_concentric_circles(screen_pos, r, 6, alpha)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos],
                        size=5,
                        color=RGBA(0., .8, 0., alpha),
                        sharpness=0.5)
        else:
            draw_points([screen_pos],
                        size=5,
                        color=RGBA(0.8, 0., 0., alpha),
                        sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.))
            self.glfont.draw_text(
                p_window_size[0] / 2., p_window_size[1] / 4.,
                'Touch %s more times to cancel calibration.' %
                self.clicks_to_close)

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
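`interp_fn` is called with the current marker state followed by the output bounds, the total duration, the end of the lead-in and the start of the lead-out, and its result is used as the marker's alpha. One plausible shape for such a fade, shown purely as an illustration under those assumptions and not as the project's actual implementation, is a trapezoid: ramp up during the lead-in, hold while sampling, ramp down during the lead-out.

    import numpy as np

    def trapezoid_alpha(t, lo, hi, total, ramp_in_end, ramp_out_start):
        """Illustrative fade with the same argument order as the call above."""
        xs = [0.0, ramp_in_end, ramp_out_start, total]
        ys = [lo, hi, hi, lo]
        return float(np.interp(t, xs, ys))

    lead_in, sample, lead_out = 25.0, 40.0, 25.0   # hypothetical frame counts
    total = lead_in + sample + lead_out
    assert trapezoid_alpha(0.0, 0.0, 1.0, total, lead_in, lead_in + sample) == 0.0
    assert trapezoid_alpha(45.0, 0.0, 1.0, total, lead_in, lead_in + sample) == 1.0
    assert trapezoid_alpha(total, 0.0, 1.0, total, lead_in, lead_in + sample) == 0.0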
Example No. 15
    def gl_display_in_t265_window(self):
        """ Show new frame in window. """
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self.t265_window)

        clear_gl_screen()
        if self.last_video_frame is not None:
            make_coord_system_norm_based()
            draw_gl_texture(self.last_video_frame)

        glfwSwapBuffers(self.t265_window)
        glfwMakeContextCurrent(active_window)
Example No. 16
File: eye.py Project: N-M-T/pupil
        def consume_events_and_render_buffer():
            glfw.make_context_current(main_window)
            clear_gl_screen()

            if all(c > 0 for c in g_pool.camera_render_size):
                glViewport(0, 0, *g_pool.camera_render_size)
                for p in g_pool.plugins:
                    p.gl_display()

            glViewport(0, 0, *window_size)
            # render graphs
            fps_graph.draw()
            cpu_graph.draw()

            # render GUI
            try:
                clipboard = glfw.get_clipboard_string(main_window).decode()
            except (AttributeError, glfw.GLFWError):
                # clipboard is None, might happen on startup
                clipboard = ""
            g_pool.gui.update_clipboard(clipboard)
            user_input = g_pool.gui.update()
            if user_input.clipboard != clipboard:
                # only write to clipboard if content changed
                glfw.set_clipboard_string(main_window, user_input.clipboard)

            for button, action, mods in user_input.buttons:
                x, y = glfw.get_cursor_pos(main_window)
                pos = gl_utils.window_coordinate_to_framebuffer_coordinate(
                    main_window, x, y, cached_scale=None
                )
                pos = normalize(pos, g_pool.camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                # Position in img pixels
                pos = denormalize(pos, g_pool.capture.frame_size)

                for plugin in g_pool.plugins:
                    if plugin.on_click(pos, button, action):
                        break

            for key, scancode, action, mods in user_input.keys:
                for plugin in g_pool.plugins:
                    if plugin.on_key(key, scancode, action, mods):
                        break

            for char_ in user_input.chars:
                for plugin in g_pool.plugins:
                    if plugin.on_char(char_):
                        break

            # update screen
            glfw.swap_buffers(main_window)
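The click handling above walks a cursor position through several coordinate spaces: window points are converted to framebuffer pixels, normalized against the camera render area, optionally flipped, and finally mapped into eye-image pixels. A compressed sketch of that chain with made-up numbers, using the simple normalize/denormalize conventions assumed throughout these examples:

    def normalize_sketch(pos, size):
        return pos[0] / size[0], pos[1] / size[1]

    def denormalize_sketch(pos, size):
        return pos[0] * size[0], pos[1] * size[1]

    # hypothetical numbers: a click at window point (400, 300) on a 2x hdpi
    # display, a 1000x800 camera render area and a 640x480 eye frame
    fb_pos = (400 * 2, 300 * 2)
    norm = normalize_sketch(fb_pos, (1000, 800))
    img_pos = denormalize_sketch(norm, (640, 480))   # -> (512.0, 360.0)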
Example No. 17
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = glfwGetFramebufferSize(self._window)[0] / glfwGetWindowSize(self._window)[0]
        r = 110 * self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]

        pad = 0.6 * r
        screen_pos = (
            map_value(self.display_pos[0], out_range=(pad, p_window_size[0] - pad)),
            map_value(self.display_pos[1], out_range=(p_window_size[1] - pad, pad)),
        )
        alpha = interp_fn(
            self.screen_marker_state,
            0.0,
            1.0,
            float(self.sample_duration + self.lead_in + self.lead_out),
            float(self.lead_in),
            float(self.sample_duration + self.lead_in),
        )

        draw_concentric_circles(screen_pos, r, 6, alpha)
        # some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos], size=5, color=RGBA(0.0, 0.8, 0.0, alpha), sharpness=0.5)
        else:
            draw_points([screen_pos], size=5, color=RGBA(0.8, 0.0, 0.0, alpha), sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.0))
            self.glfont.draw_text(
                p_window_size[0] / 2.0,
                p_window_size[1] / 4.0,
                "Touch %s more times to cancel calibration." % self.clicks_to_close,
            )

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)

Example No. 18
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = glfwGetFramebufferSize(self._window)[0]/glfwGetWindowSize(self._window)[0]
        r = self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value,in_range=(0,1),out_range=(0,1)):
            ratio = (out_range[1]-out_range[0])/(in_range[1]-in_range[0])
            return (value-in_range[0])*ratio+out_range[0]

        pad = 90 * r
        screen_pos = map_value(self.display_pos[0],out_range=(pad,p_window_size[0]-pad)),map_value(self.display_pos[1],out_range=(p_window_size[1]-pad,pad))
        alpha = interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))

        r2 = 2 * r
        draw_points([screen_pos], size=60*r2, color=RGBA(0., 0., 0., alpha), sharpness=0.9)
        draw_points([screen_pos], size=38*r2, color=RGBA(1., 1., 1., alpha), sharpness=0.8)
        draw_points([screen_pos], size=19*r2, color=RGBA(0., 0., 0., alpha), sharpness=0.55)

        # some feedback on the detection state and button pressing
        if self.detected and self.on_position and self.space_key_was_pressed:
            color = RGBA(.8,.8,0., alpha)
        else:
            if self.detected:
                color = RGBA(0.,.8,0., alpha)
            else:
                color = RGBA(.8,0.,0., alpha)
        draw_points([screen_pos],size=3*r2,color=color,sharpness=0.5)

        if self.clicks_to_close <5:
            self.glfont.set_size(int(p_window_size[0]/30.))
            self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch {} more times to cancel {}.'.format(self.clicks_to_close, self.mode_pretty))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 19
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        # Set the projection matrix using glOrtho to include padding for the marker of radius r
        #
        ############################
        #            r             #
        # 0,0##################w,0 #
        # #                      # #
        # #                      # #
        #r#                      #r#
        # #                      # #
        # #                      # #
        # 0,h##################w,h #
        #            r             #
        ############################


        hdpi_factor = glfwGetFramebufferSize(self._window)[0]/glfwGetWindowSize(self._window)[0]
        r = 110*self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        # compensate for radius of marker
        gl.glOrtho(-r*.6,p_window_size[0]+r*.6,p_window_size[1]+r*.7,-r*.7 ,-1,1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        screen_pos = denormalize(self.display_pos,p_window_size,flip_y=True)
        alpha = interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))

        draw_concentric_circles(screen_pos,r,6,alpha)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos],size=5,color=RGBA(0.,1.,0.,alpha),sharpness=0.95)
        else:
            draw_points([screen_pos],size=5,color=RGBA(1.,0.,0.,alpha),sharpness=0.95)

        if self.clicks_to_close <5:
            self.glfont.set_size(int(p_window_size[0]/30.))
            self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch %s more times to cancel calibration.'%self.clicks_to_close)

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 20
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)
        clear_gl_screen()

        gl.glColor3f(.80, .80, .8)
        self.draw_rect(0, 0, 2000, 2000)
        self.draw_markers()


        hdpi_factor = glfwGetFramebufferSize(self._window)[0]/glfwGetWindowSize(self._window)[0]
        r = 110*self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value,in_range=(0,1),out_range=(0,1)):
            ratio = (out_range[1]-out_range[0])/(in_range[1]-in_range[0])
            return (value-in_range[0])*ratio+out_range[0]

        #pad = 0.1*1920
        pad = .7*r
        screen_pos = map_value(self.display_pos[0],out_range=(pad,p_window_size[0]-pad)),map_value(self.display_pos[1],out_range=(p_window_size[1]-pad,pad))
        alpha = interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))
        if self.screen_marker_state < self.sample_duration+self.lead_in+self.lead_out:
            draw_concentric_circles(screen_pos,r,6,alpha)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos],size=10*self.marker_scale,color=RGBA(0.,.8,0.,alpha),sharpness=0.5)
        elif self.screen_marker_state < self.sample_duration+self.lead_in+self.lead_out:
            draw_points([screen_pos],size=10*self.marker_scale,color=RGBA(0.8,0.,0.,alpha),sharpness=0.5)

        #if self.clicks_to_close <5:
        #    self.glfont.set_size(int(p_window_size[0]/30.))
        #   self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch {} more times to cancel calibration.'.format(self.clicks_to_close))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 21
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        # Set Matrix using gluOrtho2D to include padding for the marker of radius r
        #
        ############################
        #            r             #
        # 0,0##################w,0 #
        # #                      # #
        # #                      # #
        #r#                      #r#
        # #                      # #
        # #                      # #
        # 0,h##################w,h #
        #            r             #
        ############################
        r = 60
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        # compensate for radius of marker
        gluOrtho2D(
            -r, p_window_size[0] + r, p_window_size[1] + r,
            -r)  # origin in the top left corner just like the img np-array
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        screen_pos = denormalize(self.display_pos, p_window_size, flip_y=True)

        draw_marker(screen_pos)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_gl_point(screen_pos, 5, (0., 1., 0., 1.))
        else:
            draw_gl_point(screen_pos, 5, (1., 0., 0., 1.))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 22
    def gl_display(self):
        """
        This is where we can draw to any gl surface
        by default this is the main window, below we change that
        """

        # activate our window
        #active_window = glfwGetCurrentContext()
        #glfwMakeContextCurrent(self.window)

        # start drawing things:
        gl_utils.clear_gl_screen()
        # set coordinate system to be between 0 and 1 of the extents of the window
        gl_utils.make_coord_system_norm_based()
        # draw the image
        try:
            draw_gl_texture(self.img)
            gl_utils.make_coord_system_pixel_based(self.img.shape)
        except AttributeError:
            pass
Example No. 23
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        # Set Matrix using gluOrtho2D to include padding for the marker of radius r
        #
        ############################
        #            r             #
        # 0,0##################w,0 #
        # #                      # #
        # #                      # #
        #r#                      #r#
        # #                      # #
        # #                      # #
        # 0,h##################w,h #
        #            r             #
        ############################
        r = 60
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        # compensate for radius of marker
        gluOrtho2D(-r,p_window_size[0]+r,p_window_size[1]+r, -r) # origin in the top left corner just like the img np-array
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        screen_pos = denormalize(self.display_pos,p_window_size,flip_y=True)

        draw_marker(screen_pos)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_gl_point(screen_pos, 5, (0.,1.,0.,1.))
        else:
            draw_gl_point(screen_pos, 5, (1.,0.,0.,1.))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 24
    def drawing_context(self):
        if self.__gl_handle is None:
            return

        if glfw.glfwWindowShouldClose(self.__gl_handle):
            self.close()
            return

        with self._switch_to_current_context():
            clear_gl_screen()

            gl.glMatrixMode(gl.GL_PROJECTION)
            gl.glLoadIdentity()
            gl.glOrtho(0, self.window_size[0], self.window_size[1], 0, -1, 1)
            # Switch back to Model View Matrix
            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glLoadIdentity()

            yield self.unsafe_handle

            glfw.glfwSwapBuffers(self.unsafe_handle)
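The `yield` in the middle of `drawing_context` implies it is consumed as a context manager (for instance by decorating it with `contextlib.contextmanager`), so that a caller's drawing code runs after the orthographic setup and before the buffer swap. A hypothetical call site, ignoring the early-return paths:

    # hypothetical usage; `window`, `draw_points` and `RGBA` are assumed to be
    # available as in the other examples on this page
    with window.drawing_context() as gl_handle:
        # the window's GL context is current and an orthographic projection
        # matching the window size has already been loaded
        draw_points([(100, 100)], size=20, color=RGBA(0.0, 0.0, 0.0, 1.0), sharpness=0.9)
    # drawing_context itself swaps the buffers after the block exits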
Example No. 25
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()
        #todo write code to display pattern.
        # r = 60.
        # gl.glMatrixMode(gl.GL_PROJECTION)
        # gl.glLoadIdentity()
        # draw_gl_point((-.5,-.5),50.)

        # p_window_size = glfwGetWindowSize(self._window)
        # # compensate for radius of marker
        # x_border,y_border = normalize((r,r),p_window_size)

        # # if p_window_size[0]<p_window_size[1]: #taller
        # #     ratio = p_window_size[1]/float(p_window_size[0])
        # #     gluOrtho2D(-x_border,1+x_border,y_border, 1-y_border) # origin in the top left corner just like the img np-array

        # # else: #wider
        # #     ratio = p_window_size[0]/float(p_window_size[1])
        # #     gluOrtho2D(-x_border,ratio+x_border,y_border, 1-y_border) # origin in the top left corner just like the img np-array

        # gluOrtho2D(-x_border,1+x_border,y_border, 1-y_border) # origin in the top left corner just like the img np-array

        # # Switch back to Model View Matrix
        # gl.glMatrixMode(gl.GL_MODELVIEW)
        # gl.glLoadIdentity()

        # for p in self.display_grid:
        #     draw_gl_point(p)
        # #some feedback on the detection state

        # # if self.detected and self.on_position:
        # #     draw_gl_point(screen_pos, 5.0, (0.,1.,0.,1.))
        # # else:
        # #     draw_gl_point(screen_pos, 5.0, (1.,0.,0.,1.))

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 26
    def gl_display_in_window(self, surface):
        """
        here we map a selected surface onto a separate window.
        """
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)
        clear_gl_screen()

        # cv uses 3x3, gl uses 4x4 transformation matrices
        m = cvmat_to_glmat(surface.m_from_screen)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(0, 1, 0, 1)  # gl coord convention

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        #apply m to our quad - this will stretch the quad such that the ref surface will span the window extent
        glLoadMatrixf(m)

        draw_named_texture(self.g_pool.image_tex)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

        #now let's get recent pupil positions on this surface:
        try:
            gaze_on_surface = [
                p['realtime gaze on ' + surface.name]
                for p in self.recent_pupil_positions
            ]
        except KeyError:
            gaze_on_surface = []
        draw_gl_points_norm(gaze_on_surface, color=(0., 8., .5, .8), size=80)

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 27
    def gl_display_in_window(self, world_tex):
        """
        here we map a selected surface onto a separate window.
        """
        if self._window and self.surface.detected:
            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(self._window)
            gl_utils.clear_gl_screen()

            # cv uses 3x3 gl uses 4x4 transformation matrices
            width, height = self.tracker.camera_model.resolution
            img_corners = np.array(
                [(0, height), (width, height), (width, 0), (0, 0)], dtype=np.float32
            )
            denorm_trans = _get_norm_to_points_trans(img_corners)

            trans_mat = self.surface.dist_img_to_surf_trans @ denorm_trans
            trans_mat = gl_utils.cvmat_to_glmat(trans_mat)

            gl.glMatrixMode(gl.GL_PROJECTION)
            gl.glPushMatrix()
            gl.glLoadIdentity()
            gl.glOrtho(0, 1, 0, 1, -1, 1)
            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glPushMatrix()
            # apply trans_mat to our quad - this will stretch the quad such that the
            # surface will span the window extends
            gl.glLoadMatrixf(trans_mat)

            world_tex.draw()

            gl.glMatrixMode(gl.GL_PROJECTION)
            gl.glPopMatrix()
            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glPopMatrix()

            self.draw_recent_pupil_positions()

            glfw.glfwSwapBuffers(self._window)
            glfw.glfwMakeContextCurrent(active_window)
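`_get_norm_to_points_trans` is not shown in this snippet; the way it is used (mapping the unit square onto the distorted image corners before composing with `dist_img_to_surf_trans`) suggests a perspective transform from normalized corners to the given points. A sketch under that assumption:

    import cv2
    import numpy as np

    def get_norm_to_points_trans_sketch(points):
        """Perspective transform taking the unit square's corners onto `points`
        (assumed behaviour, not the project's verbatim implementation)."""
        norm_corners = np.array([(0, 0), (1, 0), (1, 1), (0, 1)], dtype=np.float32)
        return cv2.getPerspectiveTransform(norm_corners, np.asarray(points, dtype=np.float32))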
Example No. 28
    def gl_display_in_window(self,surface):
        """
        here we map a selected surface onto a separate window.
        """
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)
        clear_gl_screen()

        # cv uses 3x3, gl uses 4x4 transformation matrices
        m = cvmat_to_glmat(surface.m_from_screen)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(0, 1, 0, 1) # gl coord convention

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        #apply m to our quad - this will stretch the quad such that the ref surface will span the window extent
        glLoadMatrixf(m)

        #redraw will reuse the most recent world img texture.
        redraw_gl_texture( ((0,0),(1,0),(1,1),(0,1)) )

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()


        #now let's get recent pupil positions on this surface:
        try:
            gaze_on_surface = [p['realtime gaze on '+surface.name] for p in self.recent_pupil_positions]
        except KeyError:
            gaze_on_surface = []
        draw_gl_points_norm(gaze_on_surface,color=(0.,8.,.5,.8), size=80)


        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example No. 29
    def gl_display_in_window(self, world_tex):
        """
        here we map a selected surface onto a separate window.
        """
        if self._window and self.surface.detected:
            active_window = glfw.get_current_context()
            glfw.make_context_current(self._window)
            gl_utils.clear_gl_screen()

            # cv uses 3x3 gl uses 4x4 transformation matrices
            width, height = self.tracker.camera_model.resolution
            img_corners = np.array(
                [(0, height), (width, height), (width, 0), (0, 0)], dtype=np.float32
            )
            denorm_trans = _get_norm_to_points_trans(img_corners)

            trans_mat = self.surface.dist_img_to_surf_trans @ denorm_trans
            trans_mat = gl_utils.cvmat_to_glmat(trans_mat)

            gl.glMatrixMode(gl.GL_PROJECTION)
            gl.glPushMatrix()
            gl.glLoadIdentity()
            gl.glOrtho(0, 1, 0, 1, -1, 1)
            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glPushMatrix()
            # apply trans_mat to our quad - this will stretch the quad such that the
            # surface will span the window extends
            gl.glLoadMatrixf(trans_mat)

            world_tex.draw()

            gl.glMatrixMode(gl.GL_PROJECTION)
            gl.glPopMatrix()
            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glPopMatrix()

            self.draw_recent_pupil_positions()

            glfw.swap_buffers(self._window)
            glfw.make_context_current(active_window)
Example No. 30
def show_no_rec_window():
    from pyglui.pyfontstash import fontstash
    from pyglui.ui import get_roboto_font_path
    global rec_dir
    def on_drop(window, count, paths):
        global rec_dir
        rec_dir = paths[0].decode('utf-8')

    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(user_dir, "user_settings"))
    if VersionFormat(session_settings.get("version", '0.0')) != get_version(version_file):
        logger.info("Session settings are from a different version of this app. I will not use those.")
        session_settings.clear()
    w, h = session_settings.get('window_size', (1280, 720))
    window_pos = session_settings.get('window_position', window_position_default)

    glfwWindowHint(GLFW_RESIZABLE, 0)
    window = glfwCreateWindow(w, h, 'Pupil Player')
    glfwWindowHint(GLFW_RESIZABLE, 1)

    glfwMakeContextCurrent(window)
    glfwSetWindowPos(window, window_pos[0], window_pos[1])
    glfwSetDropCallback(window, on_drop)

    glfont = fontstash.Context()
    glfont.add_font('roboto', get_roboto_font_path())
    glfont.set_align_string(v_align="center", h_align="middle")
    glfont.set_color_float((0.2, 0.2, 0.2, 0.9))
    gl_utils.basic_gl_setup()
    glClearColor(0.5, .5, 0.5, 0.0)
    text = 'Drop a recording directory onto this window.'
    tip = '(Tip: You can drop a recording directory onto the app icon.)'
    # text = "Please supply a Pupil recording directory as first arg when calling Pupil Player."
    while not glfwWindowShouldClose(window):

        fb_size = glfwGetFramebufferSize(window)
        hdpi_factor = float(fb_size[0] / glfwGetWindowSize(window)[0])
        gl_utils.adjust_gl_view(*fb_size)

        if rec_dir:
            if is_pupil_rec_dir(rec_dir):
                logger.info("Starting new session with '{}'".format(rec_dir))
                text = "Updating recording format."
                tip = "This may take a while!"
            else:
                logger.error("'{}' is not a valid pupil recording".format(rec_dir))
                tip = "Oops! That was not a valid recording."
                rec_dir = None

        gl_utils.clear_gl_screen()
        glfont.set_blur(10.5)
        glfont.set_color_float((0.0, 0.0, 0.0, 1.))
        glfont.set_size(w/25.*hdpi_factor)
        glfont.draw_text(w/2*hdpi_factor, .3*h*hdpi_factor, text)
        glfont.set_size(w/30.*hdpi_factor)
        glfont.draw_text(w/2*hdpi_factor, .4*h*hdpi_factor, tip)
        glfont.set_blur(0.96)
        glfont.set_color_float((1., 1., 1., 1.))
        glfont.set_size(w/25.*hdpi_factor)
        glfont.draw_text(w/2*hdpi_factor, .3*h*hdpi_factor, text)
        glfont.set_size(w/30.*hdpi_factor)
        glfont.draw_text(w/2*hdpi_factor, .4*h*hdpi_factor, tip)

        glfwSwapBuffers(window)

        if rec_dir:
            update_recording_to_recent(rec_dir)
            glfwSetWindowShouldClose(window, True)

        glfwPollEvents()

    session_settings['window_position'] = glfwGetWindowPos(window)
    session_settings['version'] = str(get_version(version_file))
    session_settings.close()
    del glfont
    glfwDestroyWindow(window)
Example No. 31
def eye(
    timebase,
    is_alive_flag,
    ipc_pub_url,
    ipc_sub_url,
    ipc_push_url,
    user_dir,
    version,
    eye_id,
    overwrite_cap_settings=None,
):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools

    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))

    # logging setup
    import logging

    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.NOTSET)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    if is_alive_flag.value:
        # indicates to the eye process that this is a duplicated startup
        logger.warning("Aborting redundant eye process startup")
        return

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
        # general imports
        import traceback
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible, glViewport
        from ui_roi import UIRoi

        # monitoring
        import psutil

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, AV_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        from background_helper import IPC_Logging_Task_Proxy

        IPC_Logging_Task_Proxy.push_url = ipc_push_url

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D, Detector_Dummy

        pupil_detectors = {
            Detector_2D.__name__: Detector_2D,
            Detector_3D.__name__: Detector_3D,
            Detector_Dummy.__name__: Detector_Dummy,
        }

        # UI Platform tweaks
        if platform.system() == "Linux":
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id + 30)
        elif platform.system() == "Windows":
            scroll_factor = 10.0
            window_position_default = (600, 90 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        icon_bar_width = 50
        window_size = None
        camera_render_size = None
        hdpi_factor = 1.0

        # g_pool holds variables for this process
        g_pool = SimpleNamespace()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = "capture"
        g_pool.process = "eye{}".format(eye_id)
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            nonlocal window_size
            nonlocal camera_render_size
            nonlocal hdpi_factor

            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(window)
            hdpi_factor = glfw.getHDPIFactor(window)
            g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
            window_size = w, h
            camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
            g_pool.gui.update_window(w, h)
            g_pool.gui.collect_menus()
            for g in g_pool.graphs:
                g.scale = hdpi_factor
                g.adjust_window_size(w, h)
            adjust_gl_view(w, h)
            glfw.glfwMakeContextCurrent(active_window)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            x *= hdpi_factor
            y *= hdpi_factor
            g_pool.gui.update_mouse(x, y)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            paths = [paths[x].decode("utf-8") for x in range(count)]
            plugins = (g_pool.capture_manager, g_pool.capture)
            # call `on_drop` callbacks until a plugin indicates
            # that it has consumed the event (by returning True)
            any(p.on_drop(paths) for p in plugins)

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir, "user_settings_eye{}".format(eye_id))
        )
        if VersionFormat(session_settings.get("version", "0.0")) != g_pool.version:
            logger.info(
                "Session setting are from a different version of this app. I will not use those."
            )
            session_settings.clear()

        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get("flip", False)
        g_pool.display_mode = session_settings.get("display_mode", "camera_image")
        g_pool.display_mode_info_text = {
            "camera_image": "Raw eye camera image. This uses the least amount of CPU power",
            "roi": "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            "algorithm": "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
        }

        capture_manager_settings = session_settings.get(
            "capture_manager_settings", ("UVC_Manager", {})
        )

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__: c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](
            g_pool, **manager_settings
        )

        if eye_id == 0:
            cap_src = ["Pupil Cam3 ID0", "Pupil Cam2 ID0", "Pupil Cam1 ID0", "HD-6000"]
        else:
            cap_src = ["Pupil Cam3 ID1", "Pupil Cam2 ID1", "Pupil Cam1 ID1"]

        # Initialize capture
        default_settings = (
            "UVC_Source",
            {"preferred_names": cap_src, "frame_size": (320, 240), "frame_rate": 120},
        )

        capture_source_settings = overwrite_cap_settings or session_settings.get(
            "capture_settings", default_settings
        )
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__: c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](
            g_pool, **source_settings
        )
        assert g_pool.capture

        g_pool.u_r = UIRoi((g_pool.capture.frame_size[1], g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get("roi")
        if roi_user_settings and tuple(roi_user_settings[-1]) == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        pupil_detector_settings = session_settings.get("pupil_detector_settings", None)
        last_pupil_detector = pupil_detectors[
            session_settings.get("last_pupil_detector", Detector_2D.__name__)
        ]
        g_pool.pupil_detector = last_pupil_detector(g_pool, pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.deinit_ui()
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_ui()

        def toggle_general_settings(collapsed):
            # this is the menu toggle logic.
            # Only one menu can be open.
            # If no menu is open the menubar should collapse.
            g_pool.menubar.collapsed = collapsed
            for m in g_pool.menubar.elements:
                m.collapsed = True
            general_settings.collapsed = collapsed

        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)

        width, height = g_pool.capture.frame_size
        width *= 2
        height *= 2
        width += icon_bar_width
        width, height = session_settings.get("window_size", (width, height))

        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get("window_position", window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(np.ones((1, 1), dtype=np.uint8) + 125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
        g_pool.menubar = ui.Scrolling_Menu(
            "Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
        )
        g_pool.iconbar = ui.Scrolling_Menu(
            "Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
        )
        g_pool.gui.append(g_pool.menubar)
        g_pool.gui.append(g_pool.iconbar)

        general_settings = ui.Growing_Menu("General", header_pos="headline")
        general_settings.append(
            ui.Selector(
                "gui_user_scale",
                g_pool,
                setter=set_scale,
                selection=[0.8, 0.9, 1.0, 1.1, 1.2],
                label="Interface Size",
            )
        )

        def set_window_size():
            f_width, f_height = g_pool.capture.frame_size
            f_width *= 2
            f_height *= 2
            f_width += int(icon_bar_width * g_pool.gui.scale)
            glfw.glfwSetWindowSize(main_window, f_width, f_height)

        def uroi_on_mouse_button(button, action, mods):
            if g_pool.display_mode == "roi":
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the roi interacts we don't want
                    # the gui to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    x, y = glfw.glfwGetCursorPos(main_window)
                    # pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    x *= hdpi_factor
                    y *= hdpi_factor
                    pos = normalize((x, y), camera_render_size)
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    # Position in img pixels
                    pos = denormalize(
                        pos, g_pool.capture.frame_size
                    )  # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(
                        pos, g_pool.u_r.handle_size, g_pool.u_r.handle_size
                    ):
                        # if the roi interacts we don't want
                        # the gui to interact as well
                        return

        general_settings.append(ui.Button("Reset window size", set_window_size))
        general_settings.append(ui.Switch("flip", g_pool, label="Flip image display"))
        general_settings.append(
            ui.Selector(
                "display_mode",
                g_pool,
                setter=set_display_mode_info,
                selection=["camera_image", "roi", "algorithm"],
                labels=["Camera Image", "ROI", "Algorithm"],
                label="Mode",
            )
        )
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode]
        )

        general_settings.append(g_pool.display_mode_info)

        detector_selector = ui.Selector(
            "pupil_detector",
            getter=lambda: g_pool.pupil_detector.__class__,
            setter=set_detector,
            selection=[Detector_Dummy, Detector_2D, Detector_3D],
            labels=["disabled", "C++ 2d detector", "C++ 3d detector"],
            label="Detection method",
        )
        general_settings.append(detector_selector)

        g_pool.menubar.append(general_settings)
        icon = ui.Icon(
            "collapsed",
            general_settings,
            label=chr(0xE8B8),
            on_val=False,
            off_val=True,
            setter=toggle_general_settings,
            label_font="pupil_icons",
        )
        icon.tooltip = "General Settings"
        g_pool.iconbar.append(icon)
        toggle_general_settings(False)

        g_pool.pupil_detector.init_ui()
        g_pool.capture.init_ui()
        g_pool.capture_manager.init_ui()
        g_pool.writer = None

        def replace_source(source_class_name, source_settings):
            g_pool.capture.deinit_ui()
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](
                g_pool, **source_settings
            )
            g_pool.capture.init_ui()
            if g_pool.writer:
                logger.info("Done recording.")
                try:
                    g_pool.writer.release()
                except RuntimeError:
                    logger.error("No eye video recorded")
                g_pool.writer = None

        g_pool.replace_source = replace_source  # for ndsi capture

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get("ui_config", {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 50)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = "CPU %0.1f"

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 50)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = "jpeg"
        frame_publish_format_recent_warning = False

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning("Process started.")

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification["subject"]
                if subject.startswith("eye_process.should_stop"):
                    if notification["eye_id"] == eye_id:
                        break
                elif subject == "set_detection_mapping_mode":
                    if notification["mode"] == "3d":
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    elif notification["mode"] == "2d":
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                    else:
                        if not isinstance(g_pool.pupil_detector, Detector_Dummy):
                            set_detector(Detector_Dummy)
                        detector_selector.read_only = True
                elif subject == "recording.started":
                    if notification["record_eye"] and g_pool.capture.online:
                        record_path = notification["rec_path"]
                        raw_mode = notification["compression"]
                        logger.info("Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(
                            record_path, "eye{}.mp4".format(eye_id)
                        )
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(
                                video_path, g_pool.capture.frame_rate
                            )
                        elif hasattr(g_pool.capture._recent_frame, "h264_buffer"):
                            g_pool.writer = H264Writer(
                                video_path,
                                g_pool.capture.frame_size[0],
                                g_pool.capture.frame_size[1],
                                g_pool.capture.frame_rate,
                            )
                        else:
                            g_pool.writer = AV_Writer(
                                video_path, g_pool.capture.frame_rate
                            )
                elif subject == "recording.stopped":
                    if g_pool.writer:
                        logger.info("Done recording.")
                        try:
                            g_pool.writer.release()
                        except RuntimeError:
                            logger.error("No eye video recorded")
                        g_pool.writer = None
                elif subject.startswith("meta.should_doc"):
                    ipc_socket.notify(
                        {
                            "subject": "meta.doc",
                            "actor": "eye{}".format(eye_id),
                            "doc": eye.__doc__,
                        }
                    )
                elif subject.startswith("frame_publishing.started"):
                    should_publish_frames = True
                    frame_publish_format = notification.get("format", "jpeg")
                elif subject.startswith("frame_publishing.stopped"):
                    should_publish_frames = False
                    frame_publish_format = "jpeg"
                elif (
                    subject.startswith("start_eye_capture")
                    and notification["target"] == g_pool.process
                ):
                    replace_source(notification["name"], notification["args"])
                elif notification["subject"].startswith("pupil_detector.set_property"):
                    target_process = notification.get("target", g_pool.process)
                    should_apply = target_process == g_pool.process

                    if should_apply:
                        try:
                            property_name = notification["name"]
                            property_value = notification["value"]
                            if "2d" in notification["subject"]:
                                g_pool.pupil_detector.set_2d_detector_property(
                                    property_name, property_value
                                )
                            elif "3d" in notification["subject"]:
                                if not isinstance(g_pool.pupil_detector, Detector_3D):
                                    raise ValueError(
                                        "3d properties are only available"
                                        " if 3d detector is active"
                                    )
                                g_pool.pupil_detector.set_3d_detector_property(
                                    property_name, property_value
                                )
                            else:
                                raise KeyError(
                                    "Notification subject does not "
                                    "specify detector type."
                                )
                            logger.debug(
                                "`{}` property set to {}".format(
                                    property_name, property_value
                                )
                            )
                        except KeyError:
                            logger.error("Malformed notification received")
                            logger.debug(traceback.format_exc())
                        except (ValueError, TypeError):
                            logger.error("Invalid property or value")
                            logger.debug(traceback.format_exc())
                elif notification["subject"].startswith(
                    "pupil_detector.broadcast_properties"
                ):
                    target_process = notification.get("target", g_pool.process)
                    should_respond = target_process == g_pool.process
                    if should_respond:
                        props = g_pool.pupil_detector.get_detector_properties()
                        properties_broadcast = {
                            "subject": "pupil_detector.properties.{}".format(eye_id),
                            **props,  # add properties to broadcast
                        }
                        ipc_socket.notify(properties_broadcast)
                g_pool.capture.on_notify(notification)
                g_pool.capture_manager.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get("frame")
            g_pool.capture_manager.recent_events(event)
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (
                    f_height,
                    f_width,
                ):
                    g_pool.pupil_detector.on_resolution_change(
                        (g_pool.u_r.array_shape[1], g_pool.u_r.array_shape[0]),
                        g_pool.capture.frame_size,
                    )
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        assert data is not None
                    except (AttributeError, AssertionError, NameError):
                        if not frame_publish_format_recent_warning:
                            frame_publish_format_recent_warning = True
                            logger.warning(
                                '{}s are not compatible with format "{}"'.format(
                                    type(frame), frame_publish_format
                                )
                            )
                    else:
                        frame_publish_format_recent_warning = False
                        pupil_socket.send(
                            {
                                "topic": "frame.eye.{}".format(eye_id),
                                "width": frame.width,
                                "height": frame.height,
                                "index": frame.index,
                                "timestamp": frame.timestamp,
                                "format": frame_publish_format,
                                "__raw_data__": [data],
                            }
                        )

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1.0 / dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result = g_pool.pupil_detector.detect(
                    frame, g_pool.u_r, g_pool.display_mode == "algorithm"
                )
                if result is not None:
                    result["id"] = eye_id
                    result["topic"] = "pupil.{}".format(eye_id)
                    pupil_socket.send(result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == "algorithm":
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ("camera_image", "roi"):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    glViewport(0, 0, *camera_render_size)
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
                    if frame and result:
                        if result["method"] == "3d c++":
                            eye_ball = result["projected_sphere"]
                            try:
                                pts = cv2.ellipse2Poly(
                                    (
                                        int(eye_ball["center"][0]),
                                        int(eye_ball["center"][1]),
                                    ),
                                    (
                                        int(eye_ball["axes"][0] / 2),
                                        int(eye_ball["axes"][1] / 2),
                                    ),
                                    int(eye_ball["angle"]),
                                    0,
                                    360,
                                    8,
                                )
                            except ValueError:
                                # invalid (e.g. NaN) ellipse values; skip drawing this frame
                                pass
                            else:
                                draw_polyline(
                                    pts,
                                    2,
                                    RGBA(0.0, 0.9, 0.1, result["model_confidence"]),
                                )
                        if result["confidence"] > 0:
                            if "ellipse" in result:
                                pts = cv2.ellipse2Poly(
                                    (
                                        int(result["ellipse"]["center"][0]),
                                        int(result["ellipse"]["center"][1]),
                                    ),
                                    (
                                        int(result["ellipse"]["axes"][0] / 2),
                                        int(result["ellipse"]["axes"][1] / 2),
                                    ),
                                    int(result["ellipse"]["angle"]),
                                    0,
                                    360,
                                    15,
                                )
                                confidence = result["confidence"] * 0.7
                                draw_polyline(pts, 1, RGBA(1.0, 0, 0, confidence))
                                draw_points(
                                    [result["ellipse"]["center"]],
                                    size=20,
                                    color=RGBA(1.0, 0.0, 0.0, confidence),
                                    sharpness=1.0,
                                )

                    glViewport(0, 0, *camera_render_size)
                    make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == "roi":
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    glViewport(0, 0, *window_size)
                    make_coord_system_pixel_based((*window_size[::-1], 3), g_pool.flip)
                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    unused_elements = g_pool.gui.update()
                    for butt in unused_elements.buttons:
                        uroi_on_mouse_button(*butt)

                    make_coord_system_pixel_based((*window_size[::-1], 3), g_pool.flip)

                    g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            try:
                g_pool.writer.release()
            except RuntimeError:
                logger.error("No eye video recorded")
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # need to do this for windows os
        # save session persistent settings
        session_settings["gui_scale"] = g_pool.gui_user_scale
        session_settings["roi"] = g_pool.u_r.get()
        session_settings["flip"] = g_pool.flip
        session_settings["display_mode"] = g_pool.display_mode
        session_settings["ui_config"] = g_pool.gui.configuration
        session_settings["capture_settings"] = (
            g_pool.capture.class_name,
            g_pool.capture.get_init_dict(),
        )
        session_settings["capture_manager_settings"] = (
            g_pool.capture_manager.class_name,
            g_pool.capture_manager.get_init_dict(),
        )
        session_settings["window_position"] = glfw.glfwGetWindowPos(main_window)
        session_settings["version"] = str(g_pool.version)
        session_settings[
            "last_pupil_detector"
        ] = g_pool.pupil_detector.__class__.__name__
        session_settings[
            "pupil_detector_settings"
        ] = g_pool.pupil_detector.get_settings()

        session_window_size = glfw.glfwGetWindowSize(main_window)
        if 0 not in session_window_size:
            session_settings["window_size"] = session_window_size

        session_settings.close()

        g_pool.capture.deinit_ui()
        g_pool.capture_manager.deinit_ui()
        g_pool.pupil_detector.deinit_ui()

        g_pool.pupil_detector.cleanup()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()

        glfw.glfwDestroyWindow(main_window)
        g_pool.gui.terminate()
        glfw.glfwTerminate()
        logger.info("Process shutting down.")
Exemplo n.º 32
0
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        if glfwWindowShouldClose(self._window):
            self.close_window()
            return

        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = getHDPIFactor(self._window)
        r = self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetFramebufferSize(self._window)
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]

        pad = 90 * r
        screen_pos = (
            map_value(self.display_pos[0], out_range=(pad, p_window_size[0] - pad)),
            map_value(self.display_pos[1], out_range=(p_window_size[1] - pad, pad)),
        )
        alpha = (
            1.0
        )  # interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))

        r2 = 2 * r
        draw_points(
            [screen_pos], size=60 * r2, color=RGBA(0.0, 0.0, 0.0, alpha), sharpness=0.9
        )
        draw_points(
            [screen_pos], size=38 * r2, color=RGBA(1.0, 1.0, 1.0, alpha), sharpness=0.8
        )
        draw_points(
            [screen_pos], size=19 * r2, color=RGBA(0.0, 0.0, 0.0, alpha), sharpness=0.55
        )

        # some feedback on the detection state
        color = (
            RGBA(0.0, 0.8, 0.0, alpha)
            if len(self.markers) and self.on_position
            else RGBA(0.8, 0.0, 0.0, alpha)
        )
        draw_points([screen_pos], size=3 * r2, color=color, sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.0))
            self.glfont.draw_text(
                p_window_size[0] / 2.0,
                p_window_size[1] / 4.0,
                "Touch {} more times to cancel calibration.".format(
                    self.clicks_to_close
                ),
            )

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
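
The `map_value` helper in the excerpt above is a plain linear remap from one range to another, used here to keep the marker inside a padded region of the window. A small standalone check (the window width and pad below are made-up values):

def map_value(value, in_range=(0, 1), out_range=(0, 1)):
    ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
    return (value - in_range[0]) * ratio + out_range[0]

pad = 90            # hypothetical pad in pixels
window_width = 1920 # hypothetical framebuffer width

print(map_value(0.0, out_range=(pad, window_width - pad)))  # 90.0   -> left edge of padded area
print(map_value(0.5, out_range=(pad, window_width - pad)))  # 960.0  -> window center
print(map_value(1.0, out_range=(pad, window_width - pad)))  # 1830.0 -> right edge of padded area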
Exemplo n.º 33
0
def player(g_pool,size):
    """player
        - Shows 9 point calibration pattern
        - Plays a source video synchronized with world process
        - Get src videos from directory (glob)
        - Iterate through videos on each record event
    """


    grid = make_grid()
    grid *= 2.5  # scale to fit
    # player object
    player = Temp()
    player.play_list = glob('src_video/*')
    path_parent = os.path.dirname(os.path.abspath(sys.argv[0]))
    player.playlist = [os.path.join(path_parent, path) for path in player.play_list]
    player.captures = [autoCreateCapture(src) for src in player.playlist]
    print "Player found %i videos in src_video" % len(player.captures)
    player.captures = [c for c in player.captures if c is not None]
    print "Player successfully loaded %i videos in src_video" % len(player.captures)
    # for c in player.captures: c.auto_rewind = False
    player.current_video = 0

    # Callbacks
    def on_resize(w, h):
        adjust_gl_view(w,h)

    def on_key(key, pressed):
        if key == GLFW_KEY_ESC:
            on_close()

    def on_char(char, pressed):
        if char == ord('9'):
            g_pool.cal9.value = True
            g_pool.calibrate.value = True



    def on_close():
        g_pool.quit.value = True
        print "Player Process closing from window"


    # initialize glfw
    glfwInit()
    glfwOpenWindow(size[0], size[1], 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("Player")
    glfwSetWindowPos(100,0)
    glfwDisable(GLFW_AUTO_POLL_EVENTS)


    #Callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)


    #gl state settings
    gl.glEnable( gl.GL_BLEND )
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glClearColor(1.,1.,1.,0.)


    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:

        glfwPollEvents()

        if g_pool.player_refresh.wait(0.01):
            g_pool.player_refresh.clear()

            clear_gl_screen()
            if g_pool.cal9.value:
                circle_id,step = g_pool.cal9_circle_id.value,g_pool.cal9_step.value
                gl.glColor4f(0.0,0.0,0.0,1.0)
                gl.glPointSize(40)
                gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
                gl.glBegin(gl.GL_POINTS)
                for p in grid:
                    gl.glVertex3f(p[0],p[1],0.0)
                gl.glEnd()

                # display the animated target dot
                gl.glPointSize(40 * (1.01 - (step + 1) / 80.0))
                gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ZERO)
                if g_pool.ref_x.value or g_pool.ref_y.value:  # pattern detected
                    gl.glColor4f(0.0,0.5,0.0,1.0)
                else:
                    gl.glColor4f(0.5,0.0,0.0,1.0)
                gl.glBegin(gl.GL_POINTS)
                gl.glVertex3f(grid[circle_id][0],grid[circle_id][1],0.0)
                gl.glEnd()

            elif g_pool.play.value:
                s, img = player.captures[player.current_video].read_RGB()
                if s:
                    draw_gl_texture(img)
                else:
                    player.captures[player.current_video].rewind()
                    player.current_video += 1
                    if player.current_video >= len(player.captures):
                        player.current_video = 0
                    g_pool.play.value = False
            glfwSwapBuffers()

    glfwCloseWindow()
    glfwTerminate()
    print "PLAYER Process closed"
Exemplo n.º 34
0
def show_no_rec_window():
    from pyglui.pyfontstash import fontstash
    from pyglui.ui import get_roboto_font_path
    global rec_dir
    def on_drop(window, count, paths):
        global rec_dir
        rec_dir = paths[0].decode('utf-8')

    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(user_dir, "user_settings"))
    if VersionFormat(session_settings.get("version", '0.0')) != get_version(version_file):
        logger.info("Session setting are from a  different version of this app. I will not use those.")
        session_settings.clear()
    w, h = session_settings.get('window_size', (1280, 720))
    window_pos = session_settings.get('window_position', window_position_default)

    glfwWindowHint(GLFW_RESIZABLE, 0)
    window = glfwCreateWindow(w, h, 'Pupil Player')
    glfwWindowHint(GLFW_RESIZABLE, 1)

    glfwMakeContextCurrent(window)
    glfwSetWindowPos(window, window_pos[0], window_pos[1])
    glfwSetDropCallback(window, on_drop)

    glfont = fontstash.Context()
    glfont.add_font('roboto', get_roboto_font_path())
    glfont.set_align_string(v_align="center", h_align="middle")
    glfont.set_color_float((0.2, 0.2, 0.2, 0.9))
    gl_utils.basic_gl_setup()
    glClearColor(0.5, .5, 0.5, 0.0)
    text = 'Drop a recording directory onto this window.'
    tip = '(Tip: You can drop a recording directory onto the app icon.)'
    # text = "Please supply a Pupil recording directory as first arg when calling Pupil Player."
    while not glfwWindowShouldClose(window):

        fb_size = glfwGetFramebufferSize(window)
        hdpi_factor = float(fb_size[0] / glfwGetWindowSize(window)[0])
        gl_utils.adjust_gl_view(*fb_size)

        if rec_dir:
            if is_pupil_rec_dir(rec_dir):
                logger.info("Starting new session with '{}'".format(rec_dir))
                text = "Updating recording format."
                tip = "This may take a while!"
            else:
                logger.error("'{}' is not a valid pupil recording".format(rec_dir))
                tip = "Oops! That was not a valid recording."
                rec_dir = None

        gl_utils.clear_gl_screen()
        glfont.set_blur(10.5)
        glfont.set_color_float((0.0, 0.0, 0.0, 1.))
        glfont.set_size(w/25.*hdpi_factor)
        glfont.draw_text(w/2*hdpi_factor, .3*h*hdpi_factor, text)
        glfont.set_size(w/30.*hdpi_factor)
        glfont.draw_text(w/2*hdpi_factor, .4*h*hdpi_factor, tip)
        glfont.set_blur(0.96)
        glfont.set_color_float((1., 1., 1., 1.))
        glfont.set_size(w/25.*hdpi_factor)
        glfont.draw_text(w/2*hdpi_factor, .3*h*hdpi_factor, text)
        glfont.set_size(w/30.*hdpi_factor)
        glfont.draw_text(w/2*hdpi_factor, .4*h*hdpi_factor, tip)

        glfwSwapBuffers(window)

        if rec_dir:
            update_recording_to_recent(rec_dir)
            glfwSetWindowShouldClose(window, True)

        glfwPollEvents()

    session_settings['window_position'] = glfwGetWindowPos(window)
    session_settings['version'] = str(get_version(version_file))
    session_settings.close()
    del glfont
    glfwDestroyWindow(window)
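
The text rendering above scales font sizes and draw positions by `hdpi_factor`, the ratio between framebuffer pixels and window (screen) coordinates. A small worked example with invented sizes:

# hypothetical values for a HiDPI ("retina") display
fb_width, fb_height = 2560, 1440    # what glfwGetFramebufferSize would report
win_width, win_height = 1280, 720   # what glfwGetWindowSize would report

hdpi_factor = float(fb_width / win_width)       # 2.0 on this hypothetical display

w, h = win_width, win_height
font_size = w / 25. * hdpi_factor               # font size in framebuffer pixels
text_x = w / 2 * hdpi_factor                    # horizontal center, framebuffer pixels
text_y = .3 * h * hdpi_factor                   # 30 % down the window, framebuffer pixels
print(hdpi_factor, font_size, text_x, text_y)   # 2.0 102.4 1280.0 432.0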
Exemplo n.º 35
0
File: main.py Project: elmorg/pupil
def show_no_rec_window():
    from pyglui.pyfontstash import fontstash
    from pyglui.ui import get_roboto_font_path

    def on_drop(window, count, paths):
        for x in range(count):
            new_rec_dir = paths[x]
            if is_pupil_rec_dir(new_rec_dir):
                logger.debug("Starting new session with '%s'" % new_rec_dir)
                global rec_dir
                rec_dir = new_rec_dir
                glfwSetWindowShouldClose(window, True)
            else:
                logger.error("'%s' is not a valid pupil recording" %
                             new_rec_dir)

    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(user_dir, "user_settings"))
    if session_settings.get("version",
                            VersionFormat('0.0')) < get_version(version_file):
        logger.info(
            "Session setting are from older version of this app. I will not use those."
        )
        session_settings.clear()
    w, h = session_settings.get('window_size', (1280, 720))
    window_pos = session_settings.get('window_position', (0, 0))

    glfwWindowHint(GLFW_RESIZABLE, 0)
    window = glfwCreateWindow(w, h, 'Pupil Player')
    glfwWindowHint(GLFW_RESIZABLE, 1)

    glfwMakeContextCurrent(window)
    glfwSetWindowPos(window, window_pos[0], window_pos[1])
    glfwSetDropCallback(window, on_drop)

    adjust_gl_view(w, h)
    glfont = fontstash.Context()
    glfont.add_font('roboto', get_roboto_font_path())
    glfont.set_align_string(v_align="center", h_align="middle")
    glfont.set_color_float((0.2, 0.2, 0.2, 0.9))
    basic_gl_setup()
    glClearColor(0.5, .5, 0.5, 0.0)
    text = 'Drop a recording directory onto this window.'
    tip = '(Tip: You can drop a recording directory onto the app icon.)'
    # text = "Please supply a Pupil recording directory as first arg when calling Pupil Player."
    while not glfwWindowShouldClose(window):
        clear_gl_screen()
        glfont.set_blur(10.5)
        glfont.set_color_float((0.0, 0.0, 0.0, 1.))
        glfont.set_size(w / 25.)
        glfont.draw_text(w / 2, .3 * h, text)
        glfont.set_size(w / 30.)
        glfont.draw_text(w / 2, .4 * h, tip)
        glfont.set_blur(0.96)
        glfont.set_color_float((1., 1., 1., 1.))
        glfont.set_size(w / 25.)
        glfont.draw_text(w / 2, .3 * h, text)
        glfont.set_size(w / 30.)
        glfont.draw_text(w / 2, .4 * h, tip)
        glfwSwapBuffers(window)
        glfwPollEvents()

    session_settings['window_position'] = glfwGetWindowPos(window)
    session_settings['version'] = get_version(version_file)
    session_settings.close()
    del glfont
    glfwDestroyWindow(window)
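
`Persistent_Dict` is defined elsewhere in the project; the pattern shown above (load settings, discard them if they come from a different app version, write them back on shutdown) only needs a dict that survives process restarts. A hypothetical stand-in built on the standard library, for illustration only:

import os
import pickle

class PersistentDict(dict):
    # Hypothetical stand-in: a plain dict that is pickled to disk on close().
    def __init__(self, file_path):
        super(PersistentDict, self).__init__()
        self._file_path = file_path
        if os.path.exists(file_path):
            with open(file_path, 'rb') as f:
                self.update(pickle.load(f))

    def close(self):
        with open(self._file_path, 'wb') as f:
            pickle.dump(dict(self), f)

# usage mirroring the examples above:
# session_settings = PersistentDict(os.path.join(user_dir, "user_settings"))
# w, h = session_settings.get('window_size', (1280, 720))
# ...
# session_settings['window_position'] = (0, 0)
# session_settings.close()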
Exemplo n.º 36
0
def eye(g_pool, cap_src, cap_size, pipe_to_world, eye_id=0):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir,
                                          'eye%s.log' % eye_id),
                             mode='w')
    # fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logger.level + 10)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        'Eye' + str(eye_id) +
        ' Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter(
        'EYE' + str(eye_id) +
        ' Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    #UI Platform tweaks
    if platform.system() == 'Linux':
        scroll_factor = 10.0
        window_position_default = (600, 300 * eye_id)
    elif platform.system() == 'Windows':
        scroll_factor = 1.0
        window_position_default = (600, 31 + 300 * eye_id)
    else:
        scroll_factor = 1.0
        window_position_default = (600, 300 * eye_id)

    # Callback functions
    def on_resize(window, w, h):
        if not g_pool.iconified:
            active_window = glfwGetCurrentContext()
            glfwMakeContextCurrent(window)
            g_pool.gui.update_window(w, h)
            graph.adjust_size(w, h)
            adjust_gl_view(w, h)
            glfwMakeContextCurrent(active_window)

    def on_key(window, key, scancode, action, mods):
        g_pool.gui.update_key(key, scancode, action, mods)

    def on_char(window, char):
        g_pool.gui.update_char(char)

    def on_iconify(window, iconified):
        g_pool.iconified = iconified

    def on_button(window, button, action, mods):
        if g_pool.display_mode == 'roi':
            if action == GLFW_RELEASE and u_r.active_edit_pt:
                u_r.active_edit_pt = False
                return  # if the ROI interacts we don't want the GUI to interact as well
            elif action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos, glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(
                    pos, (frame.width, frame.height))  # Position in img pixels
                if u_r.mouse_over_edit_pt(pos, u_r.handle_size + 40,
                                          u_r.handle_size + 40):
                    return  # if the ROI interacts we don't want the GUI to interact as well

        g_pool.gui.update_button(button, action, mods)

    def on_pos(window, x, y):
        hdpi_factor = float(
            glfwGetFramebufferSize(window)[0] / glfwGetWindowSize(window)[0])
        g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

        if u_r.active_edit_pt:
            pos = normalize((x, y), glfwGetWindowSize(main_window))
            if g_pool.flip:
                pos = 1 - pos[0], 1 - pos[1]
            pos = denormalize(pos, (frame.width, frame.height))
            u_r.move_vertex(u_r.active_pt_idx, pos)

    def on_scroll(window, x, y):
        g_pool.gui.update_scroll(x, y * scroll_factor)

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')

    # load session persistent settings
    session_settings = Persistent_Dict(
        os.path.join(g_pool.user_dir, 'user_settings_eye%s' % eye_id))
    if session_settings.get("version", VersionFormat('0.0')) < g_pool.version:
        logger.info(
            "Session setting are from older version of this app. I will not use those."
        )
        session_settings.clear()
    # Initialize capture
    cap = autoCreateCapture(cap_src, timebase=g_pool.timebase)
    default_settings = {'frame_size': cap_size, 'frame_rate': 30}
    previous_settings = session_settings.get('capture_settings', None)
    if previous_settings and previous_settings['name'] == cap.name:
        cap.settings = previous_settings
    else:
        cap.settings = default_settings

    # Test capture
    try:
        frame = cap.get_frame()
    except CameraCaptureError:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return

    #signal world that we are ready to go
    pipe_to_world.send('eye%s process ready' % eye_id)

    # any object we attach to the g_pool object *from now on* will only be visible to this process!
    # vars should be declared here to make them visible to the code reader.
    g_pool.iconified = False
    g_pool.capture = cap
    g_pool.flip = session_settings.get('flip', False)
    g_pool.display_mode = session_settings.get('display_mode', 'camera_image')
    g_pool.display_mode_info_text = {
        'camera_image':
        "Raw eye camera image. This uses the least amount of CPU power",
        'roi':
        "Click and drag on the blue circles to adjust the region of interest. The region should be a small as possible but big enough to capture to pupil in its movements",
        'algorithm':
        "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters with in the Pupil Detection menu below."
    }
    # g_pool.draw_pupil = session_settings.get('draw_pupil',True)

    u_r = UIRoi(frame.img.shape)
    u_r.set(session_settings.get('roi', u_r.get()))

    writer = None

    pupil_detector = Canny_Detector(g_pool)

    # UI callback functions
    def set_scale(new_scale):
        g_pool.gui.scale = new_scale
        g_pool.gui.collect_menus()

    def set_display_mode_info(val):
        g_pool.display_mode = val
        g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

    # Initialize glfw
    glfwInit()
    if g_pool.binocular:
        title = "Binocular eye %s" % eye_id
    else:
        title = 'Eye'
    width, height = session_settings.get('window_size',
                                         (frame.width, frame.height))
    main_window = glfwCreateWindow(width, height, title, None, None)
    window_pos = session_settings.get('window_position',
                                      window_position_default)
    glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
    glfwMakeContextCurrent(main_window)
    cygl_init()

    # gl_state settings
    basic_gl_setup()
    g_pool.image_tex = Named_Texture()
    g_pool.image_tex.update_from_frame(frame)
    glfwSwapInterval(0)

    #setup GUI
    g_pool.gui = ui.UI()
    g_pool.gui.scale = session_settings.get('gui_scale', 1)
    g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                       pos=(-300, 0),
                                       size=(0, 0),
                                       header_pos='left')
    general_settings = ui.Growing_Menu('General')
    general_settings.append(
        ui.Slider('scale',
                  g_pool.gui,
                  setter=set_scale,
                  step=.05,
                  min=1.,
                  max=2.5,
                  label='Interface Size'))
    general_settings.append(
        ui.Button(
            'Reset window size',
            lambda: glfwSetWindowSize(main_window, frame.width, frame.height)))
    general_settings.append(
        ui.Selector('display_mode',
                    g_pool,
                    setter=set_display_mode_info,
                    selection=['camera_image', 'roi', 'algorithm'],
                    labels=['Camera Image', 'ROI', 'Algorithm'],
                    label="Mode"))
    general_settings.append(
        ui.Switch('flip', g_pool, label='Flip image display'))
    g_pool.display_mode_info = ui.Info_Text(
        g_pool.display_mode_info_text[g_pool.display_mode])
    general_settings.append(g_pool.display_mode_info)
    g_pool.sidebar.append(general_settings)
    g_pool.gui.append(g_pool.sidebar)
    # let the camera add its GUI
    g_pool.capture.init_gui(g_pool.sidebar)
    # let detector add its GUI
    pupil_detector.init_gui(g_pool.sidebar)

    # Register callbacks main_window
    glfwSetFramebufferSizeCallback(main_window, on_resize)
    glfwSetWindowCloseCallback(main_window, on_close)
    glfwSetWindowIconifyCallback(main_window, on_iconify)
    glfwSetKeyCallback(main_window, on_key)
    glfwSetCharCallback(main_window, on_char)
    glfwSetMouseButtonCallback(main_window, on_button)
    glfwSetCursorPosCallback(main_window, on_pos)
    glfwSetScrollCallback(main_window, on_scroll)

    #set the last saved window size
    on_resize(main_window, *glfwGetWindowSize(main_window))

    # load last gui configuration
    g_pool.gui.configuration = session_settings.get('ui_config', {})

    #set up performance graphs
    pid = os.getpid()
    ps = psutil.Process(pid)
    ts = frame.timestamp

    cpu_graph = graph.Bar_Graph()
    cpu_graph.pos = (20, 130)
    cpu_graph.update_fn = ps.cpu_percent
    cpu_graph.update_rate = 5
    cpu_graph.label = 'CPU %0.1f'

    fps_graph = graph.Bar_Graph()
    fps_graph.pos = (140, 130)
    fps_graph.update_rate = 5
    fps_graph.label = "%0.0f FPS"

    #create a timer to control window update frequency
    window_update_timer = timer(1 / 60.)

    def window_should_update():
        return next(window_update_timer)

    # Event loop
    while not g_pool.quit.value:
        # Get an image from the grabber
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Capture from Camera Failed. Stopping.")
            break
        except EndofVideoFileError:
            logger.warning("Video File is done. Stopping")
            break

        # update performance graphs
        t = frame.timestamp
        dt, ts = t - ts, t
        try:
            fps_graph.add(1. / dt)
        except ZeroDivisionError:
            pass
        cpu_graph.update()

        ###  RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if pipe_to_world.poll():
            command, raw_mode = pipe_to_world.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %s" % record_path)
                timestamps_path = os.path.join(record_path,
                                               "eye%s_timestamps.npy" % eye_id)
                if raw_mode and frame.jpeg_buffer:
                    video_path = os.path.join(record_path,
                                              "eye%s.mp4" % eye_id)
                    writer = JPEG_Writer(video_path, cap.frame_rate)
                else:
                    video_path = os.path.join(record_path,
                                              "eye%s.mp4" % eye_id)
                    writer = AV_Writer(video_path, cap.frame_rate)
                timestamps = []
            else:
                logger.info("Done recording.")
                writer.release()
                writer = None
                np.save(timestamps_path, np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write_video_frame(frame)
            timestamps.append(frame.timestamp)

        # pupil ellipse detection
        result = pupil_detector.detect(
            frame, user_roi=u_r, visualize=g_pool.display_mode == 'algorithm')
        result['id'] = eye_id
        # stream the result
        g_pool.pupil_queue.put(result)

        # GL drawing
        if window_should_update():
            if not g_pool.iconified:
                glfwMakeContextCurrent(main_window)
                clear_gl_screen()

                # switch to work in normalized coordinate space
                if g_pool.display_mode == 'algorithm':
                    g_pool.image_tex.update_from_ndarray(frame.img)
                elif g_pool.display_mode in ('camera_image', 'roi'):
                    g_pool.image_tex.update_from_ndarray(frame.gray)
                else:
                    pass

                make_coord_system_norm_based(g_pool.flip)
                g_pool.image_tex.draw()
                # switch to work in pixel space
                make_coord_system_pixel_based((frame.height, frame.width, 3),
                                              g_pool.flip)

                if result['confidence'] > 0:
                    if 'axes' in result:
                        pts = cv2.ellipse2Poly((int(
                            result['center'][0]), int(result['center'][1])),
                                               (int(result['axes'][0] / 2),
                                                int(result['axes'][1] / 2)),
                                               int(result['angle']), 0, 360,
                                               15)
                        cygl_draw_polyline(pts, 1, cygl_rgba(1., 0, 0, .5))
                    cygl_draw_points([result['center']],
                                     size=20,
                                     color=cygl_rgba(1., 0., 0., .5),
                                     sharpness=1.)

                # render graphs
                graph.push_view()
                fps_graph.draw()
                cpu_graph.draw()
                graph.pop_view()

                # render GUI
                g_pool.gui.update()

                #render the ROI
                if g_pool.display_mode == 'roi':
                    u_r.draw(g_pool.gui.scale)

                #update screen
                glfwSwapBuffers(main_window)
            glfwPollEvents()

    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path, np.asarray(timestamps))

    glfwRestoreWindow(main_window)  #need to do this for windows os
    # save session persistent settings
    session_settings['gui_scale'] = g_pool.gui.scale
    session_settings['roi'] = u_r.get()
    session_settings['flip'] = g_pool.flip
    session_settings['display_mode'] = g_pool.display_mode
    session_settings['ui_config'] = g_pool.gui.configuration
    session_settings['capture_settings'] = g_pool.capture.settings
    session_settings['window_size'] = glfwGetWindowSize(main_window)
    session_settings['window_position'] = glfwGetWindowPos(main_window)
    session_settings['version'] = g_pool.version
    session_settings.close()

    pupil_detector.cleanup()
    g_pool.gui.terminate()
    glfwDestroyWindow(main_window)
    glfwTerminate()
    cap.close()

    #flushing queue in case world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")
Exemplo n.º 37
0
def eye(g_pool, cap_src, cap_size):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir, 'eye.log'),
                             mode='w')
    fh.setLevel(logging.INFO)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        'EYE Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter(
        'E Y E Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    # Callback functions
    def on_resize(window, w, h):
        adjust_gl_view(w, h)
        atb.TwWindowSize(w, h)

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key, int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window, char):
        if not atb.TwEventCharGLFW(char, 1):
            pass

    def on_button(window, button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button, int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos, glfwGetWindowSize(window))
                pos = denormalize(
                    pos, (frame.img.shape[1],
                          frame.img.shape[0]))  # pos in frame.img pixels
                u_r.setStart(pos)
                bar.draw_roi.value = 1
            else:
                bar.draw_roi.value = 0

    def on_pos(window, x, y):
        if atb.TwMouseMotion(int(x), int(y)):
            pass
        if bar.draw_roi.value == 1:
            pos = x, y
            pos = normalize(pos, glfwGetWindowSize(window))
            pos = denormalize(pos,
                              (frame.img.shape[1],
                               frame.img.shape[0]))  # pos in frame.img pixels
            u_r.setEnd(pos)

    def on_scroll(window, x, y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')

    # Helper functions called by the main atb bar
    def start_roi():
        bar.display.value = 1
        bar.draw_roi.value = 2

    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)
            bar.dt.value = dt

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value

    # load session persistent settings
    session_settings = shelve.open(os.path.join(g_pool.user_dir,
                                                'user_settings_eye'),
                                   protocol=2)

    def load(var_name, default):
        return session_settings.get(var_name, default)

    def save(var_name, var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(cap_src, cap_size)

    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    # check if it works
    frame = cap.get_frame()
    if frame.img is None:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height, width = frame.img.shape[:2]
    cap.auto_rewind = False

    u_r = Roi(frame.img.shape)
    u_r.set(load('roi', default=None))

    writer = None

    pupil_detector = Canny_Detector(g_pool)

    atb.init()
    # Create main ATB Controls
    bar = atb.Bar(name="Eye",
                  label="Display",
                  help="Scene controls",
                  color=(50, 50, 50),
                  alpha=100,
                  text='light',
                  position=(10, 10),
                  refresh=.3,
                  size=(200, 100))
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.dt = c_float(0.0)
    bar.sleep = c_float(0.0)
    bar.display = c_int(load('bar.display', 0))
    bar.draw_pupil = c_bool(load('bar.draw_pupil', True))
    bar.draw_roi = c_int(0)

    dispay_mode_enum = atb.enum("Mode", {
        "Camera Image": 0,
        "Region of Interest": 1,
        "Algorithm": 2
    })

    bar.add_var("FPS", bar.fps, step=1., readonly=True)
    bar.add_var("Mode",
                bar.display,
                vtype=dispay_mode_enum,
                help="select the view-mode")
    bar.add_var("Show_Pupil_Point", bar.draw_pupil)
    bar.add_button("Draw_ROI",
                   start_roi,
                   help="drag on screen to select a region of interest")

    bar.add_var("SlowDown", bar.sleep, step=0.01, min=0.0)
    bar.add_var("SaveSettings&Exit", g_pool.quit)

    cap.create_atb_bar(pos=(220, 10))

    # create a bar for the detector
    pupil_detector.create_atb_bar(pos=(10, 120))

    glfwInit()
    window = glfwCreateWindow(width, height, "Eye", None, None)
    glfwMakeContextCurrent(window)

    # Register callbacks window
    glfwSetWindowSizeCallback(window, on_resize)
    glfwSetWindowCloseCallback(window, on_close)
    glfwSetKeyCallback(window, on_key)
    glfwSetCharCallback(window, on_char)
    glfwSetMouseButtonCallback(window, on_button)
    glfwSetCursorPosCallback(window, on_pos)
    glfwSetScrollCallback(window, on_scroll)

    glfwSetWindowPos(window, 800, 0)
    on_resize(window, width, height)

    # gl_state settings
    basic_gl_setup()

    # refresh speed settings
    glfwSwapInterval(0)

    # event loop
    while not g_pool.quit.value:
        frame = cap.get_frame()
        if frame.img is None:
            break
        update_fps()
        sleep(bar.sleep.value)  # for debugging only

        if pupil_detector.should_sleep:
            sleep(16)
            pupil_detector.should_sleep = False

        ###  RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if g_pool.eye_rx.poll():
            command = g_pool.eye_rx.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %(record_path)s")
                video_path = os.path.join(record_path, "eye.avi")
                timestamps_path = os.path.join(record_path,
                                               "eye_timestamps.npy")
                writer = cv2.VideoWriter(
                    video_path, cv2.cv.CV_FOURCC(*'DIVX'), bar.fps.value,
                    (frame.img.shape[1], frame.img.shape[0]))
                timestamps = []
            else:
                logger.info("Done recording eye.")
                writer = None
                np.save(timestamps_path, np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write(frame.img)
            timestamps.append(frame.timestamp)

        # pupil ellipse detection
        result = pupil_detector.detect(frame,
                                       user_roi=u_r,
                                       visualize=bar.display.value == 2)
        # stream the result
        g_pool.pupil_queue.put(result)

        # VISUALIZATION direct visualizations on the frame.img data
        if bar.display.value == 1:
            # and a solid (white) frame around the user defined ROI
            r_img = frame.img[u_r.lY:u_r.uY, u_r.lX:u_r.uX]
            r_img[:, 0] = 255, 255, 255
            r_img[:, -1] = 255, 255, 255
            r_img[0, :] = 255, 255, 255
            r_img[-1, :] = 255, 255, 255

        # GL-drawing
        clear_gl_screen()
        draw_gl_texture(frame.img)

        if result['norm_pupil'] is not None and bar.draw_pupil.value:
            if 'axes' in result:
                pts = cv2.ellipse2Poly(
                    (int(result['center'][0]), int(result['center'][1])),
                    (int(result["axes"][0] / 2), int(result["axes"][1] / 2)),
                    int(result["angle"]), 0, 360, 15)
                draw_gl_polyline(pts, (1., 0, 0, .5))
            draw_gl_point_norm(result['norm_pupil'], color=(1., 0., 0., 0.5))

        atb.draw()
        glfwSwapBuffers(window)
        glfwPollEvents()

    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path, np.asarray(timestamps))

    # save session persistent settings
    save('roi', u_r.get())
    save('bar.display', bar.display.value)
    save('bar.draw_pupil', bar.draw_pupil.value)
    session_settings.close()

    pupil_detector.cleanup()
    cap.close()
    atb.terminate()
    glfwDestroyWindow(window)
    glfwTerminate()

    # flushing queue in case world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")
Exemplo n.º 38
0
def world(g_pool):
    """world
    """

    # Callback functions
    def on_resize(w, h):
        atb.TwWindowSize(w, h)
        adjust_gl_view(w, h)

    def on_key(key, pressed):
        if not atb.TwEventKeyboardGLFW(key,pressed):
            if pressed:
                if key == GLFW_KEY_ESC:
                    on_close()

    def on_char(char, pressed):
        if not atb.TwEventCharGLFW(char,pressed):
            pass

    def on_button(button, pressed):
        if not atb.TwEventMouseButtonGLFW(button,pressed):
            if pressed:
                pos = glfwGetMousePos()
                pos = normalize(pos,glfwGetWindowSize())
                pos = denormalize(pos,(img.shape[1],img.shape[0]) ) #pos in img pixels
                ref.detector.new_ref(pos)


    def on_pos(x, y):
        if atb.TwMouseMotion(x,y):
            pass

    def on_scroll(pos):
        if not atb.TwMouseWheel(pos):
            pass

    def on_close():
        g_pool.quit.value = True
        print "WORLD Process closing from window"

    ref = Temp()
    ref.detector = no_Detector(g_pool.calibrate,g_pool.ref_x,g_pool.ref_y)
    ###objects as variable containers
    # pattern object
    pattern = Temp()
    pattern.centers = None
    pattern.obj_grid = gen_pattern_grid((4, 11))  # calib grid
    pattern.obj_points = []
    pattern.img_points = []
    pattern.map = (0, 2, 7, 16, 21, 23, 39, 40, 42)
    pattern.board_centers = None

    # gaze object
    gaze = Temp()
    gaze.map_coords = (0., 0.)
    gaze.image_coords = (0., 0.)
    # record object
    record = Temp()
    record.writer = None
    record.path_parent = os.path.dirname(os.path.abspath(sys.argv[0]))
    record.path = None
    record.counter = 0

    # initialize capture, check if it works
    cap = autoCreateCapture(g_pool.world_src, g_pool.world_size)
    if cap is None:
        print "WORLD: Error could not create Capture"
        return
    s, img = cap.read_RGB()
    if not s:
        print "WORLD: Error could not get image"
        return
    height,width = img.shape[:2]


    ###helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1 / dt - bar.fps.value)

    def set_window_size(mode,data):
        height,width = img.shape[:2]
        ratio = (1,.75,.5,.25)[mode]
        w,h = int(width*ratio),int(height*ratio)
        glfwSetWindowSize(w,h)
        data.value=mode #update the bar.value

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value


    def start_calibration():

        c_type = bar.calibration_type.value
        if  c_type == cal_type["Directed 9-Point"]:
            print 'WORLD: Starting Directed 9-Point calibration.'
            ref.detector = Nine_Point_Detector(global_calibrate=g_pool.calibrate,
                                            shared_x=g_pool.ref_x,
                                            shared_y=g_pool.ref_y,
                                            shared_stage=g_pool.cal9_stage,
                                            shared_step=g_pool.cal9_step,
                                            shared_cal9_active=g_pool.cal9,
                                            shared_circle_id=g_pool.cal9_circle_id,
                                            auto_advance=False)
        elif c_type == cal_type["Automated 9-Point"]:
            print 'WORLD: Starting Automated 9-Point calibration.'
            ref.detector = Nine_Point_Detector(global_calibrate=g_pool.calibrate,
                                            shared_x=g_pool.ref_x,
                                            shared_y=g_pool.ref_y,
                                            shared_stage=g_pool.cal9_stage,
                                            shared_step=g_pool.cal9_step,
                                            shared_cal9_active=g_pool.cal9,
                                            shared_circle_id=g_pool.cal9_circle_id,
                                            auto_advance=True)
        elif c_type == cal_type["Natural Features"]:
            print 'WORLD: Starting Natural Features calibration.'
            ref.detector = Natural_Features_Detector(global_calibrate=g_pool.calibrate,
                                                    shared_x=g_pool.ref_x,
                                                    shared_y=g_pool.ref_y)
        elif c_type == cal_type["Black Dot"]:
            print 'WORLD: Starting Black Dot calibration.'
            ref.detector = Black_Dot_Detector(global_calibrate=g_pool.calibrate,
                                            shared_x=g_pool.ref_x,
                                            shared_y=g_pool.ref_y)
    def advance_calibration():
        ref.detector.advance()

    def stop_calibration():
        ref.detector = no_Detector(global_calibrate=g_pool.calibrate,
                                shared_x=g_pool.ref_x,
                                shared_y=g_pool.ref_y)

    ### Initialize AntTweakBar: the main controls bar is an atb.Bar
    atb.init()
    bar = atb.Bar(name = "World", label="Controls",
            help="Scene controls", color=(50, 50, 50), alpha=100,
            text='light', position=(10, 10),refresh=.3, size=(200, 200))
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.calibration_type = c_int(1)
    bar.show_calib_result = c_bool(0)
    bar.calibration_images = False
    bar.record_video = c_bool(0)
    bar.record_running = c_bool(0)
    bar.play = g_pool.play
    bar.window_size = c_int(0)
    window_size_enum = atb.enum("Display Size",{"Full":0, "Medium":1,"Half":2,"Mini":3})
    cal_type = {"Directed 9-Point":0,"Automated 9-Point":1,"Natural Features":3,"Black Dot":4}#"Manual 9-Point":2
    calibrate_type_enum = atb.enum("Calibration Method",cal_type)
    bar.rec_name = create_string_buffer(512)

    # play and record can be tied together via pointers to the objects
    # bar.play = bar.record_video
    bar.add_var("FPS", bar.fps, step=1., readonly=True)
    bar.add_var("Display_Size", vtype=window_size_enum,setter=set_window_size,getter=get_from_data,data=bar.window_size)
    bar.add_var("Cal/Calibration_Method",bar.calibration_type, vtype=calibrate_type_enum)
    bar.add_button("Cal/Start_Calibration",start_calibration, key='c')
    bar.add_button("Cal/Next_Point",advance_calibration,key="SPACE", help="Hit space to calibrate on next dot")
    bar.add_button("Cal/Stop_Calibration",stop_calibration, key='d')
    bar.add_var("Cal/show_calibration_result",bar.show_calib_result, help="yellow lines indecate fit error, red outline shows the calibrated area.")
    bar.add_var("Rec/rec_name",bar.rec_name)
    bar.add_var("Rec/Record_Video", bar.record_video, key="r", help="Start/Stop Recording")
    bar.add_separator("Sep1")
    bar.add_var("Play Source Video", bar.play)
    bar.add_var("Exit", g_pool.quit)

    # add v4l2 camera controls to a separate ATB bar
    if cap.controls is not None:
        c_bar = atb.Bar(name="Camera_Controls", label=cap.name,
            help="UVC Camera Controls", color=(50,50,50), alpha=100,
            text='light',position=(220, 10),refresh=2., size=(200, 200))

        # c_bar.add_var("auto_refresher",vtype=atb.TW_TYPE_BOOL8,getter=cap.uvc_refresh_all,setter=None,readonly=True)
        # c_bar.define(definition='visible=0', varname="auto_refresher")

        sorted_controls = [c for c in cap.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        for control in sorted_controls:
            name = control.atb_name
            if control.type=="bool":
                c_bar.add_var(name,vtype=atb.TW_TYPE_BOOL8,getter=control.get_val,setter=control.set_val)
            elif control.type=='int':
                c_bar.add_var(name,vtype=atb.TW_TYPE_INT32,getter=control.get_val,setter=control.set_val)
                c_bar.define(definition='min='+str(control.min),   varname=name)
                c_bar.define(definition='max='+str(control.max),   varname=name)
                c_bar.define(definition='step='+str(control.step), varname=name)
            elif control.type=="menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype= atb.enum(name,control.menu)
                c_bar.add_var(name,vtype=vtype,getter=control.get_val,setter=control.set_val)
                if control.menu is None:
                    c_bar.define(definition='min='+str(control.min),   varname=name)
                    c_bar.define(definition='max='+str(control.max),   varname=name)
                    c_bar.define(definition='step='+str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # c_bar.define(definition='readonly=1',varname=control.name)

        c_bar.add_button("refresh",cap.update_from_device)
        c_bar.add_button("load defaults",cap.load_defaults)

    else:
        c_bar = None

    ### Initialize glfw
    glfwInit()
    height,width = img.shape[:2]
    glfwOpenWindow(width, height, 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("World")
    glfwSetWindowPos(0,0)

    #register callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)
    glfwSetMouseButtonCallback(on_button)
    glfwSetMousePosCallback(on_pos)
    glfwSetMouseWheelCallback(on_scroll)

    #gl_state settings
    import OpenGL.GL as gl
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glPointSize(20)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)


    ###event loop
    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        update_fps()
        # get an image from the grabber
        s, img = cap.read()
        ref.detector.detect(img)
        if ref.detector.is_done():
            stop_calibration()

        g_pool.player_refresh.set()


        # # gather pattern centers and find cam intrinsics
        # if bar.screen_shot and pattern.centers is not None:
        #     bar.screen_shot = False
        #     # calibrate the camera intrinsics if the board is found
        #     # append list of circle grid center points to pattern.img_points
        #     # append generic list of circle grid pattern type to  pattern.obj_points
        #     pattern.centers = circle_grid(img)
        #     pattern.img_points.append(pattern.centers)
        #     pattern.obj_points.append(pattern.obj_grid)
        #     print "Number of Patterns Captured:", len(pattern.img_points)
        #     #if pattern.img_points.shape[0] > 10:
        #     if len(pattern.img_points) > 10:
        #         camera_matrix, dist_coefs = calibrate_camera(np.asarray(pattern.img_points),
        #                                             np.asarray(pattern.obj_points),
        #                                             (img.shape[1], img.shape[0]))
        #         np.save("camera_matrix.npy", camera_matrix)
        #         np.save("dist_coefs.npy", dist_coefs)
        #         pattern.img_points = []
        #         bar.find_pattern.value = False

        ### Setup recording process
        if bar.record_video and not bar.record_running:
            record.path = os.path.join(record.path_parent, "data%03d/" % record.counter)
            while True:
                try:
                    os.mkdir(record.path)
                    break
                except OSError:
                    print "We don't want to overwrite data, incrementing counter & trying to make new data folder"
                    record.counter += 1
                    record.path = os.path.join(record.path_parent, "data%03d/" % record.counter)

            #video
            video_path = os.path.join(record.path, "world.avi")
            #FFV1 -- good speed lossless big file
            #DIVX -- good speed good compression medium file
            record.writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'), bar.fps.value, (img.shape[1], img.shape[0]))


            # tell the eye process to record positions and where to save its data
            g_pool.pos_record.value = True
            g_pool.eye_tx.send(record.path)

            bar.record_running = 1
            g_pool.frame_count_record.value = 0

        # While Recording...
        if bar.record_video and bar.record_running:
            # Save image frames to video writer
            # increment the frame_count_record value
            # Eye positions can be associated with frames of recording even if different framerates are used
            record.writer.write(img)
            g_pool.frame_count_record.value += 1


        # Finish all recordings, clean up.
        if not bar.record_video and bar.record_running:
            # for convenience: copy camera intrinsics into each data folder at the end of a recording.
            try:
                camera_matrix = np.load("camera_matrix.npy")
                dist_coefs = np.load("dist_coefs.npy")
                cam_path = os.path.join(record.path, "camera_matrix.npy")
                dist_path = os.path.join(record.path, "dist_coefs.npy")
                np.save(cam_path, camera_matrix)
                np.save(dist_path, dist_coefs)
            except:
                print "no camera intrinsics found, will not copy them into data folder"

            g_pool.pos_record.value = 0
            del record.writer
            bar.record_running = 0



        ###render the screen
        clear_gl_screen()
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB,img)
        draw_gl_texture(img)

        ###render calibration results:
        if bar.show_calib_result.value:
            cal_pt_cloud = np.load("cal_pt_cloud.npy")
            pX,pY,wX,wY = cal_pt_cloud.transpose()
            map_fn = get_map_from_cloud(cal_pt_cloud,(width,height))
            modelled_world_pts = map_fn((pX,pY))
            pts = np.array(modelled_world_pts,dtype=np.float32).transpose()
            calib_bounds =  cv2.convexHull(pts)[:,0]
            for observed,modelled in zip(zip(wX,wY),np.array(modelled_world_pts).transpose()):
                draw_gl_polyline_norm((modelled,observed),(1.,0.5,0.,.5))
            draw_gl_polyline_norm(calib_bounds,(1.0,0,0,.5))

        #render visual feedback from detector
        ref.detector.display(img)
        # render detector point
        if ref.detector.pos[0] or ref.detector.pos[1]:
            draw_gl_point_norm(ref.detector.pos,(0.,1.,0.,0.5))

        # update gaze point from shared variable pool and draw on screen. If both coords are 0: no pupil pos was detected.
        if g_pool.gaze_x.value !=0 or g_pool.gaze_y.value !=0:
            draw_gl_point_norm((g_pool.gaze_x.value, g_pool.gaze_y.value),(1.,0.,0.,0.5))

        atb.draw()
        glfwSwapBuffers()

    ### end while running: clean-up
    print "WORLD Process closed"
    glfwCloseWindow()
    glfwTerminate()
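
The commented-out block inside the loop above collects circle-grid detections to calibrate the world camera intrinsics. A hedged, standalone sketch of that step with OpenCV's stock cv2.calibrateCamera (the function name, inputs and output file names here are illustrative; the original uses a project-specific calibrate_camera helper):

import cv2
import numpy as np

def calibrate_intrinsics(img_points, obj_points, image_size):
    # img_points: list of (N, 1, 2) float32 arrays of detected circle-grid centers
    # obj_points: list of (N, 3) float32 arrays of the known grid geometry
    # image_size: (width, height) of the world camera frames
    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, image_size, None, None)
    np.save("camera_matrix.npy", camera_matrix)
    np.save("dist_coefs.npy", dist_coefs)
    return rms, camera_matrix, dist_coefs
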
Exemplo n.º 39
0
    def gl_display_in_window(self):
        if glfwWindowShouldClose(self._window):
            self.stop()
            return

        p_window_size = glfwGetFramebufferSize(self._window)
        if p_window_size == (0, 0):
            # On Windows we get a window size of (0, 0) when the window is minimized
            # or tabbed out (rendered only in the background). The code below errors
            # with a (0, 0) window size, and we want to stop calibration in that case
            # anyway, since it would invalidate the results.
            self.stop()
            return

        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        hdpi_factor = getHDPIFactor(self._window)
        r = self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        gl.glOrtho(0, p_window_size[0], p_window_size[1], 0, -1, 1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        def map_value(value, in_range=(0, 1), out_range=(0, 1)):
            ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
            return (value - in_range[0]) * ratio + out_range[0]

        pad = 90 * r
        screen_pos = (
            map_value(self.display_pos[0],
                      out_range=(pad, p_window_size[0] - pad)),
            map_value(self.display_pos[1],
                      out_range=(p_window_size[1] - pad, pad)),
        )
        alpha = interp_fn(
            self.screen_marker_state,
            0.0,
            1.0,
            float(self.sample_duration + self.lead_in + self.lead_out),
            float(self.lead_in),
            float(self.sample_duration + self.lead_in),
        )

        r2 = 2 * r
        draw_points([screen_pos],
                    size=60 * r2,
                    color=RGBA(0.0, 0.0, 0.0, alpha),
                    sharpness=0.9)
        draw_points([screen_pos],
                    size=38 * r2,
                    color=RGBA(1.0, 1.0, 1.0, alpha),
                    sharpness=0.8)
        draw_points([screen_pos],
                    size=19 * r2,
                    color=RGBA(0.0, 0.0, 0.0, alpha),
                    sharpness=0.55)

        # some feedback on the detection state
        color = (RGBA(0.0, 0.8, 0.0, alpha) if len(self.markers)
                 and self.on_position else RGBA(0.8, 0.0, 0.0, alpha))
        draw_points([screen_pos], size=3 * r2, color=color, sharpness=0.5)

        if self.clicks_to_close < 5:
            self.glfont.set_size(int(p_window_size[0] / 30.0))
            self.glfont.draw_text(
                p_window_size[0] / 2.0,
                p_window_size[1] / 4.0,
                "Touch {} more times to cancel {}.".format(
                    self.clicks_to_close, self.mode_pretty),
            )

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
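
The map_value helper above is a plain linear remap that keeps the marker inside a padded screen area. Isolated, with an illustrative worked example (window width and pad are assumed values):

def map_value(value, in_range=(0, 1), out_range=(0, 1)):
    # linearly remap value from in_range to out_range
    ratio = (out_range[1] - out_range[0]) / (in_range[1] - in_range[0])
    return (value - in_range[0]) * ratio + out_range[0]

# e.g. a normalized x of 0.25 on a 1920 px wide framebuffer with a 90 px pad:
# map_value(0.25, out_range=(90, 1920 - 90)) == 90 + 0.25 * 1740 == 525.0
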
Exemplo n.º 40
0
def eye(
    timebase,
    is_alive_flag,
    ipc_pub_url,
    ipc_sub_url,
    ipc_push_url,
    user_dir,
    version,
    eye_id,
    overwrite_cap_settings=None,
    hide_ui=False,
):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
        ``set_detection_mapping_mode``: Sets detection method
        ``eye_process.should_stop``: Stops the eye process
        ``recording.started``: Starts recording eye video
        ``recording.stopped``: Stops recording eye video
        ``frame_publishing.started``: Starts frame publishing
        ``frame_publishing.stopped``: Stops frame publishing
        ``start_eye_plugin``: Start plugins in eye process

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools

    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                        ipc_sub_url,
                                        topics=("notify", ))

    # logging setup
    import logging

    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.NOTSET)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    if is_alive_flag.value:
        # an already-set flag indicates that this is a duplicated eye process startup
        logger.warning("Aborting redundant eye process startup")
        return

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
        # general imports
        import traceback
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible, glViewport
        from ui_roi import UIRoi

        # monitoring
        import psutil

        # Plug-ins
        from plugin import Plugin_List

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, MPEG_Writer
        from ndsi import H264Writer
        from video_capture import source_classes, manager_classes

        from background_helper import IPC_Logging_Task_Proxy
        from pupil_detector_plugins import available_detector_plugins
        from pupil_detector_plugins.manager import PupilDetectorManager

        IPC_Logging_Task_Proxy.push_url = ipc_push_url

        def interrupt_handler(sig, frame):
            import traceback

            trace = traceback.format_stack(f=frame)
            logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
            # NOTE: Interrupt is handled in world/service/player which are responsible for
            # shutting down the eye process properly

        signal.signal(signal.SIGINT, interrupt_handler)

        # UI Platform tweaks
        if platform.system() == "Linux":
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id + 30)
        elif platform.system() == "Windows":
            scroll_factor = 10.0
            window_position_default = (600, 90 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        icon_bar_width = 50
        window_size = None
        camera_render_size = None
        hdpi_factor = 1.0

        # g_pool holds variables for this process
        g_pool = SimpleNamespace()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = "capture"
        g_pool.eye_id = eye_id
        g_pool.process = f"eye{eye_id}"
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        default_detector_cls, available_detectors = available_detector_plugins(
        )
        plugins = (manager_classes + source_classes + available_detectors +
                   [PupilDetectorManager])
        g_pool.plugin_by_name = {p.__name__: p for p in plugins}

        preferred_names = [
            f"Pupil Cam3 ID{eye_id}",
            f"Pupil Cam2 ID{eye_id}",
            f"Pupil Cam1 ID{eye_id}",
        ]
        if eye_id == 0:
            preferred_names += ["HD-6000"]
        default_capture_settings = (
            "UVC_Source",
            {
                "preferred_names": preferred_names,
                "frame_size": (320, 240),
                "frame_rate": 120,
            },
        )

        default_plugins = [
            # TODO: extend with plugins
            default_capture_settings,
            ("UVC_Manager", {}),
            # Detector needs to be loaded first to set `g_pool.pupil_detector`
            (default_detector_cls.__name__, {}),
            ("PupilDetectorManager", {}),
        ]

        # Callback functions
        def on_resize(window, w, h):
            nonlocal window_size
            nonlocal camera_render_size
            nonlocal hdpi_factor

            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(window)
            hdpi_factor = glfw.getHDPIFactor(window)
            g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
            window_size = w, h
            camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
            g_pool.gui.update_window(w, h)
            g_pool.gui.collect_menus()
            for g in g_pool.graphs:
                g.scale = hdpi_factor
                g.adjust_window_size(w, h)
            adjust_gl_view(w, h)
            glfw.glfwMakeContextCurrent(active_window)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            x *= hdpi_factor
            y *= hdpi_factor
            g_pool.gui.update_mouse(x, y)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            paths = [paths[x].decode("utf-8") for x in range(count)]
            for plugin in g_pool.plugins:
                if plugin.on_drop(paths):
                    break

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir,
                         "user_settings_eye{}".format(eye_id)))
        if VersionFormat(session_settings.get("version",
                                              "0.0")) != g_pool.version:
            logger.info(
                "Session setting are from a different version of this app. I will not use those."
            )
            session_settings.clear()

        g_pool.iconified = False
        g_pool.capture = None
        g_pool.flip = session_settings.get("flip", False)
        g_pool.display_mode = session_settings.get("display_mode",
                                                   "camera_image")
        g_pool.display_mode_info_text = {
            "camera_image":
            "Raw eye camera image. This uses the least amount of CPU power",
            "roi":
            "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            "algorithm":
            "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
        }

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def toggle_general_settings(collapsed):
            # this is the menu toggle logic.
            # Only one menu can be open.
            # If no menu is open the menubar should collapse.
            g_pool.menubar.collapsed = collapsed
            for m in g_pool.menubar.elements:
                m.collapsed = True
            general_settings.collapsed = collapsed

        # Initialize glfw
        glfw.glfwInit()
        if hide_ui:
            glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0)  # hide window
        title = "Pupil Capture - eye {}".format(eye_id)

        width, height = session_settings.get("window_size",
                                             (640 + icon_bar_width, 480))

        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get("window_position",
                                          window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(
            np.ones((1, 1), dtype=np.uint8) + 125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
        g_pool.menubar = ui.Scrolling_Menu("Settings",
                                           pos=(-500, 0),
                                           size=(-icon_bar_width, 0),
                                           header_pos="left")
        g_pool.iconbar = ui.Scrolling_Menu("Icons",
                                           pos=(-icon_bar_width, 0),
                                           size=(0, 0),
                                           header_pos="hidden")
        g_pool.gui.append(g_pool.menubar)
        g_pool.gui.append(g_pool.iconbar)

        general_settings = ui.Growing_Menu("General", header_pos="headline")
        general_settings.append(
            ui.Selector(
                "gui_user_scale",
                g_pool,
                setter=set_scale,
                selection=[0.8, 0.9, 1.0, 1.1, 1.2],
                label="Interface Size",
            ))

        def set_window_size():
            f_width, f_height = g_pool.capture.frame_size
            f_width *= 2
            f_height *= 2
            f_width += int(icon_bar_width * g_pool.gui.scale)
            glfw.glfwSetWindowSize(main_window, f_width, f_height)

        def uroi_on_mouse_button(button, action, mods):
            if g_pool.display_mode == "roi":
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the roi interacts we don't want
                    # the gui to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    x, y = glfw.glfwGetCursorPos(main_window)
                    # pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    x *= hdpi_factor
                    y *= hdpi_factor
                    pos = normalize((x, y), camera_render_size)
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    # Position in img pixels
                    pos = denormalize(
                        pos,
                        g_pool.capture.frame_size)  # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos,
                                                     g_pool.u_r.handle_size,
                                                     g_pool.u_r.handle_size):
                        # if the roi interacts we don't want
                        # the gui to interact as well
                        return

        general_settings.append(ui.Button("Reset window size",
                                          set_window_size))
        general_settings.append(
            ui.Switch("flip", g_pool, label="Flip image display"))
        general_settings.append(
            ui.Selector(
                "display_mode",
                g_pool,
                setter=set_display_mode_info,
                selection=["camera_image", "roi", "algorithm"],
                labels=["Camera Image", "ROI", "Algorithm"],
                label="Mode",
            ))
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)

        g_pool.menubar.append(general_settings)
        icon = ui.Icon(
            "collapsed",
            general_settings,
            label=chr(0xE8B8),
            on_val=False,
            off_val=True,
            setter=toggle_general_settings,
            label_font="pupil_icons",
        )
        icon.tooltip = "General Settings"
        g_pool.iconbar.append(icon)
        toggle_general_settings(False)

        plugins_to_load = session_settings.get("loaded_plugins",
                                               default_plugins)
        if overwrite_cap_settings:
            # Ensure that overwrite_cap_settings takes preference over source plugins
            # with incorrect settings that were loaded from session settings.
            plugins_to_load.append(overwrite_cap_settings)

        g_pool.plugins = Plugin_List(g_pool, plugins_to_load)

        g_pool.writer = None

        g_pool.u_r = UIRoi(
            (g_pool.capture.frame_size[1], g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get("roi")
        if roi_user_settings and tuple(
                roi_user_settings[-1]) == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get("ui_config", {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 50)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = "CPU %0.1f"

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 50)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = "jpeg"
        frame_publish_format_recent_warning = False

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)
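        # NOTE: `timer` (imported from methods above) is presumably a generator that
        # yields True at most once per 1/60 s; window_should_update() therefore caps
        # GL/GUI redraws at ~60 Hz while capture and detection run on every iteration.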

        logger.warning("Process started.")

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification["subject"]
                if subject.startswith("eye_process.should_stop"):
                    if notification["eye_id"] == eye_id:
                        break
                elif subject == "recording.started":
                    if notification["record_eye"] and g_pool.capture.online:
                        record_path = notification["rec_path"]
                        raw_mode = notification["compression"]
                        start_time_synced = notification["start_time_synced"]
                        logger.info(
                            "Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path,
                                                  "eye{}.mp4".format(eye_id))
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(
                                video_path, start_time_synced)
                        elif hasattr(g_pool.capture._recent_frame,
                                     "h264_buffer"):
                            g_pool.writer = H264Writer(
                                video_path,
                                g_pool.capture.frame_size[0],
                                g_pool.capture.frame_size[1],
                                g_pool.capture.frame_rate,
                            )
                        else:
                            g_pool.writer = MPEG_Writer(
                                video_path, start_time_synced)
                elif subject == "recording.stopped":
                    if g_pool.writer:
                        logger.info("Done recording.")
                        try:
                            g_pool.writer.release()
                        except RuntimeError:
                            logger.error("No eye video recorded")
                        g_pool.writer = None
                elif subject.startswith("meta.should_doc"):
                    ipc_socket.notify({
                        "subject": "meta.doc",
                        "actor": "eye{}".format(eye_id),
                        "doc": eye.__doc__,
                    })
                elif subject.startswith("frame_publishing.started"):
                    should_publish_frames = True
                    frame_publish_format = notification.get("format", "jpeg")
                elif subject.startswith("frame_publishing.stopped"):
                    should_publish_frames = False
                    frame_publish_format = "jpeg"
                elif (subject.startswith("start_eye_plugin")
                      and notification["target"] == g_pool.process):
                    try:
                        g_pool.plugins.add(
                            g_pool.plugin_by_name[notification["name"]],
                            notification.get("args", {}),
                        )
                    except KeyError as err:
                        logger.error(f"Attempt to load unknown plugin: {err}")

                for plugin in g_pool.plugins:
                    plugin.on_notify(notification)

            event = {}
            for plugin in g_pool.plugins:
                plugin.recent_events(event)

            frame = event.get("frame")
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (
                        f_height,
                        f_width,
                ):
                    g_pool.pupil_detector.on_resolution_change(
                        (g_pool.u_r.array_shape[1], g_pool.u_r.array_shape[0]),
                        g_pool.capture.frame_size,
                    )
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        assert data is not None
                    except (AttributeError, AssertionError, NameError):
                        if not frame_publish_format_recent_warning:
                            frame_publish_format_recent_warning = True
                            logger.warning(
                                '{}s are not compatible with format "{}"'.
                                format(type(frame), frame_publish_format))
                    else:
                        frame_publish_format_recent_warning = False
                        pupil_socket.send({
                            "topic":
                            "frame.eye.{}".format(eye_id),
                            "width":
                            frame.width,
                            "height":
                            frame.height,
                            "index":
                            frame.index,
                            "timestamp":
                            frame.timestamp,
                            "format":
                            frame_publish_format,
                            "__raw_data__": [data],
                        })

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1.0 / dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                result = event.get("pupil_detection_result", None)
                if result is not None:
                    pupil_socket.send(result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    glViewport(0, 0, *camera_render_size)
                    for p in g_pool.plugins:
                        p.gl_display()

                    glViewport(0, 0, *camera_render_size)
                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == "roi":
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    glViewport(0, 0, *window_size)
                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    try:
                        clipboard = glfw.glfwGetClipboardString(
                            main_window).decode()
                    except AttributeError:  # clipboard is None, might happen on startup
                        clipboard = ""
                    g_pool.gui.update_clipboard(clipboard)
                    user_input = g_pool.gui.update()
                    if user_input.clipboard != clipboard:
                        # only write to clipboard if content changed
                        glfw.glfwSetClipboardString(
                            main_window, user_input.clipboard.encode())

                    for button, action, mods in user_input.buttons:
                        x, y = glfw.glfwGetCursorPos(main_window)
                        pos = x * hdpi_factor, y * hdpi_factor
                        pos = normalize(pos, camera_render_size)
                        # Position in img pixels
                        pos = denormalize(pos, g_pool.capture.frame_size)

                        for plugin in g_pool.plugins:
                            if plugin.on_click(pos, button, action):
                                break

                    for key, scancode, action, mods in user_input.keys:
                        for plugin in g_pool.plugins:
                            if plugin.on_key(key, scancode, action, mods):
                                break

                    for char_ in user_input.chars:
                        for plugin in g_pool.plugins:
                            if plugin.on_char(char_):
                                break

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer.release()
            g_pool.writer = None

        session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
        # save session persistent settings
        session_settings["gui_scale"] = g_pool.gui_user_scale
        session_settings["roi"] = g_pool.u_r.get()
        session_settings["flip"] = g_pool.flip
        session_settings["display_mode"] = g_pool.display_mode
        session_settings["ui_config"] = g_pool.gui.configuration
        session_settings["version"] = str(g_pool.version)

        if not hide_ui:
            glfw.glfwRestoreWindow(
                main_window)  # need to do this for windows os
            session_settings["window_position"] = glfw.glfwGetWindowPos(
                main_window)
            session_window_size = glfw.glfwGetWindowSize(main_window)
            if 0 not in session_window_size:
                session_settings["window_size"] = session_window_size

        session_settings.close()

        for plugin in g_pool.plugins:
            plugin.alive = False
        g_pool.plugins.clean()

        glfw.glfwDestroyWindow(main_window)
        g_pool.gui.terminate()
        glfw.glfwTerminate()
        logger.info("Process shutting down.")
Exemplo n.º 41
0
def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url,ipc_push_url, user_dir, version, eye_id,overwrite_cap_settings=None):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """


    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx,ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx,ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,ipc_sub_url,topics=("notify",))

    with Is_Alive_Manager(is_alive_flag,ipc_socket,eye_id):

        #logging setup
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx,ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        #general imports
        import numpy as np
        import cv2

        #display
        import glfw
        from pyglui import ui,graph,cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline, Named_Texture, Sphere
        import OpenGL.GL as gl
        from gl_utils import basic_gl_setup,adjust_gl_view, clear_gl_screen ,make_coord_system_pixel_based,make_coord_system_norm_based, make_coord_system_eye_camera_based,is_window_visible
        from ui_roi import UIRoi
        #monitoring
        import psutil
        import math


        # helpers/utils
        from uvc import get_time_monotonic, StreamError
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, Roi, timer
        from av_writer import JPEG_Writer,AV_Writer

        from video_capture import InitialisationError,StreamError, Fake_Source,EndofVideoFileError, source_classes, manager_classes
        source_by_name = {src.class_name():src for src in source_classes}

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__:Detector_2D,Detector_3D.__name__:Detector_3D}



        #UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600,300*eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 1.0
            window_position_default = (600,31+300*eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600,300*eye_id)


        #g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic()-g_pool.timebase.value
        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window,w, h):
            if is_window_visible(window):
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                g_pool.gui.update_window(w,h)
                graph.adjust_size(w,h)
                adjust_gl_view(w,h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key,scancode,action,mods)

        def on_char(window,char):
            g_pool.gui.update_char(char)

        def on_iconify(window,iconified):
            g_pool.iconified = iconified

        def on_button(window,button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    return  # if the roi interacts we don't want the gui to interact as well
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos,glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1-pos[0],1-pos[1]
                    pos = denormalize(pos,(frame.width,frame.height)) # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos,g_pool.u_r.handle_size+40,g_pool.u_r.handle_size+40):
                        return  # if the roi interacts we don't want the gui to interact as well

            g_pool.gui.update_button(button,action,mods)



        def on_pos(window,x, y):
            hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0]/glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.update_mouse(x*hdpi_factor,y*hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x,y),glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,(frame.width,frame.height) )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)

        def on_scroll(window,x,y):
            g_pool.gui.update_scroll(x,y*scroll_factor)

        # load session persistent settings
        session_settings = Persistent_Dict(os.path.join(g_pool.user_dir,'user_settings_eye%s'%eye_id))
        if session_settings.get("version",VersionFormat('0.0')) < g_pool.version:
            logger.info("Session setting are from older version of this app. I will not use those.")
            session_settings.clear()

        capture_manager_settings = session_settings.get(
            'capture_manager_settings', ('UVC_Manager',{}))

        if eye_id == 0:
            cap_src = ["Pupil Cam1 ID0","HD-6000","Integrated Camera","HD USB Camera","USB 2.0 Camera"]
        else:
            cap_src = ["Pupil Cam1 ID1","HD-6000","Integrated Camera"]

        # Initialize capture
        default_settings = {
            'source_class_name': 'UVC_Source',
            'preferred_names'  : cap_src,
            'frame_size': (640,480),
            'frame_rate': 90
        }
        settings = overwrite_cap_settings or session_settings.get('capture_settings', default_settings)
        try:
            cap = source_by_name[settings['source_class_name']](g_pool, **settings)
        except (KeyError,InitialisationError) as e:
            if isinstance(e,KeyError):
                logger.warning('Incompatible capture setting encountered. Falling back to fake source.')
            cap = Fake_Source(g_pool, **settings)

        g_pool.iconified = False
        g_pool.capture = cap
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get('flip',False)
        g_pool.display_mode = session_settings.get('display_mode','camera_image')
        g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                    'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                    'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}



        g_pool.u_r = UIRoi((g_pool.capture.frame_size[1],g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get('roi')
        if roi_user_settings and roi_user_settings[-1] == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        writer = None

        pupil_detector_settings = session_settings.get('pupil_detector_settings',None)
        last_pupil_detector = pupil_detectors[session_settings.get('last_pupil_detector',Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,pupil_detector_settings)

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui.scale = new_scale
            g_pool.gui.collect_menus()


        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]


        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)


        # Initialize glfw
        glfw.glfwInit()
        title = "eye %s"%eye_id
        width,height = session_settings.get('window_size',g_pool.capture.frame_size)
        main_window = glfw.glfwCreateWindow(width,height, title, None, None)
        window_pos = session_settings.get('window_position',window_position_default)
        glfw.glfwSetWindowPos(main_window,window_pos[0],window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        glfw.glfwSwapInterval(0)

        #setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui.scale = session_settings.get('gui_scale',1)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",pos=(-300,0),size=(0,0),header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(ui.Slider('scale',g_pool.gui, setter=set_scale,step = .05,min=1.,max=2.5,label='Interface Size'))
        general_settings.append(ui.Button('Reset window size',lambda: glfw.glfwSetWindowSize(main_window,frame.width,frame.height)) )
        general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
        general_settings.append(ui.Selector('display_mode',g_pool,setter=set_display_mode_info,selection=['camera_image','roi','algorithm'], labels=['Camera Image', 'ROI', 'Algorithm'], label="Mode") )
        g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])
        general_settings.append(g_pool.display_mode_info)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector('pupil_detector',getter = lambda: g_pool.pupil_detector.__class__ ,setter=set_detector,selection=[Detector_2D, Detector_3D],labels=['C++ 2d detector', 'C++ 3d detector'], label="Detection method")
        general_settings.append(detector_selector)

        g_pool.capture_selector_menu = ui.Growing_Menu('Capture Selection')
        g_pool.capture_source_menu = ui.Growing_Menu('Capture Source')
        g_pool.capture.init_gui()

        g_pool.sidebar.append(general_settings)
        g_pool.sidebar.append(g_pool.capture_selector_menu)
        g_pool.sidebar.append(g_pool.capture_source_menu)

        g_pool.pupil_detector.init_gui(g_pool.sidebar)

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__:c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](g_pool,**manager_settings)
        g_pool.capture_manager.init_gui()

        def open_manager(manager_class):
            g_pool.capture_manager.cleanup()
            g_pool.capture_manager = manager_class(g_pool)
            g_pool.capture_manager.init_gui()

        # We add the capture selection menu after a manager has been added:
        g_pool.capture_selector_menu.insert(0,ui.Selector(
            'capture_manager',g_pool,
            setter    = open_manager,
            getter    = lambda: g_pool.capture_manager.__class__,
            selection = manager_classes,
            labels    = [b.gui_name for b in manager_classes],
            label     = 'Manager'
        ))

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window,on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window,on_iconify)
        glfw.glfwSetKeyCallback(main_window,on_key)
        glfw.glfwSetCharCallback(main_window,on_char)
        glfw.glfwSetMouseButtonCallback(main_window,on_button)
        glfw.glfwSetCursorPosCallback(main_window,on_pos)
        glfw.glfwSetScrollCallback(main_window,on_scroll)

        #set the last saved window size
        on_resize(main_window, *glfw.glfwGetWindowSize(main_window))


        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config',{})


        #set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20,130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140,130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"

        should_publish_frames = False
        frame_publish_format = 'jpeg'

        #create a timer to control window update frequency
        window_update_timer = timer(1/60.)
        def window_should_update():
            return next(window_update_timer)

        logger.warning('Process started.')

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t,notification = notify_sub.recv()
                subject = notification['subject']
                if subject == 'eye_process.should_stop':
                    if notification['eye_id'] == eye_id:
                        break
                elif subject == 'set_detection_mapping_mode':
                    if notification['mode'] == '3d':
                        if not isinstance(g_pool.pupil_detector,Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only  = True
                    else:
                        if not isinstance(g_pool.pupil_detector,Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                elif subject == 'recording.started':
                    if notification['record_eye']:
                        record_path = notification['rec_path']
                        raw_mode = notification['compression']
                        logger.info("Will save eye video to: %s"%record_path)
                        timestamps_path = os.path.join(record_path, "eye%s_timestamps.npy"%eye_id)
                        if raw_mode and frame.jpeg_buffer:
                            video_path = os.path.join(record_path, "eye%s.mp4"%eye_id)
                            writer = JPEG_Writer(video_path,g_pool.capture.frame_rate)
                        else:
                            video_path = os.path.join(record_path, "eye%s.mp4"%eye_id)
                            writer = AV_Writer(video_path,g_pool.capture.frame_rate)
                        timestamps = []
                elif subject == 'recording.stopped':
                    if writer:
                        logger.info("Done recording.")
                        writer.release()
                        writer = None
                        np.save(timestamps_path,np.asarray(timestamps))
                        del timestamps
                elif subject.startswith('meta.should_doc'):
                    ipc_socket.notify({
                        'subject':'meta.doc',
                        'actor':'eye%i'%eye_id,
                        'doc':eye.__doc__
                        })
                elif subject.startswith('frame_publishing.started'):
                    should_publish_frames = True
                    frame_publish_format = notification.get('format','jpeg')
                elif subject.startswith('frame_publishing.stopped'):
                    should_publish_frames = False
                    frame_publish_format = 'jpeg'
                else:
                    g_pool.capture_manager.on_notify(notification)

            # Get an image from the grabber
            try:
                frame = g_pool.capture.get_frame()
            except StreamError as e:
                logger.error("Error getting frame. Stopping eye process.")
                logger.debug("Caught error: %s"%e)
                break
            except EndofVideoFileError:
                logger.warning("Video File is done. Stopping")
                g_pool.capture.seek_to_frame(0)
                frame = g_pool.capture.get_frame()

            g_pool.u_r = UIRoi((frame.height,frame.width))
            g_pool.capture_manager.update(frame, {})

            if should_publish_frames and frame.jpeg_buffer:
                if   frame_publish_format == "jpeg":
                    data = frame.jpeg_buffer
                elif frame_publish_format == "yuv":
                    data = frame.yuv_buffer
                elif frame_publish_format == "bgr":
                    data = frame.bgr
                elif frame_publish_format == "gray":
                    data = frame.gray
                pupil_socket.send('frame.eye.%s'%eye_id,{
                    'width': frame.width,
                'height': frame.height,
                    'index': frame.index,
                    'timestamp': frame.timestamp,
                    'format': frame_publish_format,
                    '__raw_data__': [data]
                })
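            # the raw frame bytes are attached under '__raw_data__' so that subscribers
            # of 'frame.eye.<eye_id>' receive the image data without re-encoding it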


            # update performance graphs
            t = frame.timestamp
            dt,ts = t-ts,t
            try:
                fps_graph.add(1./dt)
            except ZeroDivisionError:
                pass
            cpu_graph.update()



            if writer:
                writer.write_video_frame(frame)
                timestamps.append(frame.timestamp)


            # pupil ellipse detection
            result = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
            result['id'] = eye_id
            # stream the result
            pupil_socket.send('pupil.%s'%eye_id,result)
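            # the world process (gaze mapping) and any external clients subscribe to the 'pupil.<eye_id>' topic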

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    # switch to work in normalized coordinate space
                    if g_pool.display_mode == 'algorithm':
                        g_pool.image_tex.update_from_ndarray(frame.img)
                    elif g_pool.display_mode in ('camera_image','roi'):
                        g_pool.image_tex.update_from_ndarray(frame.gray)
                    else:
                        pass

                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    window_size =  glfw.glfwGetWindowSize(main_window)
                    make_coord_system_pixel_based((frame.height,frame.width,3),g_pool.flip)
                    g_pool.capture.gl_display()

                    if result['method'] == '3d c++':

                        eye_ball = result['projected_sphere']
                        try:
                            pts = cv2.ellipse2Poly( (int(eye_ball['center'][0]),int(eye_ball['center'][1])),
                                                (int(eye_ball['axes'][0]/2),int(eye_ball['axes'][1]/2)),
                                                int(eye_ball['angle']),0,360,8)
                        except ValueError as e:
                            pass
                        else:
                            draw_polyline(pts,2,RGBA(0.,.9,.1,result['model_confidence']) )

                    if result['confidence'] > 0:
                        if 'ellipse' in result:
                            pts = cv2.ellipse2Poly( (int(result['ellipse']['center'][0]),int(result['ellipse']['center'][1])),
                                            (int(result['ellipse']['axes'][0]/2),int(result['ellipse']['axes'][1]/2)),
                                            int(result['ellipse']['angle']),0,360,15)
                            confidence = result['confidence'] * 0.7 #scale it a little
                            draw_polyline(pts,1,RGBA(1.,0,0,confidence))
                            draw_points([result['ellipse']['center']],size=20,color=RGBA(1.,0.,0.,confidence),sharpness=1.)

                    # render graphs
                    graph.push_view()
                    fps_graph.draw()
                    cpu_graph.draw()
                    graph.pop_view()

                    # render GUI
                    g_pool.gui.update()

                    #render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    #update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize() #detector decides if we visualize or not


        # END while running

        # in case eye recording was still running: save & close
        if writer:
            logger.info("Done recording eye.")
            writer.release()
            writer = None
            np.save(timestamps_path,np.asarray(timestamps))

        glfw.glfwRestoreWindow(main_window) #need to do this for windows os
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui.scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.settings
        session_settings['capture_manager_settings'] = g_pool.capture_manager.class_name, g_pool.capture_manager.get_init_dict()
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = g_pool.version
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.capture.deinit_gui()
        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()
        logger.info("Process shutting down.")
Example No. 42
# Event loop
while glfwGetWindowParam(GLFW_OPENED): # and not quit.value
    update_fps()

    # Get an image from the grabber
    # s, img = cap.read()
    img = np.zeros((480,640,3))

    # for p in g.plugins:
    #     p.update(img)

    # g.plugins = [p for p in g.plugins if p.alive]

    # render the screen
    clear_gl_screen()
    draw_gl_texture(img)

    # render visual feedback from loaded plugins
    # for p in g.plugins:
    #     p.gl_display()


    atb.draw()
    glfwSwapBuffers()

# end while running and clean-up
print "Browser closed"
glfwCloseWindow()
glfwTerminate()
Example No. 43
def show_no_rec_window():
    from pyglui.pyfontstash import fontstash
    from pyglui.ui import get_roboto_font_path

    def on_drop(window,count,paths):
        for x in range(count):
            new_rec_dir =  paths[x]
            if is_pupil_rec_dir(new_rec_dir):
                logger.debug("Starting new session with '%s'"%new_rec_dir)
                global rec_dir
                rec_dir = new_rec_dir
                glfwSetWindowShouldClose(window,True)
            else:
                logger.error("'%s' is not a valid pupil recording"%new_rec_dir)

    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(user_dir,"user_settings"))
    if session_settings.get("version",VersionFormat('0.0')) < get_version(version_file):
        logger.info("Session setting are from older version of this app. I will not use those.")
        session_settings.clear()
    w,h = session_settings.get('window_size',(1280,720))
    window_pos = session_settings.get('window_position',(0,0))

    glfwWindowHint(GLFW_RESIZABLE,0)
    window = glfwCreateWindow(w, h,'Pupil Player')
    glfwWindowHint(GLFW_RESIZABLE,1)

    glfwMakeContextCurrent(window)
    glfwSetWindowPos(window,window_pos[0],window_pos[1])
    glfwSetDropCallback(window,on_drop)

    adjust_gl_view(w,h)
    glfont = fontstash.Context()
    glfont.add_font('roboto',get_roboto_font_path())
    glfont.set_align_string(v_align="center",h_align="middle")
    glfont.set_color_float((0.2,0.2,0.2,0.9))
    basic_gl_setup()
    glClearColor(0.5,.5,0.5,0.0)
    text = 'Drop a recording directory onto this window.'
    tip = '(Tip: You can drop a recording directory onto the app icon.)'
    # text = "Please supply a Pupil recording directory as first arg when calling Pupil Player."
    while not glfwWindowShouldClose(window):
        clear_gl_screen()
        glfont.set_blur(10.5)
        glfont.set_color_float((0.0,0.0,0.0,1.))
        glfont.set_size(w/25.)
        glfont.draw_text(w/2,.3*h,text)
        glfont.set_size(w/30.)
        glfont.draw_text(w/2,.4*h,tip)
        glfont.set_blur(0.96)
        glfont.set_color_float((1.,1.,1.,1.))
        glfont.set_size(w/25.)
        glfont.draw_text(w/2,.3*h,text)
        glfont.set_size(w/30.)
        glfont.draw_text(w/2,.4*h,tip)
        glfwSwapBuffers(window)
        glfwPollEvents()

    session_settings['window_position'] = glfwGetWindowPos(window)
    session_settings['version'] = get_version(version_file)
    session_settings.close()
    del glfont
    glfwDestroyWindow(window)
Example No. 44
def eye(g_pool):
    """
    this process needs a docstring
    """
    # # callback functions
    def on_resize(w, h):
        atb.TwWindowSize(w, h)
        adjust_gl_view(w, h)

    def on_key(key, pressed):
        if not atb.TwEventKeyboardGLFW(key, pressed):
            if pressed:
                if key == GLFW_KEY_ESC:
                    on_close()

    def on_char(char, pressed):
        if not atb.TwEventCharGLFW(char, pressed):
            pass

    def on_button(button, pressed):
        if not atb.TwEventMouseButtonGLFW(button, pressed):
            if bar.draw_roi.value:
                if pressed:
                    pos = glfwGetMousePos()
                    pos = normalize(pos, glfwGetWindowSize())
                    pos = denormalize(pos, (img.shape[1], img.shape[0]))  # pos in img pixels
                    r.setStart(pos)
                    bar.draw_roi.value = 1
                else:
                    bar.draw_roi.value = 0

    def on_pos(x, y):
        if atb.TwMouseMotion(x, y):
            pass
        if bar.draw_roi.value == 1:
            pos = glfwGetMousePos()
            pos = normalize(pos, glfwGetWindowSize())
            pos = denormalize(pos, (img.shape[1], img.shape[0]))  # pos in img pixels
            r.setEnd(pos)

    def on_scroll(pos):
        if not atb.TwMouseWheel(pos):
            pass

    def on_close():
        g_pool.quit.value = True
        print "EYE Process closing from window"

    # initialize capture, check if it works
    cap = autoCreateCapture(g_pool.eye_src, g_pool.eye_size)
    if cap is None:
        print "EYE: Error could not create Capture"
        return
    s, img = cap.read_RGB()
    if not s:
        print "EYE: Error could not get image"
        return
    height, width = img.shape[:2]

    # pupil object
    pupil = Temp()
    pupil.norm_coords = (0.0, 0.0)
    pupil.image_coords = (0.0, 0.0)
    pupil.ellipse = None
    pupil.gaze_coords = (0.0, 0.0)

    try:
        pupil.pt_cloud = np.load("cal_pt_cloud.npy")
        map_pupil = get_map_from_cloud(pupil.pt_cloud, g_pool.world_size)  ###world video size here
    except:
        pupil.pt_cloud = None

        def map_pupil(vector):
            return vector

    r = Roi(img.shape)
    p_r = Roi(img.shape)

    # local object
    l_pool = Temp()
    l_pool.calib_running = False
    l_pool.record_running = False
    l_pool.record_positions = []
    l_pool.record_path = None
    l_pool.writer = None
    l_pool.region_r = 20

    atb.init()
    bar = Bar(
        "Eye",
        g_pool,
        dict(
            label="Controls",
            help="eye detection controls",
            color=(50, 50, 50),
            alpha=100,
            text="light",
            position=(10, 10),
            refresh=0.1,
            size=(200, 300),
        ),
    )

    # add v4l2 camera controls to a separate ATB bar
    if cap.controls is not None:
        c_bar = atb.Bar(
            name="Camera_Controls",
            label=cap.name,
            help="UVC Camera Controls",
            color=(50, 50, 50),
            alpha=100,
            text="light",
            position=(220, 10),
            refresh=2.0,
            size=(200, 200),
        )

        # c_bar.add_var("auto_refresher",vtype=atb.TW_TYPE_BOOL8,getter=cap.uvc_refresh_all,setter=None,readonly=True)
        # c_bar.define(definition='visible=0', varname="auto_refresher")

        sorted_controls = [c for c in cap.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        for control in sorted_controls:
            name = control.atb_name
            if control.type == "bool":
                c_bar.add_var(name, vtype=atb.TW_TYPE_BOOL8, getter=control.get_val, setter=control.set_val)
            elif control.type == "int":
                c_bar.add_var(name, vtype=atb.TW_TYPE_INT32, getter=control.get_val, setter=control.set_val)
                c_bar.define(definition="min=" + str(control.min), varname=name)
                c_bar.define(definition="max=" + str(control.max), varname=name)
                c_bar.define(definition="step=" + str(control.step), varname=name)
            elif control.type == "menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype = atb.enum(name, control.menu)
                c_bar.add_var(name, vtype=vtype, getter=control.get_val, setter=control.set_val)
                if control.menu is None:
                    c_bar.define(definition="min=" + str(control.min), varname=name)
                    c_bar.define(definition="max=" + str(control.max), varname=name)
                    c_bar.define(definition="step=" + str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # c_bar.define(definition='readonly=1',varname=control.name)

        c_bar.add_button("refresh", cap.update_from_device)
        c_bar.add_button("load defaults", cap.load_defaults)

    else:
        c_bar = None

    # Initialize glfw
    glfwInit()
    glfwOpenWindow(width, height, 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("Eye")
    glfwSetWindowPos(800, 0)
    if isinstance(g_pool.eye_src, str):
        glfwSwapInterval(0)  # turn off v-sync when using a video file as source, for benchmarking

    # register callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)
    glfwSetMouseButtonCallback(on_button)
    glfwSetMousePosCallback(on_pos)
    glfwSetMouseWheelCallback(on_scroll)

    # gl_state settings
    import OpenGL.GL as gl

    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glPointSize(20)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)
    del gl

    # event loop
    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        bar.update_fps()
        s, img = cap.read_RGB()
        sleep(bar.sleep.value)  # for debugging only

        ###IMAGE PROCESSING
        gray_img = grayscale(img[r.lY : r.uY, r.lX : r.uX])

        integral = cv2.integral(gray_img)
        integral = np.array(integral, dtype=c_float)
        x, y, w = eye_filter(integral)
        if w > 0:
            p_r.set((y, x, y + w, x + w))
        else:
            p_r.set((0, 0, -1, -1))

        # create view into the gray_img with the bounds of the rough pupil estimation
        pupil_img = gray_img[p_r.lY : p_r.uY, p_r.lX : p_r.uX]

        # pupil_img = cv2.morphologyEx(pupil_img, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT,(5,5)),iterations=2)
        if True:
            hist = cv2.calcHist(
                [pupil_img], [0], None, [256], [0, 256]
            )  # (images, channels, mask, histSize, ranges[, hist[, accumulate]])
            bins = np.arange(hist.shape[0])
            spikes = bins[hist[:, 0] > 40]  # every intensity value seen in more than 40 pixels
            if spikes.shape[0] > 0:
                lowest_spike = spikes.min()
            else:
                lowest_spike = 200  # fallback so lowest_spike is always defined for the lines below
            offset = 40

            ##display the histogram
            sx, sy = 100, 1
            colors = ((255, 0, 0), (0, 0, 255), (0, 255, 255))
            h, w, chan = img.shape
            # normalize
            hist *= 1.0 / hist.max()
            for i, h in zip(bins, hist[:, 0]):
                c = colors[1]
                cv2.line(img, (w, int(i * sy)), (w - int(h * sx), int(i * sy)), c)
            cv2.line(img, (w, int(lowest_spike * sy)), (int(w - 0.5 * sx), int(lowest_spike * sy)), colors[0])
            cv2.line(
                img,
                (w, int((lowest_spike + offset) * sy)),
                (int(w - 0.5 * sx), int((lowest_spike + offset) * sy)),
                colors[2],
            )

        # # k-means on the histogram finds peaks but thats no good for us...
        # term_crit = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        # compactness, bestLabels, centers = cv2.kmeans(data=hist, K=2, criteria=term_crit, attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
        # cv2.line(img,(0,1),(int(compactness),1),(0,0,0))
        # good_cluster = np.argmax(centers)
        # # A = hist[bestLabels.ravel() == good_cluster]
        # # B = hist[bestLabels.ravel() != good_cluster]
        # bins = np.arange(hist.shape[0])
        # good_bins = bins[bestLabels.ravel() == good_cluster]
        # good_bins_mean = good_bins.sum()/good_bins.shape[0]
        # good_bins_min = good_bins.min()

        # h,w,chan = img.shape
        # for h, i, label in zip(hist[:,0],range(hist.shape[0]), bestLabels.ravel()):
        #     c = colors[label]
        #     cv2.line(img,(w,int(i*sy)),(w-int(h*sx),int(i*sy)),c)

        else:
            # direct k-means on the image is best but expensive
            Z = pupil_img[:: w / 30 + 1, :: w / 30 + 1].reshape((-1, 1))
            Z = np.float32(Z)
            # define criteria, number of clusters(K) and apply kmeans()
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 2.0)
            K = 5
            ret, label, center = cv2.kmeans(Z, K, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
            offset = 0
            center.sort(axis=0)
            lowest_spike = int(center[1])
            # # Now convert back into uint8, and make original image
            # center = np.uint8(center)
            # res = center[label.flatten()]
            # binary_img = res.reshape((pupil_img.shape))
            # binary_img = bin_thresholding(binary_img,image_upper=res.min()+1)
            # bar.bin_thresh.value = res.min()+1

        bar.bin_thresh.value = lowest_spike
        binary_img = bin_thresholding(pupil_img, image_upper=lowest_spike + offset)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        cv2.dilate(binary_img, kernel, binary_img, iterations=2)
        spec_mask = bin_thresholding(pupil_img, image_upper=250)
        cv2.erode(spec_mask, kernel, spec_mask, iterations=1)
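        # dilating the dark-pixel mask closes small gaps; eroding the specular mask also
        # rejects pixels directly adjacent to bright glints before the contour masking below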

        if bar.blur.value > 1:
            pupil_img = cv2.medianBlur(pupil_img, bar.blur.value)

        # create contours using Canny edge detection
        contours = cv2.Canny(
            pupil_img,
            bar.canny_thresh.value,
            bar.canny_thresh.value * bar.canny_ratio.value,
            apertureSize=bar.canny_aperture.value,
        )

        # remove contours in areas that are not dark enough or that contain the glint (specular reflection from the IR LEDs)
        contours = cv2.min(contours, spec_mask)
        contours = cv2.min(contours, binary_img)

        # Ellipse fitting from contours
        result = fit_ellipse(
            img[r.lY : r.uY, r.lX : r.uX][p_r.lY : p_r.uY, p_r.lX : p_r.uX],
            contours,
            binary_img,
            target_size=bar.pupil_size.value,
            size_tolerance=bar.pupil_size_tolerance.value,
        )

        # Visualizations
        overlay = cv2.cvtColor(pupil_img, cv2.COLOR_GRAY2RGB)  # create an RGB view onto the gray pupil ROI
        overlay[:, :, 0] = cv2.max(pupil_img, contours)  # red channel: edge pixels
        overlay[:, :, 2] = cv2.max(pupil_img, binary_img)  # blue channel: dark-region mask
        overlay[:, :, 1] = cv2.min(pupil_img, spec_mask)  # green channel: specular-reflection mask

        # draw a blue dotted frame around the automatic pupil ROI in overlay...
        overlay[::2, 0] = 0, 0, 255
        overlay[::2, -1] = 0, 0, 255
        overlay[0, ::2] = 0, 0, 255
        overlay[-1, ::2] = 0, 0, 255

        # and a solid (white) frame around the user defined ROI
        gray_img[:, 0] = 255
        gray_img[:, -1] = 255
        gray_img[0, :] = 255
        gray_img[-1, :] = 255

        if bar.display.value == 0:
            img = img
        elif bar.display.value == 1:
            img[r.lY : r.uY, r.lX : r.uX] = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2RGB)
        elif bar.display.value == 2:
            img[r.lY : r.uY, r.lX : r.uX] = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2RGB)
            img[r.lY : r.uY, r.lX : r.uX][p_r.lY : p_r.uY, p_r.lX : p_r.uX] = overlay
        elif bar.display.value == 3:
            img = cv2.cvtColor(pupil_img, cv2.COLOR_GRAY2RGB)
        else:
            pass

        if result is not None:
            pupil.ellipse, others = result
            pupil.image_coords = r.add_vector(p_r.add_vector(pupil.ellipse["center"]))
            # update pupil size,angle and ratio for the ellipse filter algorithm
            bar.pupil_size.value = bar.pupil_size.value + 0.5 * (pupil.ellipse["major"] - bar.pupil_size.value)
            bar.pupil_ratio.value = bar.pupil_ratio.value + 0.7 * (pupil.ellipse["ratio"] - bar.pupil_ratio.value)
            bar.pupil_angle.value = bar.pupil_angle.value + 1.0 * (pupil.ellipse["angle"] - bar.pupil_angle.value)

            # if pupil found tighten the size tolerance
            bar.pupil_size_tolerance.value -= 1
            bar.pupil_size_tolerance.value = max(10, min(50, bar.pupil_size_tolerance.value))

            # clamp pupil size
            bar.pupil_size.value = max(20, min(300, bar.pupil_size.value))

            # normalize
            pupil.norm_coords = normalize(pupil.image_coords, (img.shape[1], img.shape[0]), flip_y=True)

            # from pupil to gaze
            pupil.gaze_coords = map_pupil(pupil.norm_coords)
            g_pool.gaze_x.value, g_pool.gaze_y.value = pupil.gaze_coords

        else:
            pupil.ellipse = None
            g_pool.gaze_x.value, g_pool.gaze_y.value = 0.0, 0.0
            pupil.gaze_coords = None  # without this line the last known pupil position would be recorded if none is found

            bar.pupil_size_tolerance.value += 1

        ###CALIBRATION###
        # Initialize Calibration (setup variables and lists)
        if g_pool.calibrate.value and not l_pool.calib_running:
            l_pool.calib_running = True
            pupil.pt_cloud = []

        # While Calibrating...
        if l_pool.calib_running and ((g_pool.ref_x.value != 0) or (g_pool.ref_y.value != 0)) and pupil.ellipse:
            pupil.pt_cloud.append([pupil.norm_coords[0], pupil.norm_coords[1], g_pool.ref_x.value, g_pool.ref_y.value])

        # Calculate mapping coefs
        if not g_pool.calibrate.value and l_pool.calib_running:
            l_pool.calib_running = 0
            if pupil.pt_cloud:  # some data was actually collected
                print "Calibrating with", len(pupil.pt_cloud), "collected data points."
                map_pupil = get_map_from_cloud(np.array(pupil.pt_cloud), g_pool.world_size, verbose=True)
                np.save("cal_pt_cloud.npy", np.array(pupil.pt_cloud))

        ###RECORDING###
        # Setup variables and lists for recording
        if g_pool.pos_record.value and not l_pool.record_running:
            l_pool.record_path = g_pool.eye_rx.recv()
            print "l_pool.record_path: ", l_pool.record_path

            video_path = path.join(l_pool.record_path, "eye.avi")
            # FFV1 -- good speed lossless big file
            # DIVX -- good speed good compression medium file
            if bar.record_eye.value:
                l_pool.writer = cv2.VideoWriter(
                    video_path, cv2.cv.CV_FOURCC(*"DIVX"), bar.fps.value, (img.shape[1], img.shape[0])
                )
            l_pool.record_positions = []
            l_pool.record_running = True

        # While recording...
        if l_pool.record_running:
            if pupil.gaze_coords is not None:
                l_pool.record_positions.append(
                    [
                        pupil.gaze_coords[0],
                        pupil.gaze_coords[1],
                        pupil.norm_coords[0],
                        pupil.norm_coords[1],
                        bar.dt,
                        g_pool.frame_count_record.value,
                    ]
                )
            if l_pool.writer is not None:
                l_pool.writer.write(cv2.cvtColor(img, cv2.cv.COLOR_BGR2RGB))

        # Done Recording: Save values and flip switch to off for recording
        if not g_pool.pos_record.value and l_pool.record_running:
            positions_path = path.join(l_pool.record_path, "gaze_positions.npy")
            cal_pt_cloud_path = path.join(l_pool.record_path, "cal_pt_cloud.npy")
            np.save(positions_path, np.asarray(l_pool.record_positions))
            try:
                np.save(cal_pt_cloud_path, np.asarray(pupil.pt_cloud))
            except:
                print "Warning: No calibration data associated with this recording."
            l_pool.writer = None
            l_pool.record_running = False

        ### GL-drawing
        clear_gl_screen()
        draw_gl_texture(img)

        if bar.draw_pupil and pupil.ellipse:
            pts = cv2.ellipse2Poly(
                (int(pupil.image_coords[0]), int(pupil.image_coords[1])),
                (int(pupil.ellipse["axes"][0] / 2), int(pupil.ellipse["axes"][1] / 2)),
                int(pupil.ellipse["angle"]),
                0,
                360,
                15,
            )
            draw_gl_polyline(pts, (1.0, 0, 0, 0.5))
            draw_gl_point_norm(pupil.norm_coords, (1.0, 0.0, 0.0, 0.5))

        atb.draw()
        glfwSwapBuffers()

    # end while running
    print "EYE Process closed"
    r.save()
    bar.save()
    atb.terminate()
    glfwCloseWindow()
    glfwTerminate()
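
The pupil threshold in the example above is derived from the intensity histogram: the darkest intensity bin that occurs in more than 40 pixels (the "lowest spike") plus a fixed offset. The following self-contained sketch reproduces that idea on a synthetic image; the array names and constants mirror the example but are illustrative only.

import numpy as np
import cv2

# synthetic eye-like patch: bright background with a dark blob standing in for the pupil
img = np.full((120, 120), 180, dtype=np.uint8)
cv2.circle(img, (60, 60), 20, 40, -1)

hist = cv2.calcHist([img], [0], None, [256], [0, 256])  # 256-bin intensity histogram
bins = np.arange(hist.shape[0])
spikes = bins[hist[:, 0] > 40]                 # intensities seen in more than 40 pixels
lowest_spike = spikes.min() if spikes.size else 200
offset = 40                                    # widen the accepted band above the darkest spike

# keep only pixels at or below the threshold: these are the candidate pupil pixels
binary = (img <= lowest_spike + offset).astype(np.uint8) * 255
print("threshold:", lowest_spike + offset, "dark pixels:", int(binary.sum() / 255))
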
Example No. 45
def player_drop(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir,
                app_version, debug):
    # general imports
    import logging
    import os
    import signal

    # networking
    import zmq
    import zmq_tools
    from time import sleep

    # zmq ipc setup
    zmq_ctx = zmq.Context()
    ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)

    # log setup
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.INFO)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    try:
        import glfw
        import gl_utils
        from OpenGL.GL import glClearColor
        from version_utils import VersionFormat
        from file_methods import Persistent_Dict
        from pyglui.pyfontstash import fontstash
        from pyglui.ui import get_roboto_font_path
        import player_methods as pm
        from pupil_recording import (
            assert_valid_recording_type,
            InvalidRecordingException,
        )
        from pupil_recording.update import update_recording

        process_was_interrupted = False

        def interrupt_handler(sig, frame):
            import traceback

            trace = traceback.format_stack(f=frame)
            logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
            nonlocal process_was_interrupted
            process_was_interrupted = True

        signal.signal(signal.SIGINT, interrupt_handler)

        def on_drop(window, count, paths):
            nonlocal rec_dir
            rec_dir = paths[0].decode("utf-8")

        if rec_dir:
            try:
                assert_valid_recording_type(rec_dir)
            except InvalidRecordingException as err:
                logger.error(str(err))
                rec_dir = None
        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(user_dir, "user_settings_player"))
        if VersionFormat(session_settings.get("version",
                                              "0.0")) != app_version:
            logger.info(
                "Session setting are from a  different version of this app. I will not use those."
            )
            session_settings.clear()
        w, h = session_settings.get("window_size", (1280, 720))
        window_pos = session_settings.get("window_position",
                                          window_position_default)

        glfw.glfwInit()
        glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, 0)
        window = glfw.glfwCreateWindow(w, h, "Pupil Player")
        glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, 1)

        glfw.glfwMakeContextCurrent(window)
        glfw.glfwSetWindowPos(window, window_pos[0], window_pos[1])
        glfw.glfwSetDropCallback(window, on_drop)

        glfont = fontstash.Context()
        glfont.add_font("roboto", get_roboto_font_path())
        glfont.set_align_string(v_align="center", h_align="middle")
        glfont.set_color_float((0.2, 0.2, 0.2, 0.9))
        gl_utils.basic_gl_setup()
        glClearColor(0.5, 0.5, 0.5, 0.0)
        text = "Drop a recording directory onto this window."
        tip = "(Tip: You can drop a recording directory onto the app icon.)"

        # text = "Please supply a Pupil recording directory as first arg when calling Pupil Player."

        def display_string(string, font_size, center_y):
            x = w / 2 * hdpi_factor
            y = center_y * hdpi_factor

            glfont.set_size(font_size * hdpi_factor)

            glfont.set_blur(10.5)
            glfont.set_color_float((0.0, 0.0, 0.0, 1.0))
            glfont.draw_text(x, y, string)

            glfont.set_blur(0.96)
            glfont.set_color_float((1.0, 1.0, 1.0, 1.0))
            glfont.draw_text(x, y, string)

        while not glfw.glfwWindowShouldClose(
                window) and not process_was_interrupted:

            fb_size = glfw.glfwGetFramebufferSize(window)
            hdpi_factor = glfw.getHDPIFactor(window)
            gl_utils.adjust_gl_view(*fb_size)

            if rec_dir:
                try:
                    assert_valid_recording_type(rec_dir)
                    logger.info(
                        "Starting new session with '{}'".format(rec_dir))
                    text = "Updating recording format."
                    tip = "This may take a while!"
                except InvalidRecordingException as err:
                    logger.error(str(err))
                    if err.recovery:
                        text = err.reason
                        tip = err.recovery
                    else:
                        text = "Invalid recording"
                        tip = err.reason
                    rec_dir = None

            gl_utils.clear_gl_screen()

            display_string(text, font_size=51, center_y=216)
            for idx, line in enumerate(tip.split("\n")):
                tip_font_size = 42
                center_y = 288 + tip_font_size * idx * 1.2
                display_string(line,
                               font_size=tip_font_size,
                               center_y=center_y)

            glfw.glfwSwapBuffers(window)

            if rec_dir:
                try:
                    update_recording(rec_dir)
                except AssertionError as err:
                    logger.error(str(err))
                    tip = "Oops! There was an error updating the recording."
                    rec_dir = None
                except InvalidRecordingException as err:
                    logger.error(str(err))
                    if err.recovery:
                        text = err.reason
                        tip = err.recovery
                    else:
                        text = "Invalid recording"
                        tip = err.reason
                    rec_dir = None
                else:
                    glfw.glfwSetWindowShouldClose(window, True)

            glfw.glfwPollEvents()

        session_settings["window_position"] = glfw.glfwGetWindowPos(window)
        session_settings.close()
        glfw.glfwDestroyWindow(window)
        if rec_dir:
            ipc_pub.notify({
                "subject": "player_process.should_start",
                "rec_dir": rec_dir
            })

    except Exception:
        import traceback

        trace = traceback.format_exc()
        logger.error(
            "Process player_drop crashed with trace:\n{}".format(trace))

    finally:
        sleep(1.0)
Example No. 46
def player_drop(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir,
                app_version):
    # general imports
    import logging
    import os
    # networking
    import zmq
    import zmq_tools
    from time import sleep

    # zmq ipc setup
    zmq_ctx = zmq.Context()
    ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)

    # log setup
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.INFO)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    try:

        import glfw
        import gl_utils
        from OpenGL.GL import glClearColor
        from version_utils import VersionFormat
        from file_methods import Persistent_Dict
        from pyglui.pyfontstash import fontstash
        from pyglui.ui import get_roboto_font_path
        from player_methods import is_pupil_rec_dir, update_recording_to_recent

        def on_drop(window, count, paths):
            nonlocal rec_dir
            rec_dir = paths[0].decode('utf-8')

        if rec_dir:
            if not is_pupil_rec_dir(rec_dir):
                rec_dir = None
        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(user_dir, "user_settings_player"))
        if VersionFormat(session_settings.get("version",
                                              '0.0')) != app_version:
            logger.info(
                "Session setting are from a  different version of this app. I will not use those."
            )
            session_settings.clear()
        w, h = session_settings.get('window_size', (1280, 720))
        window_pos = session_settings.get('window_position',
                                          window_position_default)

        glfw.glfwInit()
        glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, 0)
        window = glfw.glfwCreateWindow(w, h, 'Pupil Player')
        glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, 1)

        glfw.glfwMakeContextCurrent(window)
        glfw.glfwSetWindowPos(window, window_pos[0], window_pos[1])
        glfw.glfwSetDropCallback(window, on_drop)

        glfont = fontstash.Context()
        glfont.add_font('roboto', get_roboto_font_path())
        glfont.set_align_string(v_align="center", h_align="middle")
        glfont.set_color_float((0.2, 0.2, 0.2, 0.9))
        gl_utils.basic_gl_setup()
        glClearColor(0.5, .5, 0.5, 0.0)
        text = 'Drop a recording directory onto this window.'
        tip = '(Tip: You can drop a recording directory onto the app icon.)'
        # text = "Please supply a Pupil recording directory as first arg when calling Pupil Player."
        while not glfw.glfwWindowShouldClose(window):

            fb_size = glfw.glfwGetFramebufferSize(window)
            hdpi_factor = float(fb_size[0] / glfw.glfwGetWindowSize(window)[0])
            gl_utils.adjust_gl_view(*fb_size)

            if rec_dir:
                if is_pupil_rec_dir(rec_dir):
                    logger.info(
                        "Starting new session with '{}'".format(rec_dir))
                    text = "Updating recording format."
                    tip = "This may take a while!"
                else:
                    logger.error(
                        "'{}' is not a valid pupil recording".format(rec_dir))
                    tip = "Oops! That was not a valid recording."
                    rec_dir = None

            gl_utils.clear_gl_screen()
            glfont.set_blur(10.5)
            glfont.set_color_float((0.0, 0.0, 0.0, 1.))
            glfont.set_size(w / 25. * hdpi_factor)
            glfont.draw_text(w / 2 * hdpi_factor, .3 * h * hdpi_factor, text)
            glfont.set_size(w / 30. * hdpi_factor)
            glfont.draw_text(w / 2 * hdpi_factor, .4 * h * hdpi_factor, tip)
            glfont.set_blur(0.96)
            glfont.set_color_float((1., 1., 1., 1.))
            glfont.set_size(w / 25. * hdpi_factor)
            glfont.draw_text(w / 2 * hdpi_factor, .3 * h * hdpi_factor, text)
            glfont.set_size(w / 30. * hdpi_factor)
            glfont.draw_text(w / 2 * hdpi_factor, .4 * h * hdpi_factor, tip)

            glfw.glfwSwapBuffers(window)

            if rec_dir:
                update_recording_to_recent(rec_dir)
                glfw.glfwSetWindowShouldClose(window, True)

            glfw.glfwPollEvents()

        session_settings['window_position'] = glfw.glfwGetWindowPos(window)
        session_settings.close()
        glfw.glfwDestroyWindow(window)
        if rec_dir:
            ipc_pub.notify({
                "subject": "player_process.should_start",
                "rec_dir": rec_dir
            })

    except:
        import traceback
        trace = traceback.format_exc()
        logger.error(
            'Process player_drop crashed with trace:\n{}'.format(trace))

    finally:
        sleep(1.0)
Example No. 47
def world(g_pool):
    """world
    """

    # Callback functions
    def on_resize(w, h):
        atb.TwWindowSize(w, h)
        adjust_gl_view(w,h)

    def on_key(key, pressed):
        if not atb.TwEventKeyboardGLFW(key,pressed):
            if pressed:
                if key == GLFW_KEY_ESC:
                    on_close()

    def on_char(char, pressed):
        if not atb.TwEventCharGLFW(char,pressed):
            pass

    def on_button(button, pressed):
        if not atb.TwEventMouseButtonGLFW(button,pressed):
            if pressed:
                pos = glfwGetMousePos()
                pos = normalize(pos,glfwGetWindowSize())
                pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
                for p in g.plugins:
                    p.on_click(pos)

    def on_pos(x, y):
        if atb.TwMouseMotion(x,y):
            pass

    def on_scroll(pos):
        if not atb.TwMouseWheel(pos):
            pass

    def on_close():
        g_pool.quit.value = True
        print "WORLD Process closing from window"





    # load session persistent settings
    session_settings = shelve.open('user_settings_world',protocol=2)
    def load(var_name,default):
        try:
            return session_settings[var_name]
        except:
            return default
    def save(var_name,var):
        session_settings[var_name] = var


    # gaze object
    gaze = Temp()
    gaze.map_coords = (0., 0.)
    gaze.image_coords = (0., 0.)

    # Initialize capture, check if it works
    cap = autoCreateCapture(g_pool.world_src, g_pool.world_size,24)
    if cap is None:
        print "WORLD: Error could not create Capture"
        return
    frame = cap.get_frame()
    if frame.img is None:
        print "WORLD: Error could not get image"
        return
    height,width = frame.img.shape[:2]


    # helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1 / dt - bar.fps.value)

    def set_window_size(mode,data):
        height,width = frame.img.shape[:2]
        ratio = (1,.75,.5,.25)[mode]
        w,h = int(width*ratio),int(height*ratio)
        glfwSetWindowSize(w,h)
        data.value=mode # update the bar.value

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value

    def open_calibration(selection,data):
        # prepare destruction of old ref_detector.
        if g.current_ref_detector:
            g.current_ref_detector.alive = False

        # remove old ref detector from list of plugins
        g.plugins = [p for p in g.plugins if p.alive]

        print "selected: ",reference_detectors.name_by_index[selection]
        g.current_ref_detector = reference_detectors.detector_by_index[selection](global_calibrate=g_pool.calibrate,
                                                                    shared_pos=g_pool.ref,
                                                                    screen_marker_pos = g_pool.marker,
                                                                    screen_marker_state = g_pool.marker_state,
                                                                    atb_pos=bar.next_atb_pos)

        g.plugins.append(g.current_ref_detector)
        # save the value for atb bar
        data.value=selection

    def toggle_record_video():
        if any([True for p in g.plugins if isinstance(p,recorder.Recorder)]):
            for p in g.plugins:
                if isinstance(p,recorder.Recorder):
                    p.alive = False
        else:
            # set up folder within recordings named by user input in atb
            if not bar.rec_name.value:
                bar.rec_name.value = recorder.get_auto_name()
            recorder_instance = recorder.Recorder(bar.rec_name.value, bar.fps.value, frame.img.shape, g_pool.pos_record, g_pool.eye_tx)
            g.plugins.append(recorder_instance)

    def toggle_show_calib_result():
        if any([True for p in g.plugins if isinstance(p,Show_Calibration)]):
            for p in g.plugins:
                if isinstance(p,Show_Calibration):
                    p.alive = False
        else:
            calib = Show_Calibration(frame.img.shape)
            g.plugins.append(calib)

    def show_calib_result():
        # kill old if any
        if any([True for p in g.plugins if isinstance(p,Show_Calibration)]):
            for p in g.plugins:
                if isinstance(p,Show_Calibration):
                    p.alive = False
            g.plugins = [p for p in g.plugins if p.alive]
        # make new
        calib = Show_Calibration(frame.img.shape)
        g.plugins.append(calib)

    def hide_calib_result():
        if any([True for p in g.plugins if isinstance(p,Show_Calibration)]):
            for p in g.plugins:
                if isinstance(p,Show_Calibration):
                    p.alive = False

    # Initialize ant tweak bar - inherits from atb.Bar
    atb.init()
    bar = atb.Bar(name = "World", label="Controls",
            help="Scene controls", color=(50, 50, 50), alpha=100,valueswidth=150,
            text='light', position=(10, 10),refresh=.3, size=(300, 200))
    bar.next_atb_pos = (10,220)
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.calibration_type = c_int(load("calibration_type",0))
    bar.show_calib_result = c_bool(0)
    bar.record_video = c_bool(0)
    bar.record_running = c_bool(0)
    bar.play = g_pool.play
    bar.window_size = c_int(load("window_size",0))
    window_size_enum = atb.enum("Display Size",{"Full":0, "Medium":1,"Half":2,"Mini":3})

    bar.calibrate_type_enum = atb.enum("Calibration Method",reference_detectors.index_by_name)
    bar.rec_name = create_string_buffer(512)
    bar.rec_name.value = recorder.get_auto_name()
    # play and record can be tied together via pointers to the objects
    # bar.play = bar.record_video
    bar.add_var("fps", bar.fps, step=1., readonly=True)
    bar.add_var("display size", vtype=window_size_enum,setter=set_window_size,getter=get_from_data,data=bar.window_size)
    bar.add_var("calibration method",setter=open_calibration,getter=get_from_data,data=bar.calibration_type, vtype=bar.calibrate_type_enum,group="Calibration", help="Please choose your desired calibration method.")
    bar.add_button("show calibration result",toggle_show_calib_result, group="Calibration", help="Click to show calibration result.")
    bar.add_var("session name",bar.rec_name, group="Recording", help="creates folder Data_Name_XXX, where xxx is an increasing number")
    bar.add_button("record", toggle_record_video, key="r", group="Recording", help="Start/Stop Recording")
    bar.add_separator("Sep1")
    bar.add_var("play video", bar.play, help="play a video in the Player window")
    bar.add_var("exit", g_pool.quit)

    # add uvc camera controls to a separate ATB bar
    cap.create_atb_bar(pos=(320,10))


    # create container for globally scoped vars (within world)
    g = Temp()
    g.plugins = []
    g.current_ref_detector = None
    open_calibration(bar.calibration_type.value,bar.calibration_type)

    # Initialize glfw
    glfwInit()
    height,width = frame.img.shape[:2]
    glfwOpenWindow(width, height, 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("World")
    glfwSetWindowPos(0,0)

    #set the last saved window size
    set_window_size(bar.window_size.value,bar.window_size)

    # Register callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)
    glfwSetMouseButtonCallback(on_button)
    glfwSetMousePosCallback(on_pos)
    glfwSetMouseWheelCallback(on_scroll)

    # gl_state settings
    import OpenGL.GL as gl
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)
    del gl

    # Event loop
    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        # Get input characters entered in player
        if g_pool.player_input.value:
            player_input = g_pool.player_input.value
            g_pool.player_input.value = 0
            on_char(player_input,True)

        # Get an image from the grabber
        frame = cap.get_frame()
        update_fps()

        for p in g.plugins:
            p.update(frame)

        g.plugins = [p for p in g.plugins if p.alive]

        g_pool.player_refresh.set()

        # render the screen
        clear_gl_screen()
        draw_gl_texture(frame.img)

        # render visual feedback from loaded plugins
        for p in g.plugins:
            p.gl_display()


        # update gaze point from shared variable pool and draw on screen. If both coords are 0: no pupil pos was detected.
        if not g_pool.gaze[:] == [0.,0.]:
            draw_gl_point_norm(g_pool.gaze[:],color=(1.,0.,0.,0.5))

        atb.draw()
        glfwSwapBuffers()


    # end while running and clean-up

    # de-init all running plugins
    for p in g.plugins:
        p.alive = False
    g.plugins = [p for p in g.plugins if p.alive]

    save('window_size',bar.window_size.value)
    save('calibration_type',bar.calibration_type.value)
    session_settings.close()

    cap.close()
    glfwCloseWindow()
    glfwTerminate()
    print "WORLD Process closed"
Example No. 48
File: eye.py Project: r0bert0/pupil
def eye(g_pool,cap_src,cap_size):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir,'eye.log'),mode='w')
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('EYE Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter('E Y E Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)


    # Callback functions
    def on_resize(window,w, h):
        adjust_gl_view(w,h,window)
        norm_size = normalize((w,h),glfwGetWindowSize(window))
        fb_size = denormalize(norm_size,glfwGetFramebufferSize(window))
        atb.TwWindowSize(*map(int,fb_size))


    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key,int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window,char):
        if not atb.TwEventCharGLFW(char,1):
            pass

    def on_button(window,button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button,int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(window))
                pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # pos in frame.img pixels
                u_r.setStart(pos)
                bar.draw_roi.value = 1
            else:
                bar.draw_roi.value = 0

    def on_pos(window,x, y):
        norm_pos = normalize((x,y),glfwGetWindowSize(window))
        fb_x,fb_y = denormalize(norm_pos,glfwGetFramebufferSize(window))
        if atb.TwMouseMotion(int(fb_x),int(fb_y)):
            pass

        if bar.draw_roi.value == 1:
            pos = denormalize(norm_pos,(frame.img.shape[1],frame.img.shape[0]) ) # pos in frame.img pixels
            u_r.setEnd(pos)

    def on_scroll(window,x,y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')


    # Helper functions called by the main atb bar
    def start_roi():
        bar.display.value = 1
        bar.draw_roi.value = 2

    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)
            bar.dt.value = dt

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value


    # load session persistent settings
    session_settings = shelve.open(os.path.join(g_pool.user_dir,'user_settings_eye'),protocol=2)
    def load(var_name,default):
        return session_settings.get(var_name,default)
    def save(var_name,var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(cap_src, cap_size,timebase=g_pool.timebase)

    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    # check if it works
    frame = cap.get_frame()
    if frame.img is None:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height,width = frame.img.shape[:2]

    u_r = Roi(frame.img.shape)
    u_r.set(load('roi',default=None))

    writer = None

    pupil_detector = Canny_Detector(g_pool)

    atb.init()
    # Create main ATB Controls
    bar = atb.Bar(name = "Eye", label="Display",
            help="Scene controls", color=(50, 50, 50), alpha=100,
            text='light', position=(10, 10),refresh=.3, size=(200, 100))
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.dt = c_float(0.0)
    bar.sleep = c_float(0.0)
    bar.display = c_int(load('bar.display',0))
    bar.draw_pupil = c_bool(load('bar.draw_pupil',True))
    bar.draw_roi = c_int(0)

    display_mode_enum = atb.enum("Mode",{"Camera Image":0,
                                        "Region of Interest":1,
                                        "Algorithm":2,
                                        "CPU Save": 3})

    bar.add_var("FPS",bar.fps, step=1.,readonly=True)
    bar.add_var("Mode", bar.display,vtype=dispay_mode_enum, help="select the view-mode")
    bar.add_var("Show_Pupil_Point", bar.draw_pupil)
    bar.add_button("Draw_ROI", start_roi, help="drag on screen to select a region of interest")

    bar.add_var("SlowDown",bar.sleep, step=0.01,min=0.0)
    bar.add_var("SaveSettings&Exit", g_pool.quit)

    cap.create_atb_bar(pos=(220,10))

    # create a bar for the detector
    pupil_detector.create_atb_bar(pos=(10,120))


    glfwInit()
    window = glfwCreateWindow(width, height, "Eye", None, None)
    glfwMakeContextCurrent(window)

    # Register callbacks window
    glfwSetWindowSizeCallback(window,on_resize)
    glfwSetWindowCloseCallback(window,on_close)
    glfwSetKeyCallback(window,on_key)
    glfwSetCharCallback(window,on_char)
    glfwSetMouseButtonCallback(window,on_button)
    glfwSetCursorPosCallback(window,on_pos)
    glfwSetScrollCallback(window,on_scroll)

    glfwSetWindowPos(window,800,0)
    on_resize(window,width,height)

    # gl_state settings
    basic_gl_setup()

    # refresh speed settings
    glfwSwapInterval(0)


    # event loop
    while not g_pool.quit.value:
        # Get an image from the grabber
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Capture from Camera Failed. Stopping.")
            break
        except EndofVideoFileError:
            logger.warning("Video File is done. Stopping")
            break

        update_fps()
        sleep(bar.sleep.value) # for debugging only


        ###  RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if g_pool.eye_rx.poll():
            command = g_pool.eye_rx.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %s"%record_path)
                video_path = os.path.join(record_path, "eye.avi")
                timestamps_path = os.path.join(record_path, "eye_timestamps.npy")
                writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'), bar.fps.value, (frame.img.shape[1], frame.img.shape[0]))
                timestamps = []
            else:
                logger.info("Done recording eye.")
                writer = None
                np.save(timestamps_path,np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write(frame.img)
            timestamps.append(frame.timestamp)


        # pupil ellipse detection
        result = pupil_detector.detect(frame,user_roi=u_r,visualize=bar.display.value == 2)
        # stream the result
        g_pool.pupil_queue.put(result)

        # VISUALIZATION direct visualizations on the frame.img data
        if bar.display.value == 1:
            # and a solid (white) frame around the user defined ROI
            r_img = frame.img[u_r.lY:u_r.uY,u_r.lX:u_r.uX]
            r_img[:,0] = 255,255,255
            r_img[:,-1]= 255,255,255
            r_img[0,:] = 255,255,255
            r_img[-1,:]= 255,255,255



        # GL-drawing
        clear_gl_screen()
        draw_gl_texture(frame.img,update=bar.display.value != 3)

        if result['norm_pupil'] is not None and bar.draw_pupil.value:
            if 'axes' in result:
                pts = cv2.ellipse2Poly( (int(result['center'][0]),int(result['center'][1])),
                                        (int(result["axes"][0]/2),int(result["axes"][1]/2)),
                                        int(result["angle"]),0,360,15)
                draw_gl_polyline(pts,(1.,0,0,.5))
            draw_gl_point_norm(result['norm_pupil'],color=(1.,0.,0.,0.5))

        atb.draw()
        glfwSwapBuffers(window)
        glfwPollEvents()

    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path,np.asarray(timestamps))


    # save session persistent settings
    save('roi',u_r.get())
    save('bar.display',bar.display.value)
    save('bar.draw_pupil',bar.draw_pupil.value)
    session_settings.close()

    pupil_detector.cleanup()
    cap.close()
    atb.terminate()
    glfwDestroyWindow(window)
    glfwTerminate()

    # flush the queue in case the world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")
Exemplo n.º 49
0
def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url, ipc_push_url,
        user_dir, version, eye_id, cap_src):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                        ipc_sub_url,
                                        topics=("notify", ))

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id):

        #logging setup
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        #general imports
        import numpy as np
        import cv2

        #display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline, Named_Texture, Sphere
        import OpenGL.GL as gl
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen, make_coord_system_pixel_based, make_coord_system_norm_based, make_coord_system_eye_camera_based
        from ui_roi import UIRoi
        #monitoring
        import psutil
        import math

        # helpers/utils
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, Roi, timer
        from video_capture import autoCreateCapture, FileCaptureError, EndofVideoFileError, CameraCaptureError
        from av_writer import JPEG_Writer, AV_Writer

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {
            Detector_2D.__name__: Detector_2D,
            Detector_3D.__name__: Detector_3D
        }

        #UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 1.0
            window_position_default = (600, 31 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        #g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.timebase = timebase

        # Callback functions
        def on_resize(window, w, h):
            if not g_pool.iconified:
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                g_pool.gui.update_window(w, h)
                graph.adjust_size(w, h)
                adjust_gl_view(w, h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_button(window, button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    return  # if the ROI interacts we don't want the GUI to interact as well
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    pos = denormalize(
                        pos,
                        (frame.width, frame.height))  # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(
                            pos, g_pool.u_r.handle_size + 40,
                            g_pool.u_r.handle_size + 40):
                        return  # if the ROI interacts we don't want the GUI to interact as well

            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            hdpi_factor = float(
                glfw.glfwGetFramebufferSize(window)[0] /
                glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, (frame.width, frame.height))
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir, 'user_settings_eye%s' % eye_id))
        if session_settings.get("version",
                                VersionFormat('0.0')) < g_pool.version:
            logger.info(
                "Session setting are from older version of this app. I will not use those."
            )
            session_settings.clear()
        # Initialize capture
        cap = autoCreateCapture(cap_src, timebase=g_pool.timebase)
        default_settings = {'frame_size': (640, 480), 'frame_rate': 60}
        previous_settings = session_settings.get('capture_settings', None)
        if previous_settings and previous_settings['name'] == cap.name:
            cap.settings = previous_settings
        else:
            cap.settings = default_settings

        g_pool.iconified = False
        g_pool.capture = cap
        g_pool.flip = session_settings.get('flip', False)
        g_pool.display_mode = session_settings.get('display_mode',
                                                   'camera_image')
        g_pool.display_mode_info_text = {
            'camera_image':
            "Raw eye camera image. This uses the least amount of CPU power",
            'roi':
            "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            'algorithm':
            "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."
        }

        g_pool.u_r = UIRoi((cap.frame_size[1], cap.frame_size[0]))
        g_pool.u_r.set(session_settings.get('roi', g_pool.u_r.get()))

        def on_frame_size_change(new_size):
            g_pool.u_r = UIRoi((new_size[1], new_size[0]))

        cap.on_frame_size_change = on_frame_size_change

        writer = None

        pupil_detector_settings = session_settings.get(
            'pupil_detector_settings', None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            'last_pupil_detector', Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,
                                                    pupil_detector_settings)

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui.scale = new_scale
            g_pool.gui.collect_menus()

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)

        # Initialize glfw
        glfw.glfwInit()
        title = "eye %s" % eye_id
        width, height = session_settings.get('window_size', cap.frame_size)
        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get('window_position',
                                          window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        glfw.glfwSwapInterval(0)

        #setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui.scale = session_settings.get('gui_scale', 1)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                           pos=(-300, 0),
                                           size=(0, 0),
                                           header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(
            ui.Slider('scale',
                      g_pool.gui,
                      setter=set_scale,
                      step=.05,
                      min=1.,
                      max=2.5,
                      label='Interface Size'))
        general_settings.append(
            ui.Button(
                'Reset window size', lambda: glfw.glfwSetWindowSize(
                    main_window, frame.width, frame.height)))
        general_settings.append(
            ui.Switch('flip', g_pool, label='Flip image display'))
        general_settings.append(
            ui.Selector('display_mode',
                        g_pool,
                        setter=set_display_mode_info,
                        selection=['camera_image', 'roi', 'algorithm'],
                        labels=['Camera Image', 'ROI', 'Algorithm'],
                        label="Mode"))
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode])
        general_settings.append(g_pool.display_mode_info)
        g_pool.sidebar.append(general_settings)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector(
            'pupil_detector',
            getter=lambda: g_pool.pupil_detector.__class__,
            setter=set_detector,
            selection=[Detector_2D, Detector_3D],
            labels=['C++ 2d detector', 'C++ 3d detector'],
            label="Detection method")
        general_settings.append(detector_selector)

        # let detector add its GUI
        g_pool.pupil_detector.init_gui(g_pool.sidebar)
        # let the camera add its GUI
        g_pool.capture.init_gui(g_pool.sidebar)

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_key)
        glfw.glfwSetCharCallback(main_window, on_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)

        #set the last saved window size
        on_resize(main_window, *glfw.glfwGetWindowSize(main_window))

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config', {})

        #set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = cap.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"

        #create a timer to control window update frequency
        window_update_timer = timer(1 / 60.)

        def window_should_update():
            return next(window_update_timer)

        logger.warning('Process started.')

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification['subject']
                if subject == 'eye_process.should_stop':
                    if notification['eye_id'] == eye_id:
                        break
                elif subject == 'set_detection_mapping_mode':
                    if notification['mode'] == '3d':
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    else:
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                elif subject == 'recording.started':
                    if notification['record_eye']:
                        record_path = notification['rec_path']
                        raw_mode = notification['compression']
                        logger.info("Will save eye video to: %s" % record_path)
                        timestamps_path = os.path.join(
                            record_path, "eye%s_timestamps.npy" % eye_id)
                        if raw_mode and frame.jpeg_buffer:
                            video_path = os.path.join(record_path,
                                                      "eye%s.mp4" % eye_id)
                            writer = JPEG_Writer(video_path, cap.frame_rate)
                        else:
                            video_path = os.path.join(record_path,
                                                      "eye%s.mp4" % eye_id)
                            writer = AV_Writer(video_path, cap.frame_rate)
                        timestamps = []
                elif subject == 'recording.stopped':
                    if writer:
                        logger.info("Done recording.")
                        writer.release()
                        writer = None
                        np.save(timestamps_path, np.asarray(timestamps))
                        del timestamps
                elif subject.startswith('meta.should_doc'):
                    ipc_socket.notify({
                        'subject': 'meta.doc',
                        'actor': 'eye%i' % eye_id,
                        'doc': eye.__doc__
                    })

            # Get an image from the grabber
            try:
                frame = cap.get_frame()
            except CameraCaptureError:
                logger.error("Capture from Camera Failed. Stopping.")
                break
            except EndofVideoFileError:
                logger.warning("Video File is done. Stopping")
                cap.seek_to_frame(0)
                frame = cap.get_frame()

            # update performance graphs
            t = frame.timestamp
            dt, ts = t - ts, t
            try:
                fps_graph.add(1. / dt)
            except ZeroDivisionError:
                pass
            cpu_graph.update()

            if writer:
                writer.write_video_frame(frame)
                timestamps.append(frame.timestamp)

            # pupil ellipse detection
            result = g_pool.pupil_detector.detect(
                frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
            result['id'] = eye_id
            # stream the result
            pupil_socket.send('pupil.%s' % eye_id, result)

            # GL drawing
            if window_should_update():
                if not g_pool.iconified:
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    # switch to work in normalized coordinate space
                    if g_pool.display_mode == 'algorithm':
                        g_pool.image_tex.update_from_ndarray(frame.img)
                    elif g_pool.display_mode in ('camera_image', 'roi'):
                        g_pool.image_tex.update_from_ndarray(frame.gray)
                    else:
                        pass

                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    window_size = glfw.glfwGetWindowSize(main_window)
                    make_coord_system_pixel_based(
                        (frame.height, frame.width, 3), g_pool.flip)

                    if result['method'] == '3d c++':

                        eye_ball = result['projected_sphere']
                        try:
                            pts = cv2.ellipse2Poly(
                                (int(eye_ball['center'][0]),
                                 int(eye_ball['center'][1])),
                                (int(eye_ball['axes'][0] / 2),
                                 int(eye_ball['axes'][1] / 2)),
                                int(eye_ball['angle']), 0, 360, 8)
                        except ValueError:
                            pass
                        else:
                            draw_polyline(
                                pts, 2,
                                RGBA(0., .9, .1, result['model_confidence']))

                    if result['confidence'] > 0:
                        if 'ellipse' in result:
                            pts = cv2.ellipse2Poly(
                                (int(result['ellipse']['center'][0]),
                                 int(result['ellipse']['center'][1])),
                                (int(result['ellipse']['axes'][0] / 2),
                                 int(result['ellipse']['axes'][1] / 2)),
                                int(result['ellipse']['angle']), 0, 360, 15)
                            confidence = result['confidence'] * 0.7  # scale it a little
                            draw_polyline(pts, 1, RGBA(1., 0, 0, confidence))
                            draw_points([result['ellipse']['center']],
                                        size=20,
                                        color=RGBA(1., 0., 0., confidence),
                                        sharpness=1.)

                    # render graphs
                    graph.push_view()
                    fps_graph.draw()
                    cpu_graph.draw()
                    graph.pop_view()

                    # render GUI
                    g_pool.gui.update()

                    #render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    #update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

        # END while running

        # in case eye recording was still running: save & close
        if writer:
            logger.info("Done recording eye.")
            writer = None
            np.save(timestamps_path, np.asarray(timestamps))

        glfw.glfwRestoreWindow(main_window)  # need to do this for Windows OS
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui.scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.settings
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(
            main_window)
        session_settings['version'] = g_pool.version
        session_settings[
            'last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings[
            'pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        cap.close()
        logger.info("Process shutting down.")
Exemplo n.º 50
0
def player_drop(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir,
                app_version, debug):
    # general imports
    import logging

    # networking
    import zmq
    import zmq_tools
    from time import sleep

    # zmq ipc setup
    zmq_ctx = zmq.Context()
    ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)

    # log setup
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.INFO)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    try:
        import glfw
        from gl_utils import GLFWErrorReporting

        GLFWErrorReporting.set_default()

        import gl_utils
        from OpenGL.GL import glClearColor
        from version_utils import parse_version
        from file_methods import Persistent_Dict
        from pyglui.pyfontstash import fontstash
        from pyglui.ui import get_roboto_font_path
        import player_methods as pm
        from pupil_recording import (
            assert_valid_recording_type,
            InvalidRecordingException,
        )
        from pupil_recording.update import update_recording

        process_was_interrupted = False

        def interrupt_handler(sig, frame):
            import traceback

            trace = traceback.format_stack(f=frame)
            logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
            nonlocal process_was_interrupted
            process_was_interrupted = True

        signal.signal(signal.SIGINT, interrupt_handler)

        def on_drop(window, paths):
            nonlocal rec_dir
            rec_dir = paths[0]

        if rec_dir:
            try:
                assert_valid_recording_type(rec_dir)
            except InvalidRecordingException as err:
                logger.error(str(err))
                rec_dir = None
        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(user_dir, "user_settings_player"))
        if parse_version(session_settings.get("version",
                                              "0.0")) != app_version:
            logger.info(
                "Session setting are from a  different version of this app. I will not use those."
            )
            session_settings.clear()
        w, h = session_settings.get("window_size", (1280, 720))

        glfw.init()
        glfw.window_hint(glfw.SCALE_TO_MONITOR, glfw.TRUE)
        glfw.window_hint(glfw.RESIZABLE, 0)
        window = glfw.create_window(w, h, "Pupil Player", None, None)
        glfw.window_hint(glfw.RESIZABLE, 1)

        glfw.make_context_current(window)

        window_position_manager = gl_utils.WindowPositionManager()
        window_pos = window_position_manager.new_window_position(
            window=window,
            default_position=window_position_default,
            previous_position=session_settings.get("window_position", None),
        )
        glfw.set_window_pos(window, window_pos[0], window_pos[1])

        glfw.set_drop_callback(window, on_drop)

        glfont = fontstash.Context()
        glfont.add_font("roboto", get_roboto_font_path())
        glfont.set_align_string(v_align="center", h_align="middle")
        glfont.set_color_float((0.2, 0.2, 0.2, 0.9))
        gl_utils.basic_gl_setup()
        glClearColor(0.5, 0.5, 0.5, 0.0)
        text = "Drop a recording directory onto this window."
        tip = "(Tip: You can drop a recording directory onto the app icon.)"

        # text = "Please supply a Pupil recording directory as first arg when calling Pupil Player."

        def display_string(string, font_size, center_y):
            x = w / 2 * content_scale
            y = center_y * content_scale

            glfont.set_size(font_size * content_scale)

            glfont.set_blur(10.5)
            glfont.set_color_float((0.0, 0.0, 0.0, 1.0))
            glfont.draw_text(x, y, string)

            glfont.set_blur(0.96)
            glfont.set_color_float((1.0, 1.0, 1.0, 1.0))
            glfont.draw_text(x, y, string)

        def display_multiline_string(string, font_size, top_y, split_chr="\n"):
            for idx, line in enumerate(string.split(split_chr)):
                center_y = top_y + font_size * idx * 1.2
                display_string(line, font_size=font_size, center_y=center_y)
            bottom_y = top_y + font_size * (idx + 1) * 1.2
            return bottom_y

        while not glfw.window_should_close(
                window) and not process_was_interrupted:

            fb_size = glfw.get_framebuffer_size(window)
            content_scale = gl_utils.get_content_scale(window)
            gl_utils.adjust_gl_view(*fb_size)

            if rec_dir:
                try:
                    assert_valid_recording_type(rec_dir)
                    logger.info(
                        "Starting new session with '{}'".format(rec_dir))
                    text = "Updating recording format."
                    tip = "This may take a while!"
                except InvalidRecordingException as err:
                    logger.error(str(err))
                    if err.recovery:
                        text = err.reason
                        tip = err.recovery
                    else:
                        text = "Invalid recording"
                        tip = err.reason
                    rec_dir = None

            gl_utils.clear_gl_screen()

            top_y = display_multiline_string(text, font_size=51, top_y=216)
            display_multiline_string(tip, font_size=42, top_y=top_y + 50)

            glfw.swap_buffers(window)

            if rec_dir:
                try:
                    update_recording(rec_dir)
                except AssertionError as err:
                    logger.error(str(err))
                    tip = "Oops! There was an error updating the recording."
                    rec_dir = None
                except InvalidRecordingException as err:
                    logger.error(str(err))
                    if err.recovery:
                        text = err.reason
                        tip = err.recovery
                    else:
                        text = "Invalid recording"
                        tip = err.reason
                    rec_dir = None
                else:
                    glfw.set_window_should_close(window, True)

            glfw.poll_events()

        session_settings["window_position"] = glfw.get_window_pos(window)
        session_settings.close()
        glfw.destroy_window(window)
        if rec_dir:
            ipc_pub.notify({
                "subject": "player_process.should_start",
                "rec_dir": rec_dir
            })

    except Exception:
        import traceback

        trace = traceback.format_exc()
        logger.error(
            "Process player_drop crashed with trace:\n{}".format(trace))

    finally:
        sleep(1.0)
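Exemplo n.º 50 installs a SIGINT handler that only flips a flag, so the GLFW loop can finish its current frame and exit cleanly instead of dying mid-draw on a KeyboardInterrupt. Below is a stripped-down sketch of that pattern with the GLFW parts replaced by a placeholder loop body.

# Stripped-down sketch of the interrupt pattern used above: the handler only
# sets a flag and the main loop checks it next to its normal exit condition.
import signal
import time

process_was_interrupted = False


def interrupt_handler(sig, frame):
    global process_was_interrupted
    process_was_interrupted = True


signal.signal(signal.SIGINT, interrupt_handler)

# in the example this condition is combined with glfw.window_should_close(window)
while not process_was_interrupted:
    time.sleep(1 / 60)   # placeholder for one frame of work

print("loop exited cleanly after Ctrl+C")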
Exemplo n.º 51
0
def player_drop(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir, app_version):
    # general imports
    import logging

    # networking
    import zmq
    import zmq_tools
    from time import sleep

    # zmq ipc setup
    zmq_ctx = zmq.Context()
    ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)

    # log setup
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.INFO)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    try:

        import glfw
        import gl_utils
        from OpenGL.GL import glClearColor
        from version_utils import VersionFormat
        from file_methods import Persistent_Dict
        from pyglui.pyfontstash import fontstash
        from pyglui.ui import get_roboto_font_path
        import player_methods as pm
        import update_methods as um

        def on_drop(window, count, paths):
            nonlocal rec_dir
            rec_dir = paths[0].decode("utf-8")

        if rec_dir:
            if not pm.is_pupil_rec_dir(rec_dir):
                rec_dir = None
        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(user_dir, "user_settings_player")
        )
        if VersionFormat(session_settings.get("version", "0.0")) != app_version:
            logger.info(
                "Session setting are from a  different version of this app. I will not use those."
            )
            session_settings.clear()
        w, h = session_settings.get("window_size", (1280, 720))
        window_pos = session_settings.get("window_position", window_position_default)

        glfw.glfwInit()
        glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, 0)
        window = glfw.glfwCreateWindow(w, h, "Pupil Player")
        glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, 1)

        glfw.glfwMakeContextCurrent(window)
        glfw.glfwSetWindowPos(window, window_pos[0], window_pos[1])
        glfw.glfwSetDropCallback(window, on_drop)

        glfont = fontstash.Context()
        glfont.add_font("roboto", get_roboto_font_path())
        glfont.set_align_string(v_align="center", h_align="middle")
        glfont.set_color_float((0.2, 0.2, 0.2, 0.9))
        gl_utils.basic_gl_setup()
        glClearColor(0.5, 0.5, 0.5, 0.0)
        text = "Drop a recording directory onto this window."
        tip = "(Tip: You can drop a recording directory onto the app icon.)"
        # text = "Please supply a Pupil recording directory as first arg when calling Pupil Player."
        while not glfw.glfwWindowShouldClose(window):

            fb_size = glfw.glfwGetFramebufferSize(window)
            hdpi_factor = glfw.getHDPIFactor(window)
            gl_utils.adjust_gl_view(*fb_size)

            if rec_dir:
                if pm.is_pupil_rec_dir(rec_dir):
                    logger.info("Starting new session with '{}'".format(rec_dir))
                    text = "Updating recording format."
                    tip = "This may take a while!"
                else:
                    logger.error("'{}' is not a valid pupil recording".format(rec_dir))
                    tip = "Oops! That was not a valid recording."
                    rec_dir = None

            gl_utils.clear_gl_screen()
            glfont.set_blur(10.5)
            glfont.set_color_float((0.0, 0.0, 0.0, 1.0))
            glfont.set_size(w / 25.0 * hdpi_factor)
            glfont.draw_text(w / 2 * hdpi_factor, 0.3 * h * hdpi_factor, text)
            glfont.set_size(w / 30.0 * hdpi_factor)
            glfont.draw_text(w / 2 * hdpi_factor, 0.4 * h * hdpi_factor, tip)
            glfont.set_blur(0.96)
            glfont.set_color_float((1.0, 1.0, 1.0, 1.0))
            glfont.set_size(w / 25.0 * hdpi_factor)
            glfont.draw_text(w / 2 * hdpi_factor, 0.3 * h * hdpi_factor, text)
            glfont.set_size(w / 30.0 * hdpi_factor)
            glfont.draw_text(w / 2 * hdpi_factor, 0.4 * h * hdpi_factor, tip)

            glfw.glfwSwapBuffers(window)

            if rec_dir:
                try:
                    um.update_recording_to_recent(rec_dir)
                except AssertionError as err:
                    logger.error(str(err))
                    rec_dir = None
                else:
                    glfw.glfwSetWindowShouldClose(window, True)

            glfw.glfwPollEvents()

        session_settings["window_position"] = glfw.glfwGetWindowPos(window)
        session_settings.close()
        glfw.glfwDestroyWindow(window)
        if rec_dir:
            ipc_pub.notify(
                {"subject": "player_process.should_start", "rec_dir": rec_dir}
            )

    except Exception:
        import traceback

        trace = traceback.format_exc()
        logger.error("Process player_drop crashed with trace:\n{}".format(trace))

    finally:
        sleep(1.0)
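Exemplo n.º 51 multiplies every font size and draw position by an hdpi_factor so text renders at the same apparent size on HiDPI screens, where the framebuffer is larger than the window's screen-coordinate size. A small sketch of that ratio, assuming the modern glfw binding names (the example itself uses the older glfwGetFramebufferSize-style wrapper):

# Sketch of the HiDPI scaling used above, assuming the modern `glfw` binding:
# the ratio of framebuffer pixels to window screen coordinates scales font
# sizes and draw positions.
import glfw


def hdpi_factor(window):
    fb_w, _ = glfw.get_framebuffer_size(window)   # size in pixels
    win_w, _ = glfw.get_window_size(window)       # size in screen coordinates
    return fb_w / win_w if win_w else 1.0


# hypothetical usage inside a render loop:
#   glfont.set_size(base_font_size * hdpi_factor(window))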
Exemplo n.º 52
0
def eye(pupil_queue, timebase, pipe_to_world, is_alive_flag, user_dir, version,
        eye_id, cap_src):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """
    is_alive = Is_Alive_Manager(is_alive_flag)
    with is_alive:
        import logging
        # Set up root logger for this process before doing imports of logged modules.
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        # remove inherited handlers
        logger.handlers = []
        # create file handler which logs even debug messages
        fh = logging.FileHandler(os.path.join(user_dir, 'eye%s.log' % eye_id),
                                 mode='w')
        # fh.setLevel(logging.DEBUG)
        # create console handler with a higher log level
        ch = logging.StreamHandler()
        ch.setLevel(logger.level + 10)
        # create formatter and add it to the handlers
        formatter = logging.Formatter(
            'Eye' + str(eye_id) +
            ' Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        formatter = logging.Formatter(
            'EYE' + str(eye_id) +
            ' Process [%(levelname)s] %(name)s : %(message)s')
        ch.setFormatter(formatter)
        # add the handlers to the logger
        logger.addHandler(fh)
        logger.addHandler(ch)
        #silence noisy modules
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logging.getLogger("libav").setLevel(logging.ERROR)
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        # We defer the imports because of multiprocessing.
        # Otherwise each process would also load the other processes' imports.

        #general imports
        import numpy as np
        import cv2

        #display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline, Named_Texture
        from OpenGL.GL import GL_LINE_LOOP
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen, make_coord_system_pixel_based, make_coord_system_norm_based
        from ui_roi import UIRoi
        #monitoring
        import psutil

        # helpers/utils
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, Roi, timer
        from video_capture import autoCreateCapture, FileCaptureError, EndofVideoFileError, CameraCaptureError
        from av_writer import JPEG_Writer, AV_Writer

        # Pupil detectors
        from pupil_detectors import Canny_Detector, Detector_2D, Detector_3D
        pupil_detectors = {
            Canny_Detector.__name__: Canny_Detector,
            Detector_2D.__name__: Detector_2D,
            Detector_3D.__name__: Detector_3D
        }

        #UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 1.0
            window_position_default = (600, 31 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        #g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.pupil_queue = pupil_queue
        g_pool.timebase = timebase

        # Callback functions
        def on_resize(window, w, h):
            if not g_pool.iconified:
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                g_pool.gui.update_window(w, h)
                graph.adjust_size(w, h)
                adjust_gl_view(w, h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_button(window, button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    return  # if the ROI interacts we don't want the GUI to interact as well
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    pos = denormalize(
                        pos,
                        (frame.width, frame.height))  # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(
                            pos, g_pool.u_r.handle_size + 40,
                            g_pool.u_r.handle_size + 40):
                        return  # if the ROI interacts we don't want the GUI to interact as well

            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            hdpi_factor = float(
                glfw.glfwGetFramebufferSize(window)[0] /
                glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, (frame.width, frame.height))
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir, 'user_settings_eye%s' % eye_id))
        if session_settings.get("version",
                                VersionFormat('0.0')) < g_pool.version:
            logger.info(
                "Session setting are from older version of this app. I will not use those."
            )
            session_settings.clear()
        # Initialize capture
        cap = autoCreateCapture(cap_src, timebase=g_pool.timebase)
        default_settings = {'frame_size': (640, 480), 'frame_rate': 60}
        previous_settings = session_settings.get('capture_settings', None)
        if previous_settings and previous_settings['name'] == cap.name:
            cap.settings = previous_settings
        else:
            cap.settings = default_settings

        # Test capture
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Could not retrieve image from capture")
            cap.close()
            return

        #signal world that we are ready to go
        # pipe_to_world.send('eye%s process ready'%eye_id)

        # any object we attach to the g_pool object *from now on* will only be visible to this process!
        # vars should be declared here to make them visible to the code reader.
        g_pool.iconified = False
        g_pool.capture = cap
        g_pool.flip = session_settings.get('flip', False)
        g_pool.display_mode = session_settings.get('display_mode',
                                                   'camera_image')
        g_pool.display_mode_info_text = {
            'camera_image':
            "Raw eye camera image. This uses the least amount of CPU power",
            'roi':
            "Click and drag on the blue circles to adjust the region of interest. The region should be a small as possible but big enough to capture to pupil in its movements",
            'algorithm':
            "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters with in the Pupil Detection menu below."
        }

        g_pool.u_r = UIRoi(frame.img.shape)
        g_pool.u_r.set(session_settings.get('roi', g_pool.u_r.get()))

        def on_frame_size_change(new_size):
            g_pool.u_r = UIRoi((new_size[1], new_size[0]))

        cap.on_frame_size_change = on_frame_size_change

        writer = None

        pupil_detector_settings = session_settings.get(
            'pupil_detector_settings', None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            'last_pupil_detector', Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,
                                                    pupil_detector_settings)

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui.scale = new_scale
            g_pool.gui.collect_menus()

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)

        # Initialize glfw
        glfw.glfwInit()
        title = "eye %s" % eye_id
        width, height = session_settings.get('window_size',
                                             (frame.width, frame.height))
        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get('window_position',
                                          window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_frame(frame)
        glfw.glfwSwapInterval(0)

        #setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui.scale = session_settings.get('gui_scale', 1)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                           pos=(-300, 0),
                                           size=(0, 0),
                                           header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(
            ui.Slider('scale',
                      g_pool.gui,
                      setter=set_scale,
                      step=.05,
                      min=1.,
                      max=2.5,
                      label='Interface Size'))
        general_settings.append(
            ui.Button(
                'Reset window size', lambda: glfw.glfwSetWindowSize(
                    main_window, frame.width, frame.height)))
        general_settings.append(
            ui.Switch('flip', g_pool, label='Flip image display'))
        general_settings.append(
            ui.Selector('display_mode',
                        g_pool,
                        setter=set_display_mode_info,
                        selection=['camera_image', 'roi', 'algorithm'],
                        labels=['Camera Image', 'ROI', 'Algorithm'],
                        label="Mode"))
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode])
        general_settings.append(g_pool.display_mode_info)
        g_pool.sidebar.append(general_settings)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector(
            'pupil_detector',
            getter=lambda: g_pool.pupil_detector.__class__,
            setter=set_detector,
            selection=[Canny_Detector, Detector_2D, Detector_3D],
            labels=[
                'Python 2D detector', 'C++ 2d detector', 'C++ 3d detector'
            ],
            label="Detection method")
        general_settings.append(detector_selector)

        # let detector add its GUI
        g_pool.pupil_detector.init_gui(g_pool.sidebar)
        # let the camera add its GUI
        g_pool.capture.init_gui(g_pool.sidebar)

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_key)
        glfw.glfwSetCharCallback(main_window, on_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)

        #set the last saved window size
        on_resize(main_window, *glfw.glfwGetWindowSize(main_window))

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config', {})

        #set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = frame.timestamp

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"

        #create a timer to control window update frequency
        window_update_timer = timer(1 / 60.)

        def window_should_update():
            return next(window_update_timer)

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if pipe_to_world.poll():
                cmd = pipe_to_world.recv()
                if cmd == 'Exit':
                    break
                elif cmd == "Ping":
                    pipe_to_world.send("Pong")
                    command = None
                else:
                    command, payload = cmd
                if command == 'Set_Detection_Mapping_Mode':
                    if payload == '3d':
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    else:
                        set_detector(Detector_2D)
                        detector_selector.read_only = False

            else:
                command = None

            # Get an image from the grabber
            try:
                frame = cap.get_frame()
            except CameraCaptureError:
                logger.error("Capture from Camera Failed. Stopping.")
                break
            except EndofVideoFileError:
                logger.warning("Video File is done. Stopping")
                cap.seek_to_frame(0)
                frame = cap.get_frame()

            # update performance graphs
            t = frame.timestamp
            dt, ts = t - ts, t
            try:
                fps_graph.add(1. / dt)
            except ZeroDivisionError:
                pass
            cpu_graph.update()

            ###  RECORDING of Eye Video (on demand) ###
            # Setup variables and lists for recording
            if 'Rec_Start' == command:
                record_path, raw_mode = payload
                logger.info("Will save eye video to: %s" % record_path)
                timestamps_path = os.path.join(record_path,
                                               "eye%s_timestamps.npy" % eye_id)
                if raw_mode and frame.jpeg_buffer:
                    video_path = os.path.join(record_path,
                                              "eye%s.mp4" % eye_id)
                    writer = JPEG_Writer(video_path, cap.frame_rate)
                else:
                    video_path = os.path.join(record_path,
                                              "eye%s.mp4" % eye_id)
                    writer = AV_Writer(video_path, cap.frame_rate)
                timestamps = []
            elif 'Rec_Stop' == command:
                logger.info("Done recording.")
                writer.release()
                writer = None
                np.save(timestamps_path, np.asarray(timestamps))
                del timestamps

            if writer:
                writer.write_video_frame(frame)
                timestamps.append(frame.timestamp)

            # pupil ellipse detection
            result = g_pool.pupil_detector.detect(
                frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
            result['id'] = eye_id
            # stream the result
            g_pool.pupil_queue.put(result)

            # GL drawing
            if window_should_update():
                if not g_pool.iconified:
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    # switch to work in normalized coordinate space
                    if g_pool.display_mode == 'algorithm':
                        g_pool.image_tex.update_from_ndarray(frame.img)
                    elif g_pool.display_mode in ('camera_image', 'roi'):
                        g_pool.image_tex.update_from_ndarray(frame.gray)
                    else:
                        pass

                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()
                    # switch to work in pixel space
                    make_coord_system_pixel_based(
                        (frame.height, frame.width, 3), g_pool.flip)

                    if result['confidence'] > 0:
                        if 'ellipse' in result:
                            pts = cv2.ellipse2Poly(
                                (int(result['ellipse']['center'][0]),
                                 int(result['ellipse']['center'][1])),
                                (int(result['ellipse']['axes'][0] / 2),
                                 int(result['ellipse']['axes'][1] / 2)),
                                int(result['ellipse']['angle']), 0, 360, 15)
                            draw_polyline(pts, 1, RGBA(1., 0, 0, .5))
                        draw_points([result['ellipse']['center']],
                                    size=20,
                                    color=RGBA(1., 0., 0., .5),
                                    sharpness=1.)

                    # render graphs
                    graph.push_view()
                    fps_graph.draw()
                    cpu_graph.draw()
                    graph.pop_view()

                    # render GUI
                    g_pool.gui.update()

                    #render the ROI
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw(g_pool.gui.scale)

                    #update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

        # END while running

        # in case eye recording was still running: save & close
        if writer:
            logger.info("Done recording eye.")
            writer = None
            np.save(timestamps_path, np.asarray(timestamps))

        glfw.glfwRestoreWindow(main_window)  # need to do this for Windows OS
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui.scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.settings
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = g_pool.version
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        cap.close()

        logger.debug("Process done")
Exemplo n.º 53
0
def player(g_pool,size):
    """player
        - Shows 9 point calibration pattern
        - Plays a source video synchronized with world process
        - Get src videos from directory (glob)
        - Iterate through videos on each record event
    """


    grid = make_grid()
    grid *= 2.5  # scale to fit
    # player object
    player = Temp()
    player.play_list = glob('src_video/*')
    path_parent = os.path.dirname( os.path.abspath(sys.argv[0]))
    player.playlist = [os.path.join(path_parent, path) for path in player.play_list]
    player.captures = [autoCreateCapture(src) for src in player.playlist]
    print "Player found %i videos in src_video"%len(player.captures)
    player.captures =  [c for c in player.captures if c is not None]
    print "Player sucessfully loaded %i videos in src_video"%len(player.captures)
    # for c in player.captures: c.auto_rewind = False
    player.current_video = 0

    # Callbacks
    def on_resize(w, h):
        adjust_gl_view(w,h)

    def on_key(key, pressed):
        if key == GLFW_KEY_ESC:
            on_close()
    def on_char(char, pressed):
        if pressed:
            g_pool.player_input.value = char


    def on_close():
        g_pool.quit.value = True
        print "Player Process closing from window"

    def draw_circle(pos,r,c):
        pts = cv2.ellipse2Poly(tuple(pos),(r,r),0,0,360,10)
        draw_gl_polyline(pts,c,'Polygon')

    def draw_marker(pos):
        pos = int(pos[0]),int(pos[1])
        black = (0.,0.,0.,1.)
        white = (1.,1.,1.,1.)
        for r,c in zip((50,40,30,20,10),(black,white,black,white,black)):
            draw_circle(pos,r,c)

    # Initialize glfw
    glfwInit()
    glfwOpenWindow(size[0], size[1], 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("Player")
    glfwSetWindowPos(100,0)
    glfwDisable(GLFW_AUTO_POLL_EVENTS)


    # Callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)


    # gl state settings
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)
    gl.glDisable (gl.GL_DEPTH_TEST)
    gl.glClearColor(1.,1.,1.,0.)



    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        glfwPollEvents()
        if g_pool.player_refresh.wait(0.01):
            g_pool.player_refresh.clear()

            clear_gl_screen()
            if g_pool.marker_state.value !=0:

                # Set Matrix using gluOrtho2D to include padding for the marker of radius r
                #
                ############################
                #            r             #
                # 0,0##################w,0 #
                # #                      # #
                # #                      # #
                #r#                      #r#
                # #                      # #
                # #                      # #
                # 0,h##################w,h #
                #            r             #
                ############################
                r = 60
                gl.glMatrixMode(gl.GL_PROJECTION)
                gl.glLoadIdentity()
                # compensate for radius of marker
                gluOrtho2D(-r,glfwGetWindowSize()[0]+r,glfwGetWindowSize()[1]+r, -r) # origin in the top left corner just like the img np-array
                # Switch back to Model View Matrix
                gl.glMatrixMode(gl.GL_MODELVIEW)
                gl.glLoadIdentity()


                screen_pos = denormalize(g_pool.marker[:],glfwGetWindowSize(),flip_y=True)

                #some feedback on the detection state
                draw_marker(screen_pos)
                if g_pool.ref[:] == [0.,0.]:
                    # world ref is detected
                    draw_gl_point(screen_pos, 5.0, (1.,0.,0.,1.))
                else:
                    draw_gl_point(screen_pos, 5.0, (0.,1.,0.,1.))

            elif g_pool.play.value:
                if len(player.captures):
                    frame = player.captures[player.current_video].get_frame()
                    img = frame.img
                    if img is not None:
                        draw_gl_texture(img)
                    else:
                        player.captures[player.current_video].rewind()
                        player.current_video +=1
                        if player.current_video >= len(player.captures):
                            player.current_video = 0
                        g_pool.play.value = False
                else:
                    print 'PLAYER: Warning: No videos available to play. Please put your videos into a folder called "src_video" in the Capture folder.'
                    g_pool.play.value = False
            glfwSwapBuffers()

    glfwCloseWindow()
    glfwTerminate()
    print "PLAYER Process closed"
Exemplo n.º 54
0
Arquivo: eye.py Projeto: sleip87/pupil
def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url, ipc_push_url,
        user_dir, version, eye_id, overwrite_cap_settings=None):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id):

        # logging setup
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.setLevel(logging.INFO)
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        # general imports
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible
        from ui_roi import UIRoi
        # monitoring
        import psutil

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, AV_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__: Detector_2D,
                           Detector_3D.__name__: Detector_3D}

        # UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 10.0
            window_position_default = (600, 31 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        # g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.process = 'eye{}'.format(eye_id)
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value
        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            if is_window_visible(window):
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0] / glfw.glfwGetWindowSize(window)[0])
                g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
                g_pool.gui.update_window(w, h)
                g_pool.gui.collect_menus()
                for g in g_pool.graphs:
                    g.scale = hdpi_factor
                    g.adjust_window_size(w, h)
                adjust_gl_view(w, h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the roi interacts we don't want
                    # the gui to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    # Position in img pixels
                    pos = denormalize(pos, g_pool.capture.frame_size)
                    if g_pool.u_r.mouse_over_edit_pt(pos, g_pool.u_r.handle_size + 40,g_pool.u_r.handle_size + 40):
                        # if the roi interacts we don't want
                        # the gui to interact as well
                        return

            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            hdpi_factor = glfw.glfwGetFramebufferSize(
                window)[0] / glfw.glfwGetWindowSize(window)[0]
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,g_pool.capture.frame_size )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            paths = [paths[x].decode('utf-8') for x in range(count)]
            g_pool.capture_manager.on_drop(paths)
            g_pool.capture.on_drop(paths)

        # load session persistent settings
        session_settings = Persistent_Dict(os.path.join(g_pool.user_dir, 'user_settings_eye{}'.format(eye_id)))
        if VersionFormat(session_settings.get("version", '0.0')) != g_pool.version:
            logger.info("Session setting are from a different version of this app. I will not use those.")
            session_settings.clear()


        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get('flip', False)
        g_pool.display_mode = session_settings.get(
            'display_mode', 'camera_image')
        g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                         'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                         'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}


        capture_manager_settings = session_settings.get(
            'capture_manager_settings', ('UVC_Manager',{}))

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__:c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](g_pool,**manager_settings)


        if eye_id == 0:
            cap_src = ["Pupil Cam1 ID0","HD-6000","Integrated Camera","HD USB Camera","USB 2.0 Camera"]
        else:
            cap_src = ["Pupil Cam1 ID1","HD-6000","Integrated Camera"]

        # Initialize capture
        default_settings = ('UVC_Source',{
                            'preferred_names'  : cap_src,
                            'frame_size': (640,480),
                            'frame_rate': 90
                            })

        capture_source_settings = overwrite_cap_settings or session_settings.get('capture_settings', default_settings)
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__:c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
        assert g_pool.capture

        g_pool.u_r = UIRoi((g_pool.capture.frame_size[1],g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get('roi')
        if roi_user_settings and tuple(roi_user_settings[-1]) == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        pupil_detector_settings = session_settings.get(
            'pupil_detector_settings', None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            'last_pupil_detector', Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(
            g_pool, pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)

        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)
        width, height = session_settings.get(
            'window_size', g_pool.capture.frame_size)
        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get(
            'window_position', window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(np.ones((1,1),dtype=np.uint8)+125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get('gui_scale', 1.)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                           pos=(-300, 0),
                                           size=(0, 0),
                                           header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(ui.Selector('gui_user_scale', g_pool,
                                          setter=set_scale,
                                          selection=[.8, .9, 1., 1.1, 1.2],
                                          label='Interface Size'))
        general_settings.append(ui.Button('Reset window size',lambda: glfw.glfwSetWindowSize(main_window,*g_pool.capture.frame_size)) )
        general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
        general_settings.append(ui.Selector('display_mode',
                                            g_pool,
                                            setter=set_display_mode_info,
                                            selection=['camera_image','roi','algorithm'],
                                            labels=['Camera Image', 'ROI', 'Algorithm'],
                                            label="Mode")
                                            )
        g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector('pupil_detector',
                                        getter=lambda: g_pool.pupil_detector.__class__,
                                        setter=set_detector, selection=[
                                            Detector_2D, Detector_3D],
                                        labels=['C++ 2d detector',
                                                'C++ 3d detector'],
                                        label="Detection method")
        general_settings.append(detector_selector)

        g_pool.capture_selector_menu = ui.Growing_Menu('Capture Selection')
        g_pool.capture_source_menu = ui.Growing_Menu('Capture Source')
        g_pool.capture_source_menu.collapsed = True
        g_pool.capture.init_gui()

        g_pool.sidebar.append(general_settings)
        g_pool.sidebar.append(g_pool.capture_selector_menu)
        g_pool.sidebar.append(g_pool.capture_source_menu)

        g_pool.pupil_detector.init_gui(g_pool.sidebar)

        g_pool.capture_manager.init_gui()
        g_pool.writer = None

        def replace_source(source_class_name,source_settings):
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
            g_pool.capture.init_gui()
            if g_pool.writer:
                logger.info("Done recording.")
                g_pool.writer.release()
                g_pool.writer = None

        g_pool.replace_source = replace_source # for ndsi capture

        def replace_manager(manager_class):
            g_pool.capture_manager.cleanup()
            g_pool.capture_manager = manager_class(g_pool)
            g_pool.capture_manager.init_gui()

        # We add the capture selection menu after a manager has been added:
        g_pool.capture_selector_menu.insert(0,ui.Selector(
                                                'capture_manager',g_pool,
                                                setter    = replace_manager,
                                                getter    = lambda: g_pool.capture_manager.__class__,
                                                selection = manager_classes,
                                                labels    = [b.gui_name for b in manager_classes],
                                                label     = 'Manager'
                                            ))


        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config', {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = 'jpeg'

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning('Process started.')

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification['subject']
                if subject.startswith('eye_process.should_stop'):
                    if notification['eye_id'] == eye_id:
                        break
                elif subject == 'set_detection_mapping_mode':
                    if notification['mode'] == '3d':
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    else:
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                elif subject == 'recording.started':
                    if notification['record_eye'] and g_pool.capture.online:
                        record_path = notification['rec_path']
                        raw_mode = notification['compression']
                        logger.info("Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path, "eye{}.mp4".format(eye_id))
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(video_path, g_pool.capture.frame_rate)
                        elif hasattr(g_pool.capture._recent_frame, 'h264_buffer'):
                            g_pool.writer = H264Writer(video_path,
                                                       g_pool.capture.frame_size[0],
                                                       g_pool.capture.frame_size[1],
                                                       g_pool.capture.frame_rate)
                        else:
                            g_pool.writer = AV_Writer(video_path, g_pool.capture.frame_rate)
                elif subject == 'recording.stopped':
                    if g_pool.writer:
                        logger.info("Done recording.")
                        g_pool.writer.release()
                        g_pool.writer = None
                elif subject.startswith('meta.should_doc'):
                    ipc_socket.notify({
                        'subject': 'meta.doc',
                        'actor': 'eye{}'.format(eye_id),
                        'doc': eye.__doc__
                    })
                elif subject.startswith('frame_publishing.started'):
                    should_publish_frames = True
                    frame_publish_format = notification.get('format', 'jpeg')
                elif subject.startswith('frame_publishing.stopped'):
                    should_publish_frames = False
                    frame_publish_format = 'jpeg'
                elif subject.startswith('start_eye_capture') and notification['target'] == g_pool.process:
                    replace_source(notification['name'],notification['args'])

                g_pool.capture.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get('frame')
            g_pool.capture_manager.recent_events(event)
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (f_height, f_width):
                    g_pool.pupil_detector.on_resolution_change((g_pool.u_r.array_shape[1], g_pool.u_r.array_shape[0]), g_pool.capture.frame_size)
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        else:
                            raise AttributeError()
                    except AttributeError:
                        pass
                    else:
                        pupil_socket.send('frame.eye.%s'%eye_id,{
                            'width': frame.width,
                            'height': frame.height,
                            'index': frame.index,
                            'timestamp': frame.timestamp,
                            'format': frame_publish_format,
                            '__raw_data__': [data]
                        })

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1./dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
                result['id'] = eye_id

                # stream the result
                pupil_socket.send('pupil.%s'%eye_id,result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == 'algorithm':
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ('camera_image', 'roi'):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()
                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
                    if frame:
                        if result['method'] == '3d c++':
                            eye_ball = result['projected_sphere']
                            try:
                                pts = cv2.ellipse2Poly(
                                    (int(eye_ball['center'][0]),
                                     int(eye_ball['center'][1])),
                                    (int(eye_ball['axes'][0] / 2),
                                     int(eye_ball['axes'][1] / 2)),
                                    int(eye_ball['angle']), 0, 360, 8)
                            except ValueError:
                                pass
                            else:
                                draw_polyline(pts, 2, RGBA(0., .9, .1, result['model_confidence']))
                        if result['confidence'] > 0:
                            if 'ellipse' in result:
                                pts = cv2.ellipse2Poly(
                                    (int(result['ellipse']['center'][0]),
                                     int(result['ellipse']['center'][1])),
                                    (int(result['ellipse']['axes'][0] / 2),
                                     int(result['ellipse']['axes'][1] / 2)),
                                    int(result['ellipse']['angle']), 0, 360, 15)
                                confidence = result['confidence'] * 0.7
                                draw_polyline(pts, 1, RGBA(1., 0, 0, confidence))
                                draw_points([result['ellipse']['center']],
                                            size=20,
                                            color=RGBA(1., 0., 0., confidence),
                                            sharpness=1.)

                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    g_pool.gui.update()

                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # need to do this for Windows OS
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui_user_scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.class_name, g_pool.capture.get_init_dict()
        session_settings['capture_manager_settings'] = g_pool.capture_manager.class_name, g_pool.capture_manager.get_init_dict()
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = str(g_pool.version)
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.capture.deinit_gui()
        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()
        logger.info("Process shutting down.")
Exemplo n.º 55
0
def eye(g_pool,cap_src,cap_size,rx_from_world,eye_id=0):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir,'eye%s.log'%eye_id),mode='w')
    # fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logger.level+10)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('Eye'+str(eye_id)+' Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter('EYE'+str(eye_id)+' Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)


    #UI Platform tweaks
    if platform.system() == 'Linux':
        scroll_factor = 10.0
        window_position_default = (600,300*eye_id)
    elif platform.system() == 'Windows':
        scroll_factor = 1.0
        window_position_default = (600,31+300*eye_id)
    else:
        scroll_factor = 1.0
        window_position_default = (600,300*eye_id)


    # Callback functions
    def on_resize(window,w, h):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(window)
        hdpi_factor = glfwGetFramebufferSize(window)[0]/glfwGetWindowSize(window)[0]
        w,h = w*hdpi_factor, h*hdpi_factor
        g_pool.gui.update_window(w,h)
        graph.adjust_size(w,h)
        adjust_gl_view(w,h)
        # for p in g_pool.plugins:
            # p.on_window_resize(window,w,h)
        glfwMakeContextCurrent(active_window)

    def on_key(window, key, scancode, action, mods):
        g_pool.gui.update_key(key,scancode,action,mods)


    def on_char(window,char):
        g_pool.gui.update_char(char)


    def on_button(window,button, action, mods):
        if g_pool.display_mode == 'roi':
            if action == GLFW_RELEASE and u_r.active_edit_pt:
                u_r.active_edit_pt = False
                return # if the roi interacts we don't want the gui to interact as well
            elif action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,(frame.width,frame.height)) # Position in img pixels
                if u_r.mouse_over_edit_pt(pos,u_r.handle_size+40,u_r.handle_size+40):
                    return # if the roi interacts we don't want the gui to interact as well

        g_pool.gui.update_button(button,action,mods)



    def on_pos(window,x, y):
        hdpi_factor = float(glfwGetFramebufferSize(window)[0]/glfwGetWindowSize(window)[0])
        g_pool.gui.update_mouse(x*hdpi_factor,y*hdpi_factor)

        if u_r.active_edit_pt:
            pos = normalize((x,y),glfwGetWindowSize(main_window))
            if g_pool.flip:
                pos = 1-pos[0],1-pos[1]
            pos = denormalize(pos,(frame.width,frame.height) )
            u_r.move_vertex(u_r.active_pt_idx,pos)

    def on_scroll(window,x,y):
        g_pool.gui.update_scroll(x,y*scroll_factor)

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')


    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(g_pool.user_dir,'user_settings_eye%s'%eye_id))
    if session_settings.get("version",VersionFormat('0.0')) < g_pool.version:
        logger.info("Session setting are from older version of this app. I will not use those.")
        session_settings.clear()
    # Initialize capture
    cap = autoCreateCapture(cap_src, timebase=g_pool.timebase)
    cap.frame_size = cap_size
    cap.frame_rate = 90 #default
    cap.settings = session_settings.get('capture_settings',{})


    # Test capture
    try:
        frame = cap.get_frame()
    except CameraCaptureError:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return

    g_pool.capture = cap
    g_pool.flip = session_settings.get('flip',False)
    # any object we attach to the g_pool object *from now on* will only be visible to this process!
    # vars should be declared here to make them visible to the code reader.
    g_pool.window_size = session_settings.get('window_size',1.)
    g_pool.display_mode = session_settings.get('display_mode','camera_image')
    g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be a small as possible but big enough to capture to pupil in its movements",
                                'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters with in the Pupil Detection menu below."}
    # g_pool.draw_pupil = session_settings.get('draw_pupil',True)

    u_r = UIRoi(frame.img.shape)
    u_r.set(session_settings.get('roi',u_r.get()))

    writer = None

    pupil_detector = Canny_Detector(g_pool)


    # UI callback functions
    def set_scale(new_scale):
        g_pool.gui.scale = new_scale
        g_pool.gui.collect_menus()


    def set_display_mode_info(val):
        g_pool.display_mode = val
        g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]


    window_pos = session_settings.get('window_position',window_position_default)
    width,height = session_settings.get('window_size',(frame.width, frame.height))

    # Initialize glfw
    glfwInit()
    if g_pool.binocular:
        title = "Binocular eye %s"%eye_id
    else:
        title = 'Eye'
    main_window = glfwCreateWindow(width,height, title, None, None)

    glfwMakeContextCurrent(main_window)
    cygl_init()

    # Register callbacks main_window
    glfwSetWindowSizeCallback(main_window,on_resize)
    glfwSetWindowCloseCallback(main_window,on_close)
    glfwSetKeyCallback(main_window,on_key)
    glfwSetCharCallback(main_window,on_char)
    glfwSetMouseButtonCallback(main_window,on_button)
    glfwSetCursorPosCallback(main_window,on_pos)
    glfwSetScrollCallback(main_window,on_scroll)

    # gl_state settings
    basic_gl_setup()
    g_pool.image_tex = create_named_texture(frame.img.shape)
    update_named_texture(g_pool.image_tex,frame.img)

    # refresh speed settings
    glfwSwapInterval(0)
    glfwSetWindowPos(main_window,window_pos[0],window_pos[1])


    #setup GUI
    g_pool.gui = ui.UI()
    g_pool.gui.scale = session_settings.get('gui_scale',1)
    g_pool.sidebar = ui.Scrolling_Menu("Settings",pos=(-300,0),size=(0,0),header_pos='left')
    general_settings = ui.Growing_Menu('General')
    general_settings.append(ui.Slider('scale',g_pool.gui, setter=set_scale,step = .05,min=1.,max=2.5,label='Interface Size'))
    general_settings.append(ui.Button('Reset window size',lambda: glfwSetWindowSize(main_window,frame.width,frame.height)) )
    general_settings.append(ui.Selector('display_mode',g_pool,setter=set_display_mode_info,selection=['camera_image','roi','algorithm'], labels=['Camera Image', 'ROI', 'Algorithm'], label="Mode") )
    general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
    g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])
    general_settings.append(g_pool.display_mode_info)
    g_pool.sidebar.append(general_settings)

    g_pool.gui.append(g_pool.sidebar)
    g_pool.gui.append(ui.Hot_Key("quit",setter=on_close,getter=lambda:True,label="X",hotkey=GLFW_KEY_ESCAPE))


    # let the camera add its GUI
    g_pool.capture.init_gui(g_pool.sidebar)

    # let detector add its GUI
    pupil_detector.init_gui(g_pool.sidebar)

    # load last gui configuration
    g_pool.gui.configuration = session_settings.get('ui_config',{})


    #set the last saved window size
    on_resize(main_window, *glfwGetWindowSize(main_window))

    #set up performance graphs
    pid = os.getpid()
    ps = psutil.Process(pid)
    ts = frame.timestamp

    cpu_graph = graph.Bar_Graph()
    cpu_graph.pos = (20,130)
    cpu_graph.update_fn = ps.cpu_percent
    cpu_graph.update_rate = 5
    cpu_graph.label = 'CPU %0.1f'

    fps_graph = graph.Bar_Graph()
    fps_graph.pos = (140,130)
    fps_graph.update_rate = 5
    fps_graph.label = "%0.0f FPS"

    # Event loop
    while not g_pool.quit.value:
        # Get an image from the grabber
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Capture from Camera Failed. Stopping.")
            break
        except EndofVideoFileError:
            logger.warning("Video File is done. Stopping")
            break


        # update performance graphs
        t = frame.timestamp
        dt,ts = t-ts,t
        try:
            fps_graph.add(1./dt)
        except ZeroDivisionError:
            pass
        cpu_graph.update()


        ###  RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if rx_from_world.poll():
            command,raw_mode = rx_from_world.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %s"%record_path)
                video_path = os.path.join(record_path, "eye%s.mkv"%eye_id)
                timestamps_path = os.path.join(record_path, "eye%s_timestamps.npy"%eye_id)
                if raw_mode:
                    writer = JPEG_Dumper(video_path)
                else:
                    writer = CV_Writer(video_path,float(cap.frame_rate), cap.frame_size)
                timestamps = []
            else:
                logger.info("Done recording.")
                writer.release()
                writer = None
                np.save(timestamps_path,np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write_video_frame(frame)
            timestamps.append(frame.timestamp)


        # pupil ellipse detection
        result = pupil_detector.detect(frame,user_roi=u_r,visualize=g_pool.display_mode == 'algorithm')
        result['id'] = eye_id
        # stream the result
        g_pool.pupil_queue.put(result)


        # GL drawing
        glfwMakeContextCurrent(main_window)
        clear_gl_screen()

        # switch to work in normalized coordinate space
        if g_pool.display_mode == 'algorithm':
            update_named_texture(g_pool.image_tex,frame.img)
        elif g_pool.display_mode in ('camera_image','roi'):
            update_named_texture(g_pool.image_tex,frame.gray)
        else:
            pass

        make_coord_system_norm_based(g_pool.flip)
        draw_named_texture(g_pool.image_tex)
        # switch to work in pixel space
        make_coord_system_pixel_based((frame.height,frame.width,3),g_pool.flip)

        if result['confidence'] > 0:
            if 'axes' in result:
                pts = cv2.ellipse2Poly( (int(result['center'][0]),int(result['center'][1])),
                                        (int(result['axes'][0]/2),int(result['axes'][1]/2)),
                                        int(result['angle']),0,360,15)
                cygl_draw_polyline(pts,1,cygl_rgba(1.,0,0,.5))
            cygl_draw_points([result['center']],size=20,color=cygl_rgba(1.,0.,0.,.5),sharpness=1.)

        # render graphs
        graph.push_view()
        fps_graph.draw()
        cpu_graph.draw()
        graph.pop_view()

        # render GUI
        g_pool.gui.update()

        #render the ROI
        if g_pool.display_mode == 'roi':
            u_r.draw(g_pool.gui.scale)

        #update screen
        glfwSwapBuffers(main_window)
        glfwPollEvents()


    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path,np.asarray(timestamps))


    # save session persistent settings
    session_settings['gui_scale'] = g_pool.gui.scale
    session_settings['roi'] = u_r.get()
    session_settings['flip'] = g_pool.flip
    session_settings['display_mode'] = g_pool.display_mode
    session_settings['ui_config'] = g_pool.gui.configuration
    session_settings['capture_settings'] = g_pool.capture.settings
    session_settings['window_size'] = glfwGetWindowSize(main_window)
    session_settings['window_position'] = glfwGetWindowPos(main_window)
    session_settings['version'] = g_pool.version
    session_settings.close()

    pupil_detector.cleanup()
    glfwDestroyWindow(main_window)
    glfwTerminate()
    cap.close()

    # flush the queue in case the world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")
Exemplo n.º 56
0
def eye(pupil_queue, timebase, pipe_to_world, is_alive_flag, user_dir, version, eye_id, cap_src):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """
    is_alive = Is_Alive_Manager(is_alive_flag)
    with is_alive:
        import logging
        # Set up root logger for this process before doing imports of logged modules.
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        # remove inherited handlers
        logger.handlers = []
        # create file handler which logs even debug messages
        fh = logging.FileHandler(os.path.join(user_dir,'eye%s.log'%eye_id),mode='w')
        # fh.setLevel(logging.DEBUG)
        # create console handler with a higher log level
        ch = logging.StreamHandler()
        ch.setLevel(logger.level+10)
        # create formatter and add it to the handlers
        formatter = logging.Formatter('Eye'+str(eye_id)+' Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        formatter = logging.Formatter('EYE'+str(eye_id)+' Process [%(levelname)s] %(name)s : %(message)s')
        ch.setFormatter(formatter)
        # add the handlers to the logger
        logger.addHandler(fh)
        logger.addHandler(ch)
        #silence noisy modules
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        # create logger for the context of this function
        logger = logging.getLogger(__name__)


        # We defer the imports because of multiprocessing.
        # Otherwise each process would also load the other processes' imports.

        #general imports
        import numpy as np
        import cv2

        #display
        import glfw
        from pyglui import ui,graph,cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline, Named_Texture, Sphere
        import OpenGL.GL as gl
        from gl_utils import basic_gl_setup,adjust_gl_view, clear_gl_screen ,make_coord_system_pixel_based,make_coord_system_norm_based, make_coord_system_eye_camera_based
        from ui_roi import UIRoi
        #monitoring
        import psutil
        import math


        # helpers/utils
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, Roi, timer
        from video_capture import autoCreateCapture, FileCaptureError, EndofVideoFileError, CameraCaptureError
        from av_writer import JPEG_Writer,AV_Writer

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__:Detector_2D,Detector_3D.__name__:Detector_3D}



        #UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600,300*eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 1.0
            window_position_default = (600,31+300*eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600,300*eye_id)


        #g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.pupil_queue = pupil_queue
        g_pool.timebase = timebase


        # Callback functions
        def on_resize(window,w, h):
            if not g_pool.iconified:
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                g_pool.gui.update_window(w,h)
                graph.adjust_size(w,h)
                adjust_gl_view(w,h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key,scancode,action,mods)

        def on_char(window,char):
            g_pool.gui.update_char(char)

        def on_iconify(window,iconified):
            g_pool.iconified = iconified

        def on_button(window,button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    return # if the roi interacts we don't want the gui to interact as well
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos,glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1-pos[0],1-pos[1]
                    pos = denormalize(pos,(frame.width,frame.height)) # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos,g_pool.u_r.handle_size+40,g_pool.u_r.handle_size+40):
                        return # if the roi interacts we don't want the gui to interact as well

            g_pool.gui.update_button(button,action,mods)



        def on_pos(window,x, y):
            hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0]/glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.update_mouse(x*hdpi_factor,y*hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x,y),glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,(frame.width,frame.height) )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)

        def on_scroll(window,x,y):
            g_pool.gui.update_scroll(x,y*scroll_factor)


        # load session persistent settings
        session_settings = Persistent_Dict(os.path.join(g_pool.user_dir,'user_settings_eye%s'%eye_id))
        if session_settings.get("version",VersionFormat('0.0')) < g_pool.version:
            logger.info("Session setting are from older version of this app. I will not use those.")
            session_settings.clear()
        # Initialize capture
        cap = autoCreateCapture(cap_src, timebase=g_pool.timebase)
        default_settings = {'frame_size':(640,480),'frame_rate':60}
        previous_settings = session_settings.get('capture_settings',None)
        if previous_settings and previous_settings['name'] == cap.name:
            cap.settings = previous_settings
        else:
            cap.settings = default_settings


        # Test capture
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Could not retrieve image from capture")
            cap.close()
            return

        #signal world that we are ready to go
        # pipe_to_world.send('eye%s process ready'%eye_id)

        # any object we attach to the g_pool object *from now on* will only be visible to this process!
        # vars should be declared here to make them visible to the code reader.
        g_pool.iconified = False
        g_pool.capture = cap
        g_pool.flip = session_settings.get('flip',False)
        g_pool.display_mode = session_settings.get('display_mode','camera_image')
        g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                    'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                    'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}


        g_pool.u_r = UIRoi(frame.img.shape)
        g_pool.u_r.set(session_settings.get('roi',g_pool.u_r.get()))


        def on_frame_size_change(new_size):
            g_pool.u_r = UIRoi((new_size[1],new_size[0]))

        cap.on_frame_size_change = on_frame_size_change

        writer = None

        pupil_detector_settings = session_settings.get('pupil_detector_settings',None)
        last_pupil_detector = pupil_detectors[session_settings.get('last_pupil_detector',Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,pupil_detector_settings)

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui.scale = new_scale
            g_pool.gui.collect_menus()


        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]


        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)


        # Initialize glfw
        glfw.glfwInit()
        title = "eye %s"%eye_id
        width,height = session_settings.get('window_size',(frame.width, frame.height))
        main_window = glfw.glfwCreateWindow(width,height, title, None, None)
        window_pos = session_settings.get('window_position',window_position_default)
        glfw.glfwSetWindowPos(main_window,window_pos[0],window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_frame(frame)
        glfw.glfwSwapInterval(0)
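        # swap interval 0 disables v-sync so buffer swaps never block the capture loop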

        sphere  = Sphere(20)

        #setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui.scale = session_settings.get('gui_scale',1)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",pos=(-300,0),size=(0,0),header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(ui.Slider('scale',g_pool.gui, setter=set_scale,step = .05,min=1.,max=2.5,label='Interface Size'))
        general_settings.append(ui.Button('Reset window size',lambda: glfw.glfwSetWindowSize(main_window,frame.width,frame.height)) )
        general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
        general_settings.append(ui.Selector('display_mode',g_pool,setter=set_display_mode_info,selection=['camera_image','roi','algorithm'], labels=['Camera Image', 'ROI', 'Algorithm'], label="Mode") )
        g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])
        general_settings.append(g_pool.display_mode_info)
        g_pool.sidebar.append(general_settings)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector('pupil_detector',getter = lambda: g_pool.pupil_detector.__class__ ,setter=set_detector,selection=[Detector_2D, Detector_3D],labels=['C++ 2d detector', 'C++ 3d detector'], label="Detection method")
        general_settings.append(detector_selector)

        # let detector add its GUI
        g_pool.pupil_detector.init_gui(g_pool.sidebar)
        # let the camera add its GUI
        g_pool.capture.init_gui(g_pool.sidebar)


        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window,on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window,on_iconify)
        glfw.glfwSetKeyCallback(main_window,on_key)
        glfw.glfwSetCharCallback(main_window,on_char)
        glfw.glfwSetMouseButtonCallback(main_window,on_button)
        glfw.glfwSetCursorPosCallback(main_window,on_pos)
        glfw.glfwSetScrollCallback(main_window,on_scroll)

        #set the last saved window size
        on_resize(main_window, *glfw.glfwGetWindowSize(main_window))


        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config',{})


        #set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = frame.timestamp

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20,130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140,130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"


        #create a timer to control window update frequency
        window_update_timer = timer(1/60.)
        def window_should_update():
            return next(window_update_timer)


        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

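            # pipe_to_world carries either bare strings ('Exit', 'Ping') or
            # (command, payload) tuples; 'Set_Detection_Mapping_Mode' is handled
            # here, 'Rec_Start'/'Rec_Stop' further below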
            if pipe_to_world.poll():
                cmd = pipe_to_world.recv()
                if cmd == 'Exit':
                    break
                elif cmd == "Ping":
                    pipe_to_world.send("Pong")
                    command = None
                else:
                    command,payload = cmd
                if command == 'Set_Detection_Mapping_Mode':
                    if payload == '3d':
                        if not isinstance(g_pool.pupil_detector,Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only  = True
                    else:
                        set_detector(Detector_2D)
                        detector_selector.read_only = False

            else:
                command = None



            # Get an image from the grabber
            try:
                frame = cap.get_frame()
            except CameraCaptureError:
                logger.error("Capture from Camera Failed. Stopping.")
                break
            except EndofVideoFileError:
                logger.warning("Video File is done. Stopping")
                cap.seek_to_frame(0)
                frame = cap.get_frame()


            #update performance graphs
            t = frame.timestamp
            dt,ts = t-ts,t
            try:
                fps_graph.add(1./dt)
            except ZeroDivisionError:
                pass
            cpu_graph.update()


            ###  RECORDING of Eye Video (on demand) ###
            # Setup variables and lists for recording
            if 'Rec_Start' == command:
                record_path,raw_mode = payload
                logger.info("Will save eye video to: %s"%record_path)
                timestamps_path = os.path.join(record_path, "eye%s_timestamps.npy"%eye_id)
                if raw_mode and frame.jpeg_buffer:
                    video_path = os.path.join(record_path, "eye%s.mp4"%eye_id)
                    writer = JPEG_Writer(video_path,cap.frame_rate)
                else:
                    video_path = os.path.join(record_path, "eye%s.mp4"%eye_id)
                    writer = AV_Writer(video_path,cap.frame_rate)
                timestamps = []
            elif 'Rec_Stop' == command:
                logger.info("Done recording.")
                writer.release()
                writer = None
                np.save(timestamps_path,np.asarray(timestamps))
                del timestamps

            if writer:
                writer.write_video_frame(frame)
                timestamps.append(frame.timestamp)


            # pupil ellipse detection
            result = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
            result['id'] = eye_id
            # stream the result
            g_pool.pupil_queue.put(result)
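            # the drawing code below expects the detector result to contain
            # 'method', 'confidence' and 'ellipse' (plus 'projected_sphere' and
            # 'model_confidence' for the 3d detector)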

            # GL drawing
            if window_should_update():
                if not g_pool.iconified:
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    # switch to work in normalized coordinate space
                    if g_pool.display_mode == 'algorithm':
                        g_pool.image_tex.update_from_ndarray(frame.img)
                    elif g_pool.display_mode in ('camera_image','roi'):
                        g_pool.image_tex.update_from_ndarray(frame.gray)
                    else:
                        pass

                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    window_size =  glfw.glfwGetWindowSize(main_window)
                    make_coord_system_pixel_based((frame.height,frame.width,3),g_pool.flip)

                    if result['method'] == '3d c++':

                        eye_ball = result['projected_sphere']
                        try:
                            pts = cv2.ellipse2Poly( (int(eye_ball['center'][0]),int(eye_ball['center'][1])),
                                                (int(eye_ball['axes'][0]/2),int(eye_ball['axes'][1]/2)),
                                                int(eye_ball['angle']),0,360,8)
                        except ValueError as e:
                            pass
                        else:
                            draw_polyline(pts,2,RGBA(0.,.9,.1,result['model_confidence']) )

                    if result['confidence'] >0:
                        if 'ellipse' in result:
                            pts = cv2.ellipse2Poly( (int(result['ellipse']['center'][0]),int(result['ellipse']['center'][1])),
                                            (int(result['ellipse']['axes'][0]/2),int(result['ellipse']['axes'][1]/2)),
                                            int(result['ellipse']['angle']),0,360,15)
                            confidence = result['confidence'] * 0.7 #scale it a little
                            draw_polyline(pts,1,RGBA(1.,0,0,confidence))
                            draw_points([result['ellipse']['center']],size=20,color=RGBA(1.,0.,0.,confidence),sharpness=1.)

                    # render graphs
                    graph.push_view()
                    fps_graph.draw()
                    cpu_graph.draw()
                    graph.pop_view()

                    # render GUI
                    g_pool.gui.update()

                    #render the ROI
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw(g_pool.gui.scale)

                    #update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize() #detector decides if we visualize or not


        # END while running

        # in case eye recording was still running: save & close
        if writer:
            logger.info("Done recording eye.")
            writer = None
            np.save(timestamps_path,np.asarray(timestamps))

        glfw.glfwRestoreWindow(main_window) # needed on Windows
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui.scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.settings
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = g_pool.version
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        cap.close()


        logger.debug("Process done")
Exemplo n.º 57
0
def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url, ipc_push_url,
        user_dir, version, eye_id, glint_queue, glint_vector_queue, overwrite_cap_settings=None):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))
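
    # A minimal sketch of how another process on the IPC backbone could consume
    # the pupil data this process emits (assumes the same zmq_tools helpers;
    # 'norm_pos'/'confidence' are typical keys of a pupil datum):
    #
    #     pupil_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url,
    #                                        topics=('pupil.{}'.format(eye_id),))
    #     if pupil_sub.new_data:
    #         topic, datum = pupil_sub.recv()
    #         print(datum['confidence'], datum.get('norm_pos'))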

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id):

        # logging setup
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.setLevel(logging.INFO)
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        # general imports
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible
        from ui_roi import UIRoi
        # monitoring
        import psutil
        import math
        from time import time
        import json


        # helpers/utils
        from file_methods import Persistent_Dict, load_object
        from uvc import get_time_monotonic
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, AV_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__: Detector_2D,
                           Detector_3D.__name__: Detector_3D}

        # UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 10.0
            window_position_default = (600,31+ 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        # g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.process = 'eye{}'.format(eye_id)
        g_pool.timebase = timebase
        g_pool.glints = glint_queue
        g_pool.glint_pupil_vectors = glint_vector_queue

        glint_detector = Glint_Detector(g_pool)
        g_pool.ipc_pub = ipc_socket

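        # timestamps are measured relative to the timebase shared with the world
        # process, so data from both eyes and the world camera line up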
        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value
        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            if is_window_visible(window):
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0] / glfw.glfwGetWindowSize(window)[0])
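                # the framebuffer / window-size ratio is the hi-dpi scaling factor;
                # scale the GUI and graphs with it so they keep their on-screen size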
                g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
                g_pool.gui.update_window(w, h)
                g_pool.gui.collect_menus()
                for g in g_pool.graphs:
                    g.scale = hdpi_factor
                    g.adjust_window_size(w, h)
                adjust_gl_view(w, h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_button(window, button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the ROI is being edited we don't want
                    # the GUI to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    pos = denormalize(pos, g_pool.capture.frame_size)  # position in image pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos, g_pool.u_r.handle_size + 40,g_pool.u_r.handle_size + 40):
                        # if the ROI is being edited we don't want
                        # the GUI to interact as well
                        return

            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            hdpi_factor = glfw.glfwGetFramebufferSize(
                window)[0] / glfw.glfwGetWindowSize(window)[0]
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

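            # while an ROI handle is being dragged: map window coords -> normalized
            # coords (undoing the display flip) -> image pixel coords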
            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)


        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)



        # load session persistent settings
        session_settings = Persistent_Dict(os.path.join(g_pool.user_dir, 'user_settings_eye{}'.format(eye_id)))
        if VersionFormat(session_settings.get("version", '0.0')) != g_pool.version:
            logger.info("Session setting are from a different version of this app. I will not use those.")
            session_settings.clear()


        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get('flip', False)
        g_pool.display_mode = session_settings.get(
            'display_mode', 'camera_image')
        g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                         'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                         'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}


        capture_manager_settings = session_settings.get(
            'capture_manager_settings', ('UVC_Manager',{}))

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__:c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](g_pool,**manager_settings)


        # the pupil detector itself is instantiated further below, once the capture is up
        g_pool.pupil_settings = "default"

        if eye_id == 0:
            cap_src = ["Pupil Cam1 ID0","HD-6000","Integrated Camera","HD USB Camera","USB 2.0 Camera"]
        else:
            cap_src = ["Pupil Cam1 ID1","HD-6000","Integrated Camera"]

        # Initialize capture
        default_settings = ('UVC_Source',{
                            'preferred_names'  : cap_src,
                            'frame_size': (640,480),
                            'frame_rate': 90
                            })

        capture_source_settings = overwrite_cap_settings or session_settings.get('capture_settings', default_settings)
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__:c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
        assert g_pool.capture

        g_pool.u_r = UIRoi((g_pool.capture.frame_size[1],g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get('roi')
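        # the last entry of the stored ROI appears to encode the frame shape;
        # only restore it if it matches the current capture resolution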
        if roi_user_settings and roi_user_settings[-1] == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)


        pupil_detector_settings = session_settings.get(
            'pupil_detector_settings', None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            'last_pupil_detector', Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(
            g_pool, pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)

        def convert_keys_to_string(dictionary):
            """Recursively converts dictionary keys to strings."""
            if not isinstance(dictionary, dict):
                return dictionary
            return dict((str(k.decode('utf8')), convert_keys_to_string(v))
                for k, v in dictionary.items())

        def set_pupil_settings(new_settings):
            g_pool.pupil_settings = new_settings

            if new_settings != "default":
                try:
                    path = os.path.join(g_pool.user_dir, 'pupil_settings_' + new_settings + '.json')
                    with open(path, 'r') as fp:
                        pupil_settings_new = json.load(fp)
                except (IOError, ValueError):
                    logger.error("Settings don't exist")
                    return  # without the preset file there is nothing to apply
                # pupil_settings_new = convert_keys_to_string(pupil_settings_new)
                pupil_settings_new['2D_Settings'] = dict(pupil_settings_new)
                pupil_settings = g_pool.pupil_detector.get_settings()
                controls = g_pool.capture.uvc_capture.controls
                controls_dict = dict([(c.display_name,c) for c in controls])
                try:
                    g_pool.capture.frame_rate = pupil_settings_new['frame_rate']
                    g_pool.capture.frame_size = pupil_settings_new['frame_size']
                except:
                    logger.info("no frame rate and frame size in camera settings")
                for key in controls_dict:
                    print(key)
                    try:
                        controls_dict[key].value = pupil_settings_new[key]
                    except:
                        logger.info("no key with the name '%s' in camera settings" %key)
                for key in pupil_settings.keys():
                    print(key)
                    try:
                        if type(pupil_settings[key]) == dict:
                            for sec_key in pupil_settings[key].keys():
                                pupil_settings[key][sec_key] = pupil_settings_new[key][sec_key]
                        else:
                            pupil_settings[key] = pupil_settings_new[key]
                    except:
                        logger.info("no key with the name '%s' in pupil settings" %key)
        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)
        width, height = session_settings.get(
            'window_size', g_pool.capture.frame_size)
        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get(
            'window_position', window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(np.ones((1,1),dtype=np.uint8)+125)
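        # start with a uniform grey 1x1 placeholder texture until the first camera frame arrives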

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get('gui_scale', 1.)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                           pos=(-300, 0),
                                           size=(0, 0),
                                           header_pos='left')
        general_settings = ui.Growing_Menu('General')

        general_settings.append(ui.Selector('pupil_settings',g_pool,setter=set_pupil_settings,selection=['default','indoors','outdoors_sunny', 'outdoors_cloudy', 'vanilla'], labels=['Default', 'Indoors', 'Outdoors Sunny', 'Outdoors Cloudy', 'Vanilla'], label="Pupil settings") )
        general_settings.append(ui.Selector('gui_user_scale', g_pool,
                                          setter=set_scale,
                                          selection=[.8, .9, 1., 1.1, 1.2],
                                          label='Interface Size'))
        general_settings.append(ui.Button('Reset window size',lambda: glfw.glfwSetWindowSize(main_window,*g_pool.capture.frame_size)) )
        general_settings.append(ui.Switch('flip',g_pool,label='Flip image display'))
        general_settings.append(ui.Selector('display_mode',
                                            g_pool,
                                            setter=set_display_mode_info,
                                            selection=['camera_image','roi','algorithm'],
                                            labels=['Camera Image', 'ROI', 'Algorithm'],
                                            label="Mode")
                                            )
        g_pool.display_mode_info = ui.Info_Text(g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector('pupil_detector',
                                        getter=lambda: g_pool.pupil_detector.__class__,
                                        setter=set_detector, selection=[
                                            Detector_2D, Detector_3D],
                                        labels=['C++ 2d detector',
                                                'C++ 3d detector'],
                                        label="Detection method")
        general_settings.append(detector_selector)

        g_pool.capture_selector_menu = ui.Growing_Menu('Capture Selection')
        g_pool.capture_source_menu = ui.Growing_Menu('Capture Source')
        g_pool.capture_source_menu.collapsed = True
        g_pool.capture.init_gui()

        g_pool.sidebar.append(general_settings)
        g_pool.sidebar.append(g_pool.capture_selector_menu)
        g_pool.sidebar.append(g_pool.capture_source_menu)

        g_pool.pupil_detector.init_gui(g_pool.sidebar)

        g_pool.capture_manager.init_gui()
        g_pool.writer = None

        def replace_source(source_class_name,source_settings):
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
            g_pool.capture.init_gui()
            if g_pool.writer:
                logger.info("Done recording.")
                g_pool.writer.release()
                g_pool.writer = None

        g_pool.replace_source = replace_source # for ndsi capture

        def replace_manager(manager_class):
            g_pool.capture_manager.cleanup()
            g_pool.capture_manager = manager_class(g_pool)
            g_pool.capture_manager.init_gui()

        #We add the capture selection menu, after a manager has been added:
        g_pool.capture_selector_menu.insert(0,ui.Selector(
                                                'capture_manager',g_pool,
                                                setter    = replace_manager,
                                                getter    = lambda: g_pool.capture_manager.__class__,
                                                selection = manager_classes,
                                                labels    = [b.gui_name for b in manager_classes],
                                                label     = 'Manager'
                                            ))


        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_key)
        glfw.glfwSetCharCallback(main_window, on_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config', {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = 'jpeg'

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning('Process started.')

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification['subject']
                if subject == 'eye_process.should_stop':
                    if notification['eye_id'] == eye_id:
                        break
                elif subject == 'set_detection_mapping_mode':
                    if notification['mode'] == '3d':
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    else:
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                elif subject == 'recording.started':
                    if notification['record_eye'] and g_pool.capture.online:
                        record_path = notification['rec_path']
                        raw_mode = notification['compression']
                        logger.info("Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path, "eye{}.mp4".format(eye_id))
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(video_path, g_pool.capture.frame_rate)
                        elif hasattr(g_pool.capture._recent_frame, 'h264_buffer'):
                            g_pool.writer = H264Writer(video_path,
                                                       g_pool.capture.frame_size[0],
                                                       g_pool.capture.frame_size[1],
                                                       g_pool.capture.frame_rate)
                        else:
                            g_pool.writer = AV_Writer(video_path, g_pool.capture.frame_rate)
                elif subject == 'recording.stopped':
                    if g_pool.writer:
                        logger.info("Done recording.")
                        g_pool.writer.release()
                        g_pool.writer = None
                elif subject.startswith('meta.should_doc'):
                    ipc_socket.notify({
                        'subject': 'meta.doc',
                        'actor': 'eye{}'.format(eye_id),
                        'doc': eye.__doc__
                    })
                elif subject.startswith('frame_publishing.started'):
                    should_publish_frames = True
                    frame_publish_format = notification.get('format', 'jpeg')
                elif subject.startswith('frame_publishing.stopped'):
                    should_publish_frames = False
                    frame_publish_format = 'jpeg'
                elif subject.startswith('start_eye_capture') and notification['target'] == g_pool.process:
                    replace_source(notification['name'],notification['args'])

                g_pool.capture.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get('frame')
            g_pool.capture_manager.recent_events(event)
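            # if the capture switched resolution, rebuild the ROI so it matches
            # the new frame shape before the detector uses it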
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (f_height, f_width):
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames and frame.jpeg_buffer:
                    if frame_publish_format == "jpeg":
                        data = frame.jpeg_buffer
                    elif frame_publish_format == "yuv":
                        data = frame.yuv_buffer
                    elif frame_publish_format == "bgr":
                        data = frame.bgr
                    elif frame_publish_format == "gray":
                        data = frame.gray
                    pupil_socket.send('frame.eye.%s'%eye_id,{
                        'width': frame.width,
                        'height': frame.height,
                        'index': frame.index,
                        'timestamp': frame.timestamp,
                        'format': frame_publish_format,
                        '__raw_data__': [data]
                    })

                t = frame.timestamp
                tUnix = time()
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1./dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result, roi = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
                result['id'] = eye_id
                result['unix_ts'] = tUnix

                #glint detection
                #glints = [[0,0,0,0,0,0], [0,0,0,0,0,1]] #glint_detector.glint(frame, eye_id, u_roi=g_pool.u_r, pupil=result, roi=roi)
                #result['glints'] = glints

                #g_pool.glints.put(glints)


                #save glint-pupil vector results
                #if glints[0][3]:
                #    glint_pupil_vector = {'timestamp': glints[0][0], 'x': result['norm_pos'][0]-glints[0][3], 'y': result['norm_pos'][1]-glints[0][4], 'pupil_confidence': result['confidence'], 'glint_found': True, 'id': eye_id, 'x2': result['norm_pos'][0]-glints[1][3], 'y2': result['norm_pos'][1]-glints[1][4]}
                #else:
                #     glint_pupil_vector = {'timestamp': glints[0][0], 'x': result['norm_pos'][0]-glints[0][3], 'y': result['norm_pos'][1]-glints[0][4], 'pupil_confidence': result['confidence'], 'glint_found': False, 'id': eye_id, 'x2': result['norm_pos'][0]-glints[1][3], 'y2': result['norm_pos'][1]-glints[1][4]}
                #g_pool.glint_pupil_vectors.put(glint_pupil_vector)

                # stream the result
                pupil_socket.send('pupil.%s'%eye_id,result)

            cpu_graph.update()




            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == 'algorithm':
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ('camera_image', 'roi'):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    #if frame:
                    #    glints = np.array(result['glints'])
                    #    if len(glints)>0 and glints[0][3]:
                    #        if glints[1][3]:
                    #            cygl_draw_points(glints[:,1:3], size=20,color=cygl_rgba(0.,0.,1.,.5),sharpness=1.)
                    #        elif result['confidence'] > 0.75:
                    #            cygl_draw_points(glints[:,1:3], size=20,color=cygl_rgba(0.,0.,1.,.5),sharpness=1.)

                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
                    if frame:
                        if result['method'] == '3d c++':
                            eye_ball = result['projected_sphere']
                            try:
                                pts = cv2.ellipse2Poly(
                                    (int(eye_ball['center'][0]),
                                     int(eye_ball['center'][1])),
                                    (int(eye_ball['axes'][0] / 2),
                                     int(eye_ball['axes'][1] / 2)),
                                    int(eye_ball['angle']), 0, 360, 8)
                            except ValueError as e:
                                pass
                            else:
                                draw_polyline(pts, 2, RGBA(0., .9, .1, result['model_confidence']))
                        if result['confidence'] > 0:
                            if 'ellipse' in result:
                                pts = cv2.ellipse2Poly(
                                    (int(result['ellipse']['center'][0]),
                                     int(result['ellipse']['center'][1])),
                                    (int(result['ellipse']['axes'][0] / 2),
                                     int(result['ellipse']['axes'][1] / 2)),
                                    int(result['ellipse']['angle']), 0, 360, 15)
                                confidence = result['confidence'] * 0.7
                                draw_polyline(pts, 1, RGBA(1., 0, 0, confidence))
                                draw_points([result['ellipse']['center']],
                                            size=20,
                                            color=RGBA(1., 0., 0., confidence),
                                            sharpness=1.)
                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    g_pool.gui.update()

                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # needed on Windows
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui_user_scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = g_pool.capture.class_name, g_pool.capture.get_init_dict()
        session_settings['capture_manager_settings'] = g_pool.capture_manager.class_name, g_pool.capture_manager.get_init_dict()
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
        session_settings['version'] = str(g_pool.version)
        session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
        session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
        session_settings.close()

        g_pool.capture.deinit_gui()
        g_pool.pupil_detector.cleanup()
        glint_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()
        logger.info("Process shutting down.")