Code Example #1
File: marker_detector.py  Project: r0bert0/pupil
    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """

        for m in self.markers:
            hat = np.array([[[0,0],[0,1],[.5,1.3],[1,1],[1,0],[0,0]]],dtype=np.float32)
            hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
            draw_gl_polyline(hat.reshape((6,2)),(0.1,1.,1.,.5))

        for s in self.surfaces:
            s.gl_draw_frame()

        if self.surface_edit_mode.value:
            for s in self.surfaces:
                s.gl_draw_corners()


        if self._window and self.surfaces:
            try:
                s = self.surfaces[self.show_surface_idx.value]
            except IndexError:
                s = None
            if s and s.detected:
                self.gl_display_in_window(s)
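
The marker outline drawn above is a fixed "hat" polygon defined in marker space and warped into screen space by the homography that m_marker_to_screen(m) returns. A minimal standalone sketch of that warp, with a hypothetical scale-and-translate homography standing in for the real one:

import numpy as np
import cv2

# Closed "hat" outline in marker coordinates, shaped (1, 6, 2) as
# cv2.perspectiveTransform expects.
hat = np.array([[[0, 0], [0, 1], [.5, 1.3], [1, 1], [1, 0], [0, 0]]], dtype=np.float32)

# Hypothetical homography standing in for m_marker_to_screen(m):
# scale the unit marker to 100 px and move it to (320, 240).
M = np.array([[100., 0., 320.],
              [0., 100., 240.],
              [0., 0., 1.]], dtype=np.float32)

screen_hat = cv2.perspectiveTransform(hat, M).reshape((6, 2))
print(screen_hat)  # six screen-space vertices, ready for draw_gl_polyline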
Code Example #2
    def gl_display_cache_bars(self):
        """
        """
        padding = 20.
        frame_max = len(self.g_pool.timestamps) #last marker is garanteed to be frame max.

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (frame_max-2)/float(width)
        v_pad = padding* 1./(height-2)
        gluOrtho2D(-h_pad,  (frame_max-1)+h_pad, -v_pad, 1+v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)


        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()
        glTranslatef(0,-.02,0)
        color = (7.,.1,.2,8.)
        draw_gl_polyline(self.gl_display_ranges,color=color,type='Lines',thickness=2)


        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Code Example #3
File: seek_bar.py  Project: Reidzhang/pupil
    def gl_display(self):
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(-self.h_pad,  (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        if self.drag_mode:
            color1 = (0.,.8,.5,.5)
            color2 = (0.,.8,.5,1.)
        else:
            color1 = (.25,.8,.8,.5)
            color2 = (.25,.8,.8,1.)

        draw_gl_polyline( [(0,0),(self.current_frame_index,0)],color=color1)
        draw_gl_polyline( [(self.current_frame_index,0),(self.frame_count,0)],color=(.5,.5,.5,.5))
        draw_gl_point((self.current_frame_index,0),color=color1,size=40)
        draw_gl_point((self.current_frame_index,0),color=color2,size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
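
Examples #2 and #3 above (and the seek-bar variants further down) repeat the same projection/modelview save-and-restore boilerplate around their drawing calls. A sketch of how that pattern could be factored into a context manager, assuming PyOpenGL and an already-current GL context:

from contextlib import contextmanager
from OpenGL.GL import (GL_MODELVIEW, GL_PROJECTION, glLoadIdentity,
                       glMatrixMode, glPopMatrix, glPushMatrix)
from OpenGL.GLU import gluOrtho2D

@contextmanager
def seek_bar_coords(frame_count, h_pad, v_pad):
    # Switch to a 2D ortho projection spanning 0..frame_count horizontally
    # and 0..1 vertically (plus padding), then restore both matrix stacks
    # regardless of what the drawing code does.
    glMatrixMode(GL_PROJECTION)
    glPushMatrix()
    glLoadIdentity()
    gluOrtho2D(-h_pad, frame_count + h_pad, -v_pad, 1 + v_pad)
    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    glLoadIdentity()
    try:
        yield
    finally:
        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

With such a helper, the body of gl_display reduces to the draw_gl_polyline and draw_gl_point calls inside one "with seek_bar_coords(self.frame_count, self.h_pad, self.v_pad):" block.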
Code Example #4
File: accuracy_test.py  Project: Esperadoce/pupil
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """

        if self.active and self.detected:
            for e in self.candidate_ellipses:
                pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                    (int(e[1][0]/2),int(e[1][1]/2)),
                                    int(e[-1]),0,360,15)
                draw_gl_polyline(pts,(0.,1.,0,1.))
        else:
            pass
        if self._window:
            self.gl_display_in_window()


        if not self.active and self.error_lines is not None:
            draw_gl_polyline_norm(self.error_lines,(1.,0.5,0.,.5),type='Lines')
            draw_gl_points_norm(self.error_lines[1::2],color=(.0,0.5,0.5,.5),size=3)
            draw_gl_points_norm(self.error_lines[0::2],color=(.5,0.0,0.0,.5),size=3)
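
The calibration and accuracy plugins never rasterize ellipses directly; they sample the outline into a polyline with cv2.ellipse2Poly and pass the vertices to draw_gl_polyline. A small standalone sketch with a made-up ellipse in the ((cx, cy), (width, height), angle) layout that cv2.fitEllipse produces:

import cv2

# Hypothetical detected ellipse: centre, full axes, rotation in degrees.
e = ((320.0, 240.0), (80.0, 40.0), 30.0)

# ellipse2Poly wants an integer centre, *half* axes and angle; sampling the
# full 0..360 degree arc every 15 degrees yields a coarse outline.
pts = cv2.ellipse2Poly((int(e[0][0]), int(e[0][1])),
                       (int(e[1][0] / 2), int(e[1][1] / 2)),
                       int(e[2]), 0, 360, 15)
print(pts)  # N x 2 integer vertices, the input expected by draw_gl_polyline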
Code Example #5
    def gl_display(self):

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(
            -self.h_pad, (self.frame_count) + self.h_pad, -self.v_pad,
            1 + self.v_pad
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        if self.drag_mode:
            color1 = (0., .8, .5, .5)
            color2 = (0., .8, .5, 1.)
        else:
            color1 = (.25, .8, .8, .5)
            color2 = (.25, .8, .8, 1.)

        draw_gl_polyline([(0, 0), (self.current_frame_index, 0)], color=color1)
        draw_gl_polyline([(self.current_frame_index, 0),
                          (self.frame_count, 0)],
                         color=(.5, .5, .5, .5))
        draw_gl_point((self.current_frame_index, 0), color=color1, size=40)
        draw_gl_point((self.current_frame_index, 0), color=color2, size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Code Example #6
    def gl_display(self):

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width, height = glfwGetWindowSize(glfwGetCurrentContext())
        h_pad = self.padding / width
        v_pad = self.padding / height
        gluOrtho2D(-h_pad, 1 + h_pad, -v_pad, 1 + v_pad)  # gl coord convention
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        if self.drag_mode:
            color1 = (0., .8, .5, .5)
            color2 = (0., .8, .5, 1.)
        else:
            color1 = (.25, .8, .8, .5)
            color2 = (.25, .8, .8, 1.)

        draw_gl_polyline([(0, 0), (self.norm_seek_pos, 0)], color=color1)
        draw_gl_polyline([(self.norm_seek_pos, 0), (1, 0)],
                         color=(.5, .5, .5, .5))
        draw_gl_point((self.norm_seek_pos, 0), color=color1, size=40)
        draw_gl_point((self.norm_seek_pos, 0), color=color2, size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Code Example #7
    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """

        for m in self.markers:
            hat = np.array(
                [[[0, 0], [0, 1], [.5, 1.3], [1, 1], [1, 0], [0, 0]]],
                dtype=np.float32)
            hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
            draw_gl_polyline(hat.reshape((6, 2)), (0.1, 1., 1., .5))

        for s in self.surfaces:
            s.gl_draw_frame()

        if self.surface_edit_mode.value:
            for s in self.surfaces:
                s.gl_draw_corners()

        if self._window and self.surfaces:
            try:
                s = self.surfaces[self.show_surface_idx.value]
            except IndexError:
                s = None
            if s and s.detected:
                self.gl_display_in_window(s)
Code Example #8
    def gl_display(self):

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(
            -self.h_pad, (self.frame_count) + self.h_pad, -self.v_pad,
            1 + self.v_pad
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color1 = (.1, .9, .2, .5)
        color2 = (.1, .9, .2, .5)

        if self.in_mark != 0 or self.out_mark != self.frame_count:
            draw_gl_polyline([(self.in_mark, 0), (self.out_mark, 0)],
                             color=(.1, .9, .2, .5),
                             thickness=2)
        draw_gl_point((self.in_mark, 0), color=color2, size=10)
        draw_gl_point((self.out_mark, 0), color=color2, size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Code Example #9
    def gl_display_cache_bars(self):
        """
        """
        padding = 20.
        frame_max = len(self.g_pool.timestamps) #last marker is garanteed to be frame max.

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (frame_max-2)/float(width)
        v_pad = padding* 1./(height-2)
        gluOrtho2D(-h_pad,  (frame_max-1)+h_pad, -v_pad, 1+v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()
        glTranslatef(0,-.02,0)
        color = (7.,.1,.2,8.)
        draw_gl_polyline(self.gl_display_ranges,color=color,type='Lines',thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Code Example #10
File: seek_bar.py  Project: Esperadoce/pupil
    def gl_display(self):

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = glfwGetWindowSize(glfwGetCurrentContext())
        h_pad = self.padding/width
        v_pad = self.padding/height
        gluOrtho2D(-h_pad, 1+h_pad, -v_pad, 1+v_pad) # gl coord convention
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        if self.drag_mode:
            color1 = (0.,.8,.5,.5)
            color2 = (0.,.8,.5,1.)
        else:
            color1 = (.25,.8,.8,.5)
            color2 = (.25,.8,.8,1.)

        draw_gl_polyline( [(0,0),(self.norm_seek_pos,0)],color=color1)
        draw_gl_polyline( [(self.norm_seek_pos,0),(1,0)],color=(.5,.5,.5,.5))
        draw_gl_point((self.norm_seek_pos,0),color=color1,size=40)
        draw_gl_point((self.norm_seek_pos,0),color=color2,size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
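
In the seek-bar variants above, a fixed pixel padding is expressed in the normalized drawing coordinates by dividing it by the current window size before it is handed to gluOrtho2D. A minimal sketch of that conversion with made-up numbers:

# Hypothetical values; width/height would come from
# glfwGetWindowSize(glfwGetCurrentContext()) in the real plugin.
padding = 30.0
width, height = 1280.0, 720.0

h_pad = padding / width    # padding expressed in ortho units (0..1 range)
v_pad = padding / height
# gluOrtho2D(-h_pad, 1 + h_pad, -v_pad, 1 + v_pad) then leaves `padding`
# pixels of margin on every side of the 0..1 drawing area.
print(h_pad, v_pad)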
Code Example #11
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """

        if self.active and self.detected:
            for e in self.candidate_ellipses:
                pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                    (int(e[1][0]/2),int(e[1][1]/2)),
                                    int(e[-1]),0,360,15)
                draw_gl_polyline(pts,(0.,1.,0,1.))
        else:
            pass
        if self._window:
            self.gl_display_in_window()


        if not self.active and self.error_lines is not None:
            draw_gl_polyline_norm(self.error_lines,(1.,0.5,0.,.5),type='Lines')
            draw_gl_points_norm(self.error_lines[1::2],color=(.0,0.5,0.5,.5),size=3)
            draw_gl_points_norm(self.error_lines[0::2],color=(.5,0.0,0.0,.5),size=3)
Code Example #12
    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        if self.mode == "Show markers and frames":
            for m in self.markers:
                hat = np.array([[[0,0],[0,1],[.5,1.3],[1,1],[1,0],[0,0]]],dtype=np.float32)
                hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
                draw_gl_polyline(hat.reshape((6,2)),(0.1,1.,1.,.5))

            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)


        for s in self.surfaces:
            if self.locate_3d:
                s.gl_display_in_window_3d(self.g_pool.image_tex,self.camera_intrinsics)
            else:
                s.gl_display_in_window(self.g_pool.image_tex)


        if self.mode == "Surface edit mode":
            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)
                s.gl_draw_corners()
Code Example #13
 def draw_circle(self, frame, pos,r,c):
     ''' function that is called by draw_marker exclusively ''' 
     pts = cv2.ellipse2Poly(tuple(pos),(r,r),0,0,360,10)
     draw_gl_polyline(pts,c,'Polygon')
     world_size =  self.world_size
     window_size = glfwGetWindowSize(self._window)
     ratio = [float(world_size[0])/window_size[0], float(world_size[1])/window_size[1]]
     pos_0 = ratio[0] * pos[0] * .75 + world_size[0] * (1 - .75)/2
     pos_1 = ratio[1] * pos[1] * .75 + world_size[1] * (1 - .75)/2
     pos_1 = int(pos_0), int(pos_1)
     c = cv2.cv.CV_RGB(c[0]*255, c[1]*255, c[2]*255)
     cv2.ellipse(frame, tuple(pos_1),(r,r),0,0,360, c)
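
draw_circle mirrors the GL marker onto the captured frame: the window-pixel position is rescaled to world-frame pixels and squeezed to 75% of the frame, centred, before cv2.ellipse draws it. A sketch of just that remap with hypothetical sizes:

# Hypothetical sizes; in the plugin these come from self.world_size and
# glfwGetWindowSize(self._window).
world_size = (1280, 720)     # world frame size (w, h) in pixels
window_size = (1920, 1080)   # display window size (w, h) in pixels
pos = (960, 540)             # marker position in window pixels

ratio = (float(world_size[0]) / window_size[0],
         float(world_size[1]) / window_size[1])
scale = .75
x = ratio[0] * pos[0] * scale + world_size[0] * (1 - scale) / 2
y = ratio[1] * pos[1] * scale + world_size[1] * (1 - scale) / 2
print(int(x), int(y))        # frame-pixel coordinates to pass to cv2.ellipse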
Code Example #14
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """
        for grid_points in self.img_points:
            calib_bounds = cv2.convexHull(grid_points)[:,0] # we don't need the extra encapsulation that opencv likes so much
            draw_gl_polyline(calib_bounds,(0.,0.,1.,.5), type="Loop")

        if self._window:
            self.gl_display_in_window()
Code Example #15
    def gl_display(self):
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(-self.h_pad,  (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        #Draw...............................................................................
        
        for pos_to_draw in self.pos_begin_trial:   
            draw_gl_point((pos_to_draw, 0), size = 5, color = (.5, .5, .5, .5))
            draw_gl_polyline( [(pos_to_draw,.05),(pos_to_draw,0)], color = (.5, .5, .5, .5))

        for pos_to_draw in self.pos_first_response:
            draw_gl_point((pos_to_draw, 0), size = 5, color = (.0, .0, .5, 1.))    
            draw_gl_polyline( [(pos_to_draw,.025),(pos_to_draw,0)], color = (.0, .0, .7, 1.))

        for pos_to_draw in self.pos_end_limited_hold:
            draw_gl_point((pos_to_draw, 0), size = 5, color = (.5, .0, .0, 1.)) 
            draw_gl_polyline( [(pos_to_draw,.025),(pos_to_draw,0)], color = (.7, .0, .0, 1.))
        
        for x, first_response in enumerate(self.pos_first_response):
            draw_gl_polyline( [(self.pos_end_limited_hold[x],.025),(first_response,.025)], color = (1., .5, .0, 1.))    

        #Draw...............................................................................

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Code Example #16
 def draw_circle(self, frame, pos, r, c):
     ''' function that is called by draw_marker exclusively '''
     pts = cv2.ellipse2Poly(tuple(pos), (r, r), 0, 0, 360, 10)
     draw_gl_polyline(pts, c, 'Polygon')
     world_size = self.world_size
     window_size = glfwGetWindowSize(self._window)
     ratio = [
         float(world_size[0]) / window_size[0],
         float(world_size[1]) / window_size[1]
     ]
     pos_0 = ratio[0] * pos[0] * .75 + world_size[0] * (1 - .75) / 2
     pos_1 = ratio[1] * pos[1] * .75 + world_size[1] * (1 - .75) / 2
     pos_1 = int(pos_0), int(pos_1)
     c = cv2.cv.CV_RGB(c[0] * 255, c[1] * 255, c[2] * 255)
     cv2.ellipse(frame, tuple(pos_1), (r, r), 0, 0, 360, c)
Code Example #17
File: marker_detector.py  Project: Azique/pupil
    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        for m in self.markers:
            hat = np.array([[[0,0],[0,1],[.5,1.3],[1,1],[1,0],[0,0]]],dtype=np.float32)
            hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
            draw_gl_polyline(hat.reshape((6,2)),(0.1,1.,1.,.5))

        for s in self.surfaces:
            s.gl_draw_frame()
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.surface_edit_mode.value:
            for s in self.surfaces:
                s.gl_draw_corners()
Code Example #18
    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have be cached
        cached_ranges = []
        for r in self.cache.visited_ranges:  # [[0,1],[3,4]]
            cached_ranges += (r[0], 0), (r[1], 0)  #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges:  # [[0,1],[3,4]]
                    found_at += (r[0], 0), (r[1], 0)  # [(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width, height = self.win_size
        h_pad = padding * (self.cache.length - 2) / float(width)
        v_pad = padding * 1. / (height - 2)
        gluOrtho2D(
            -h_pad, (self.cache.length - 1) + h_pad, -v_pad, 1 + v_pad
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = (8., .6, .2, 8.)
        draw_gl_polyline(cached_ranges, color=color, type='Lines', thickness=4)

        color = (0., .7, .3, 8.)

        for s in cached_surfaces:
            glTranslatef(0, .02, 0)
            draw_gl_polyline(s, color=color, type='Lines', thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
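
Because the cache bars are drawn as a 'Lines'-type polyline, every [start, end] range has to be flattened into a pair of (x, 0) endpoints. A standalone sketch of that flattening, assuming visited_ranges has the [[0,1],[3,4]] shape noted in the comments above:

visited_ranges = [[0, 10], [25, 40]]   # hypothetical cache.visited_ranges

cached_ranges = []
for r in visited_ranges:
    # += with a tuple of tuples extends the list by both endpoints
    cached_ranges += (r[0], 0), (r[1], 0)

print(cached_ranges)  # [(0, 0), (10, 0), (25, 0), (40, 0)]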
Code Example #19
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """

        if self.active and self.detected:
            for e in self.candidate_ellipses:
                pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                    (int(e[1][0]/2),int(e[1][1]/2)),
                                    int(e[-1]),0,360,15)
                draw_gl_polyline(pts,(0.,1.,0,1.))
        else:
            pass
Code Example #20
    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        for m in self.markers:
            hat = np.array([[[0,0],[0,1],[.5,1.3],[1,1],[1,0],[0,0]]],dtype=np.float32)
            hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
            draw_gl_polyline(hat.reshape((6,2)),(0.1,1.,1.,.5))


        for s in self.surfaces:
            s.gl_draw_frame()
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.surface_edit_mode.value:
            for s in self.surfaces:
                s.gl_draw_corners()
Code Example #21
    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

       # Lines for areas that have be cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        gluOrtho2D(-h_pad,  (self.cache.length-1)+h_pad, -v_pad, 1+v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)


        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = (8.,.6,.2,8.)
        draw_gl_polyline(cached_ranges,color=color,type='Lines',thickness=4)

        color = (0.,.7,.3,8.)

        for s in cached_surfaces:
            glTranslatef(0,.02,0)
            draw_gl_polyline(s,color=color,type='Lines',thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Code Example #22
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """

        if self.active and self.detected:
            for e in self.candidate_ellipses:
                pts = cv2.ellipse2Poly((int(e[0][0]), int(e[0][1])),
                                       (int(e[1][0] / 2), int(e[1][1] / 2)),
                                       int(e[-1]), 0, 360, 15)
                draw_gl_polyline(pts, (0., 1., 0, 1.))
        else:
            pass
        if self._window:
            self.gl_display_in_window()
Code Example #23
File: trim_marks.py  Project: Azique/pupil
    def gl_display(self):

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(-self.h_pad,  (self.frame_count)+self.h_pad, -self.v_pad, 1+self.v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color1 = (.1,.9,.2,.5)
        color2 = (.1,.9,.2,.5)

        if self.in_mark != 0 or self.out_mark != self.frame_count:
            draw_gl_polyline( [(self.in_mark,0),(self.out_mark,0)],color=(.1,.9,.2,.5),thickness=2)
        draw_gl_point((self.in_mark,0),color=color2,size=10)
        draw_gl_point((self.out_mark,0),color=color2,size=10)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
Code Example #24
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """

        if self.active:
            draw_gl_point_norm(self.smooth_pos,
                               size=15,
                               color=(1., 1., 0., .5))

        if self.active and self.detected:
            for e in self.candidate_ellipses:
                pts = cv2.ellipse2Poly((int(e[0][0]), int(e[0][1])),
                                       (int(e[1][0] / 2), int(e[1][1] / 2)),
                                       int(e[-1]), 0, 360, 15)
                draw_gl_polyline(pts, (0., 1., 0, 1.))

            if self.counter:
                # lets draw an indicator on the count
                e = self.candidate_ellipses[2]
                pts = cv2.ellipse2Poly((int(e[0][0]), int(e[0][1])),
                                       (int(e[1][0] / 2), int(e[1][1] / 2)),
                                       int(e[-1]), 0, 360,
                                       360 / self.counter_max)
                indicator = [e[0]] + pts[self.counter:].tolist()[::-1] + [e[0]]
                draw_gl_polyline(indicator, (0.1, .5, .7, .8), type='Polygon')

            if self.auto_stop:
                # lets draw an indicator on the autostop count
                e = self.candidate_ellipses[2]
                pts = cv2.ellipse2Poly((int(e[0][0]), int(e[0][1])),
                                       (int(e[1][0] / 2), int(e[1][1] / 2)),
                                       int(e[-1]), 0, 360,
                                       360 / self.auto_stop_max)
                indicator = [e[0]] + pts[self.auto_stop:].tolist() + [e[0]]
                draw_gl_polyline(indicator, (8., 0.1, 0.1, .8), type='Polygon')
        else:
            pass
Code Example #25
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """

        if self.active:
            draw_gl_point_norm(self.smooth_pos,size=15,color=(1.,1.,0.,.5))

        if self.active and self.detected:
            for e in self.candidate_ellipses:
                pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                    (int(e[1][0]/2),int(e[1][1]/2)),
                                    int(e[-1]),0,360,15)
                draw_gl_polyline(pts,(0.,1.,0,1.))

            if self.counter:
                # lets draw an indicator on the count
                e = self.candidate_ellipses[2]
                pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                    (int(e[1][0]/2),int(e[1][1]/2)),
                                    int(e[-1]),0,360,360/self.counter_max)
                indicator = [e[0]] + pts[self.counter:].tolist()[::-1] + [e[0]]
                draw_gl_polyline(indicator,(0.1,.5,.7,.8),type='Polygon')

            if self.auto_stop:
                # lets draw an indicator on the autostop count
                e = self.candidate_ellipses[2]
                pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                    (int(e[1][0]/2),int(e[1][1]/2)),
                                    int(e[-1]),0,360,360/self.auto_stop_max)
                indicator = [e[0]] + pts[self.auto_stop:].tolist() + [e[0]]
                draw_gl_polyline(indicator,(8.,0.1,0.1,.8),type='Polygon')
        else:
            pass
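
The counter indicator in the last two examples is a pie-slice polygon: the portion of the ellipse outline that is still "open" is kept and closed through the centre. A sketch of that construction with hypothetical values (e follows the cv2.fitEllipse layout; // replaces the Python 2 integer division used above):

import cv2

e = ((320.0, 240.0), (80.0, 80.0), 0.0)   # hypothetical detected ellipse
counter, counter_max = 10, 30             # samples collected so far / target

pts = cv2.ellipse2Poly((int(e[0][0]), int(e[0][1])),
                       (int(e[1][0] / 2), int(e[1][1] / 2)),
                       int(e[2]), 0, 360, 360 // counter_max)
# Keep the not-yet-counted part of the outline and close it through the
# centre so draw_gl_polyline(..., type='Polygon') renders a filled wedge.
indicator = [e[0]] + pts[counter:].tolist()[::-1] + [e[0]]
print(len(indicator))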
Code Example #26
def draw_circle(pos,r,c):
    pts = cv2.ellipse2Poly(tuple(pos),(r,r),0,0,360,10)
    draw_gl_polyline(pts,c,'Polygon')
Code Example #27
File: eye.py  Project: r0bert0/pupil
def eye(g_pool,cap_src,cap_size):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir,'eye.log'),mode='w')
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('EYE Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter('E Y E Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)


    # Callback functions
    def on_resize(window,w, h):
        adjust_gl_view(w,h,window)
        norm_size = normalize((w,h),glfwGetWindowSize(window))
        fb_size = denormalize(norm_size,glfwGetFramebufferSize(window))
        atb.TwWindowSize(*map(int,fb_size))


    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key,int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window,char):
        if not atb.TwEventCharGLFW(char,1):
            pass

    def on_button(window,button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button,int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(window))
                pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # pos in frame.img pixels
                u_r.setStart(pos)
                bar.draw_roi.value = 1
            else:
                bar.draw_roi.value = 0

    def on_pos(window,x, y):
        norm_pos = normalize((x,y),glfwGetWindowSize(window))
        fb_x,fb_y = denormalize(norm_pos,glfwGetFramebufferSize(window))
        if atb.TwMouseMotion(int(fb_x),int(fb_y)):
            pass

        if bar.draw_roi.value == 1:
            pos = denormalize(norm_pos,(frame.img.shape[1],frame.img.shape[0]) ) # pos in frame.img pixels
            u_r.setEnd(pos)

    def on_scroll(window,x,y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')


    # Helper functions called by the main atb bar
    def start_roi():
        bar.display.value = 1
        bar.draw_roi.value = 2

    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)
            bar.dt.value = dt

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value


    # load session persistent settings
    session_settings = shelve.open(os.path.join(g_pool.user_dir,'user_settings_eye'),protocol=2)
    def load(var_name,default):
        return session_settings.get(var_name,default)
    def save(var_name,var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(cap_src, cap_size,timebase=g_pool.timebase)

    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    # check if it works
    frame = cap.get_frame()
    if frame.img is None:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height,width = frame.img.shape[:2]

    u_r = Roi(frame.img.shape)
    u_r.set(load('roi',default=None))

    writer = None

    pupil_detector = Canny_Detector(g_pool)

    atb.init()
    # Create main ATB Controls
    bar = atb.Bar(name = "Eye", label="Display",
            help="Scene controls", color=(50, 50, 50), alpha=100,
            text='light', position=(10, 10),refresh=.3, size=(200, 100))
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.dt = c_float(0.0)
    bar.sleep = c_float(0.0)
    bar.display = c_int(load('bar.display',0))
    bar.draw_pupil = c_bool(load('bar.draw_pupil',True))
    bar.draw_roi = c_int(0)

    dispay_mode_enum = atb.enum("Mode",{"Camera Image":0,
                                        "Region of Interest":1,
                                        "Algorithm":2,
                                        "CPU Save": 3})

    bar.add_var("FPS",bar.fps, step=1.,readonly=True)
    bar.add_var("Mode", bar.display,vtype=dispay_mode_enum, help="select the view-mode")
    bar.add_var("Show_Pupil_Point", bar.draw_pupil)
    bar.add_button("Draw_ROI", start_roi, help="drag on screen to select a region of interest")

    bar.add_var("SlowDown",bar.sleep, step=0.01,min=0.0)
    bar.add_var("SaveSettings&Exit", g_pool.quit)

    cap.create_atb_bar(pos=(220,10))

    # create a bar for the detector
    pupil_detector.create_atb_bar(pos=(10,120))


    glfwInit()
    window = glfwCreateWindow(width, height, "Eye", None, None)
    glfwMakeContextCurrent(window)

    # Register callbacks window
    glfwSetWindowSizeCallback(window,on_resize)
    glfwSetWindowCloseCallback(window,on_close)
    glfwSetKeyCallback(window,on_key)
    glfwSetCharCallback(window,on_char)
    glfwSetMouseButtonCallback(window,on_button)
    glfwSetCursorPosCallback(window,on_pos)
    glfwSetScrollCallback(window,on_scroll)

    glfwSetWindowPos(window,800,0)
    on_resize(window,width,height)

    # gl_state settings
    basic_gl_setup()

    # refresh speed settings
    glfwSwapInterval(0)


    # event loop
    while not g_pool.quit.value:
        # Get an image from the grabber
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Capture from Camera Failed. Stopping.")
            break
        except EndofVideoFileError:
            logger.warning("Video File is done. Stopping")
            break

        update_fps()
        sleep(bar.sleep.value) # for debugging only


        ###  RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if g_pool.eye_rx.poll():
            command = g_pool.eye_rx.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %s"%record_path)
                video_path = os.path.join(record_path, "eye.avi")
                timestamps_path = os.path.join(record_path, "eye_timestamps.npy")
                writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'), bar.fps.value, (frame.img.shape[1], frame.img.shape[0]))
                timestamps = []
            else:
                logger.info("Done recording eye.")
                writer = None
                np.save(timestamps_path,np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write(frame.img)
            timestamps.append(frame.timestamp)


        # pupil ellipse detection
        result = pupil_detector.detect(frame,user_roi=u_r,visualize=bar.display.value == 2)
        # stream the result
        g_pool.pupil_queue.put(result)

        # VISUALIZATION direct visualizations on the frame.img data
        if bar.display.value == 1:
            # draw a solid (white) frame around the user-defined ROI
            r_img = frame.img[u_r.lY:u_r.uY,u_r.lX:u_r.uX]
            r_img[:,0] = 255,255,255
            r_img[:,-1]= 255,255,255
            r_img[0,:] = 255,255,255
            r_img[-1,:]= 255,255,255



        # GL-drawing
        clear_gl_screen()
        draw_gl_texture(frame.img,update=bar.display.value != 3)

        if result['norm_pupil'] is not None and bar.draw_pupil.value:
            if result.has_key('axes'):
                pts = cv2.ellipse2Poly( (int(result['center'][0]),int(result['center'][1])),
                                        (int(result["axes"][0]/2),int(result["axes"][1]/2)),
                                        int(result["angle"]),0,360,15)
                draw_gl_polyline(pts,(1.,0,0,.5))
            draw_gl_point_norm(result['norm_pupil'],color=(1.,0.,0.,0.5))

        atb.draw()
        glfwSwapBuffers(window)
        glfwPollEvents()

    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path,np.asarray(timestamps))


    # save session persistent settings
    save('roi',u_r.get())
    save('bar.display',bar.display.value)
    save('bar.draw_pupil',bar.draw_pupil.value)
    session_settings.close()

    pupil_detector.cleanup()
    cap.close()
    atb.terminate()
    glfwDestroyWindow(window)
    glfwTerminate()

    # flush the queue in case the world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")
Code Example #28
File: eye.py  Project: pangshu2007/CodeForRead
def eye(g_pool):
    """
    this process needs a docstring
    """
    # # callback functions
    def on_resize(w, h):
        atb.TwWindowSize(w, h)
        adjust_gl_view(w, h)

    def on_key(key, pressed):
        if not atb.TwEventKeyboardGLFW(key, pressed):
            if pressed:
                if key == GLFW_KEY_ESC:
                    on_close()

    def on_char(char, pressed):
        if not atb.TwEventCharGLFW(char, pressed):
            pass

    def on_button(button, pressed):
        if not atb.TwEventMouseButtonGLFW(button, pressed):
            if bar.draw_roi.value:
                if pressed:
                    pos = glfwGetMousePos()
                    pos = normalize(pos, glfwGetWindowSize())
                    pos = denormalize(pos, (img.shape[1], img.shape[0]))  # pos in img pixels
                    r.setStart(pos)
                    bar.draw_roi.value = 1
                else:
                    bar.draw_roi.value = 0

    def on_pos(x, y):
        if atb.TwMouseMotion(x, y):
            pass
        if bar.draw_roi.value == 1:
            pos = glfwGetMousePos()
            pos = normalize(pos, glfwGetWindowSize())
            pos = denormalize(pos, (img.shape[1], img.shape[0]))  # pos in img pixels
            r.setEnd(pos)

    def on_scroll(pos):
        if not atb.TwMouseWheel(pos):
            pass

    def on_close():
        g_pool.quit.value = True
        print "EYE Process closing from window"

    # initialize capture, check if it works
    cap = autoCreateCapture(g_pool.eye_src, g_pool.eye_size)
    if cap is None:
        print "EYE: Error could not create Capture"
        return
    s, img = cap.read_RGB()
    if not s:
        print "EYE: Error could not get image"
        return
    height, width = img.shape[:2]

    # pupil object
    pupil = Temp()
    pupil.norm_coords = (0.0, 0.0)
    pupil.image_coords = (0.0, 0.0)
    pupil.ellipse = None
    pupil.gaze_coords = (0.0, 0.0)

    try:
        pupil.pt_cloud = np.load("cal_pt_cloud.npy")
        map_pupil = get_map_from_cloud(pupil.pt_cloud, g_pool.world_size)  ###world video size here
    except:
        pupil.pt_cloud = None

        def map_pupil(vector):
            return vector

    r = Roi(img.shape)
    p_r = Roi(img.shape)

    # local object
    l_pool = Temp()
    l_pool.calib_running = False
    l_pool.record_running = False
    l_pool.record_positions = []
    l_pool.record_path = None
    l_pool.writer = None
    l_pool.region_r = 20

    atb.init()
    bar = Bar(
        "Eye",
        g_pool,
        dict(
            label="Controls",
            help="eye detection controls",
            color=(50, 50, 50),
            alpha=100,
            text="light",
            position=(10, 10),
            refresh=0.1,
            size=(200, 300),
        ),
    )

    # add v4l2 camera controls to a separate ATB bar
    if cap.controls is not None:
        c_bar = atb.Bar(
            name="Camera_Controls",
            label=cap.name,
            help="UVC Camera Controls",
            color=(50, 50, 50),
            alpha=100,
            text="light",
            position=(220, 10),
            refresh=2.0,
            size=(200, 200),
        )

        # c_bar.add_var("auto_refresher",vtype=atb.TW_TYPE_BOOL8,getter=cap.uvc_refresh_all,setter=None,readonly=True)
        # c_bar.define(definition='visible=0', varname="auto_refresher")

        sorted_controls = [c for c in cap.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        for control in sorted_controls:
            name = control.atb_name
            if control.type == "bool":
                c_bar.add_var(name, vtype=atb.TW_TYPE_BOOL8, getter=control.get_val, setter=control.set_val)
            elif control.type == "int":
                c_bar.add_var(name, vtype=atb.TW_TYPE_INT32, getter=control.get_val, setter=control.set_val)
                c_bar.define(definition="min=" + str(control.min), varname=name)
                c_bar.define(definition="max=" + str(control.max), varname=name)
                c_bar.define(definition="step=" + str(control.step), varname=name)
            elif control.type == "menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype = atb.enum(name, control.menu)
                c_bar.add_var(name, vtype=vtype, getter=control.get_val, setter=control.set_val)
                if control.menu is None:
                    c_bar.define(definition="min=" + str(control.min), varname=name)
                    c_bar.define(definition="max=" + str(control.max), varname=name)
                    c_bar.define(definition="step=" + str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # c_bar.define(definition='readonly=1',varname=control.name)

        c_bar.add_button("refresh", cap.update_from_device)
        c_bar.add_button("load defaults", cap.load_defaults)

    else:
        c_bar = None

    # Initialize glfw
    glfwInit()
    glfwOpenWindow(width, height, 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("Eye")
    glfwSetWindowPos(800, 0)
    if isinstance(g_pool.eye_src, str):
        glfwSwapInterval(0)  # turn off v-sync when using video as src for benchmarking

    # register callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)
    glfwSetMouseButtonCallback(on_button)
    glfwSetMousePosCallback(on_pos)
    glfwSetMouseWheelCallback(on_scroll)

    # gl_state settings
    import OpenGL.GL as gl

    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glPointSize(20)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)
    del gl

    # event loop
    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        bar.update_fps()
        s, img = cap.read_RGB()
        sleep(bar.sleep.value)  # for debugging only

        ###IMAGE PROCESSING
        gray_img = grayscale(img[r.lY : r.uY, r.lX : r.uX])

        integral = cv2.integral(gray_img)
        integral = np.array(integral, dtype=c_float)
        x, y, w = eye_filter(integral)
        if w > 0:
            p_r.set((y, x, y + w, x + w))
        else:
            p_r.set((0, 0, -1, -1))

        # create view into the gray_img with the bounds of the rough pupil estimation
        pupil_img = gray_img[p_r.lY : p_r.uY, p_r.lX : p_r.uX]

        # pupil_img = cv2.morphologyEx(pupil_img, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT,(5,5)),iterations=2)
        if True:
            hist = cv2.calcHist(
                [pupil_img], [0], None, [256], [0, 256]
            )  # (images, channels, mask, histSize, ranges[, hist[, accumulate]])
            bins = np.arange(hist.shape[0])
            spikes = bins[hist[:, 0] > 40]  # every color seen in more than 40 pixels
            if spikes.shape[0] > 0:
                lowest_spike = spikes.min()
            offset = 40

            ##display the histogram
            sx, sy = 100, 1
            colors = ((255, 0, 0), (0, 0, 255), (0, 255, 255))
            h, w, chan = img.shape
            # normalize
            hist *= 1.0 / hist.max()
            for i, h in zip(bins, hist[:, 0]):
                c = colors[1]
                cv2.line(img, (w, int(i * sy)), (w - int(h * sx), int(i * sy)), c)
            cv2.line(img, (w, int(lowest_spike * sy)), (int(w - 0.5 * sx), int(lowest_spike * sy)), colors[0])
            cv2.line(
                img,
                (w, int((lowest_spike + offset) * sy)),
                (int(w - 0.5 * sx), int((lowest_spike + offset) * sy)),
                colors[2],
            )

        # # k-means on the histogram finds peaks but thats no good for us...
        # term_crit = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        # compactness, bestLabels, centers = cv2.kmeans(data=hist, K=2, criteria=term_crit, attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
        # cv2.line(img,(0,1),(int(compactness),1),(0,0,0))
        # good_cluster = np.argmax(centers)
        # # A = hist[bestLabels.ravel() == good_cluster]
        # # B = hist[bestLabels.ravel() != good_cluster]
        # bins = np.arange(hist.shape[0])
        # good_bins = bins[bestLabels.ravel() == good_cluster]
        # good_bins_mean = good_bins.sum()/good_bins.shape[0]
        # good_bins_min = good_bins.min()

        # h,w,chan = img.shape
        # for h, i, label in zip(hist[:,0],range(hist.shape[0]), bestLabels.ravel()):
        #     c = colors[label]
        #     cv2.line(img,(w,int(i*sy)),(w-int(h*sx),int(i*sy)),c)

        else:
            # direct k-means on the image is best but expensive
            Z = pupil_img[:: w / 30 + 1, :: w / 30 + 1].reshape((-1, 1))
            Z = np.float32(Z)
            # define criteria, number of clusters(K) and apply kmeans()
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 2.0)
            K = 5
            ret, label, center = cv2.kmeans(Z, K, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
            offset = 0
            center.sort(axis=0)
            lowest_spike = int(center[1])
            # # Now convert back into uint8, and make original image
            # center = np.uint8(center)
            # res = center[label.flatten()]
            # binary_img = res.reshape((pupil_img.shape))
            # binary_img = bin_thresholding(binary_img,image_upper=res.min()+1)
            # bar.bin_thresh.value = res.min()+1

        bar.bin_thresh.value = lowest_spike
        binary_img = bin_thresholding(pupil_img, image_upper=lowest_spike + offset)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        cv2.dilate(binary_img, kernel, binary_img, iterations=2)
        spec_mask = bin_thresholding(pupil_img, image_upper=250)
        cv2.erode(spec_mask, kernel, spec_mask, iterations=1)

        if bar.blur.value > 1:
            pupil_img = cv2.medianBlur(pupil_img, bar.blur.value)

        # create contours using Canny edge detection
        contours = cv2.Canny(
            pupil_img,
            bar.canny_thresh.value,
            bar.canny_thresh.value * bar.canny_ratio.value,
            apertureSize=bar.canny_aperture.value,
        )

        # remove contours in areas that are not dark enough and where the glint (spectral reflection from the IR LEDs) appears
        contours = cv2.min(contours, spec_mask)
        contours = cv2.min(contours, binary_img)

        # Ellipse fitting from contours
        result = fit_ellipse(
            img[r.lY : r.uY, r.lX : r.uX][p_r.lY : p_r.uY, p_r.lX : p_r.uX],
            contours,
            binary_img,
            target_size=bar.pupil_size.value,
            size_tolerance=bar.pupil_size_tolerance.value,
        )

        # # Visualizations
        overlay = cv2.cvtColor(pupil_img, cv2.COLOR_GRAY2RGB)  # create an RGB view onto the gray pupil ROI
        overlay[:, :, 0] = cv2.max(pupil_img, contours)  # green channel
        overlay[:, :, 2] = cv2.max(pupil_img, binary_img)  # blue channel
        overlay[:, :, 1] = cv2.min(pupil_img, spec_mask)  # red channel

        # draw a blue dotted frame around the automatic pupil ROI in overlay...
        overlay[::2, 0] = 0, 0, 255
        overlay[::2, -1] = 0, 0, 255
        overlay[0, ::2] = 0, 0, 255
        overlay[-1, ::2] = 0, 0, 255

        # and a solid (white) frame around the user defined ROI
        gray_img[:, 0] = 255
        gray_img[:, -1] = 255
        gray_img[0, :] = 255
        gray_img[-1, :] = 255

        if bar.display.value == 0:
            img = img
        elif bar.display.value == 1:
            img[r.lY : r.uY, r.lX : r.uX] = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2RGB)
        elif bar.display.value == 2:
            img[r.lY : r.uY, r.lX : r.uX] = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2RGB)
            img[r.lY : r.uY, r.lX : r.uX][p_r.lY : p_r.uY, p_r.lX : p_r.uX] = overlay
        elif bar.display.value == 3:
            img = cv2.cvtColor(pupil_img, cv2.COLOR_GRAY2RGB)
        else:
            pass

        if result is not None:
            pupil.ellipse, others = result
            pupil.image_coords = r.add_vector(p_r.add_vector(pupil.ellipse["center"]))
            # update pupil size,angle and ratio for the ellipse filter algorithm
            bar.pupil_size.value = bar.pupil_size.value + 0.5 * (pupil.ellipse["major"] - bar.pupil_size.value)
            bar.pupil_ratio.value = bar.pupil_ratio.value + 0.7 * (pupil.ellipse["ratio"] - bar.pupil_ratio.value)
            bar.pupil_angle.value = bar.pupil_angle.value + 1.0 * (pupil.ellipse["angle"] - bar.pupil_angle.value)

            # if pupil found tighten the size tolerance
            bar.pupil_size_tolerance.value -= 1
            bar.pupil_size_tolerance.value = max(10, min(50, bar.pupil_size_tolerance.value))

            # clamp pupil size
            bar.pupil_size.value = max(20, min(300, bar.pupil_size.value))

            # normalize
            pupil.norm_coords = normalize(pupil.image_coords, (img.shape[1], img.shape[0]), flip_y=True)

            # from pupil to gaze
            pupil.gaze_coords = map_pupil(pupil.norm_coords)
            g_pool.gaze_x.value, g_pool.gaze_y.value = pupil.gaze_coords

        else:
            pupil.ellipse = None
            g_pool.gaze_x.value, g_pool.gaze_y.value = 0.0, 0.0
            pupil.gaze_coords = None  # without this line the last known pupil position would be recorded if none is found

            bar.pupil_size_tolerance.value += 1

        ###CALIBRATION###
        # Initialize Calibration (setup variables and lists)
        if g_pool.calibrate.value and not l_pool.calib_running:
            l_pool.calib_running = True
            pupil.pt_cloud = []

        # While Calibrating...
        if l_pool.calib_running and ((g_pool.ref_x.value != 0) or (g_pool.ref_y.value != 0)) and pupil.ellipse:
            pupil.pt_cloud.append([pupil.norm_coords[0], pupil.norm_coords[1], g_pool.ref_x.value, g_pool.ref_y.value])

        # Calculate mapping coefs
        if not g_pool.calibrate.value and l_pool.calib_running:
            l_pool.calib_running = 0
            if pupil.pt_cloud:  # some data was actually collected
                print "Calibrating with", len(pupil.pt_cloud), "collected data points."
                map_pupil = get_map_from_cloud(np.array(pupil.pt_cloud), g_pool.world_size, verbose=True)
                np.save("cal_pt_cloud.npy", np.array(pupil.pt_cloud))

        ###RECORDING###
        # Setup variables and lists for recording
        if g_pool.pos_record.value and not l_pool.record_running:
            l_pool.record_path = g_pool.eye_rx.recv()
            print "l_pool.record_path: ", l_pool.record_path

            video_path = path.join(l_pool.record_path, "eye.avi")
            # FFV1 -- good speed lossless big file
            # DIVX -- good speed good compression medium file
            if bar.record_eye.value:
                l_pool.writer = cv2.VideoWriter(
                    video_path, cv2.cv.CV_FOURCC(*"DIVX"), bar.fps.value, (img.shape[1], img.shape[0])
                )
            l_pool.record_positions = []
            l_pool.record_running = True

        # While recording...
        if l_pool.record_running:
            if pupil.gaze_coords is not None:
                l_pool.record_positions.append(
                    [
                        pupil.gaze_coords[0],
                        pupil.gaze_coords[1],
                        pupil.norm_coords[0],
                        pupil.norm_coords[1],
                        bar.dt,
                        g_pool.frame_count_record.value,
                    ]
                )
            if l_pool.writer is not None:
                l_pool.writer.write(cv2.cvtColor(img, cv2.cv.COLOR_BGR2RGB))

        # Done Recording: Save values and flip switch to off for recording
        if not g_pool.pos_record.value and l_pool.record_running:
            positions_path = path.join(l_pool.record_path, "gaze_positions.npy")
            cal_pt_cloud_path = path.join(l_pool.record_path, "cal_pt_cloud.npy")
            np.save(positions_path, np.asarray(l_pool.record_positions))
            try:
                np.save(cal_pt_cloud_path, np.asarray(pupil.pt_cloud))
            except:
                print "Warning: No calibration data associated with this recording."
            l_pool.writer = None
            l_pool.record_running = False

        ### GL-drawing
        clear_gl_screen()
        draw_gl_texture(img)

        if bar.draw_pupil and pupil.ellipse:
            pts = cv2.ellipse2Poly(
                (int(pupil.image_coords[0]), int(pupil.image_coords[1])),
                (int(pupil.ellipse["axes"][0] / 2), int(pupil.ellipse["axes"][1] / 2)),
                int(pupil.ellipse["angle"]),
                0,
                360,
                15,
            )
            draw_gl_polyline(pts, (1.0, 0, 0, 0.5))
            draw_gl_point_norm(pupil.norm_coords, (1.0, 0.0, 0.0, 0.5))

        atb.draw()
        glfwSwapBuffers()

    # end while running
    print "EYE Process closed"
    r.save()
    bar.save()
    atb.terminate()
    glfwCloseWindow()
    glfwTerminate()
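
The histogram branch of the detection loop above picks its threshold from the darkest intensity that occurs in more than 40 pixels and then thresholds a fixed offset above it. A standalone sketch of that selection on a synthetic 8-bit ROI (the final comparison is only a stand-in for the project's bin_thresholding helper):

import numpy as np
import cv2

gray_roi = (np.random.rand(120, 160) * 255).astype(np.uint8)  # fake eye ROI

hist = cv2.calcHist([gray_roi], [0], None, [256], [0, 256])
bins = np.arange(hist.shape[0])
spikes = bins[hist[:, 0] > 40]            # intensities seen in > 40 pixels
lowest_spike = spikes.min() if spikes.size else 200
offset = 40

threshold = lowest_spike + offset
binary_img = (gray_roi < threshold).astype(np.uint8) * 255
print(lowest_spike, int(binary_img.mean()))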
Code Example #29
def draw_circle(pos,r,c):
    pts = cv2.ellipse2Poly(tuple(pos),(r,r),0,0,360,10)
    draw_gl_polyline(pts,c,'Polygon')
Code Example #30
def eye(g_pool, cap_src, cap_size):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir, 'eye.log'),
                             mode='w')
    fh.setLevel(logging.INFO)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        'EYE Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter(
        'E Y E Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    # Callback functions
    def on_resize(window, w, h):
        adjust_gl_view(w, h)
        atb.TwWindowSize(w, h)

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key, int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window, char):
        if not atb.TwEventCharGLFW(char, 1):
            pass

    def on_button(window, button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button, int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos, glfwGetWindowSize(window))
                pos = denormalize(
                    pos, (frame.img.shape[1],
                          frame.img.shape[0]))  # pos in frame.img pixels
                u_r.setStart(pos)
                bar.draw_roi.value = 1
            else:
                bar.draw_roi.value = 0

    def on_pos(window, x, y):
        if atb.TwMouseMotion(int(x), int(y)):
            pass
        if bar.draw_roi.value == 1:
            pos = x, y
            pos = normalize(pos, glfwGetWindowSize(window))
            pos = denormalize(pos,
                              (frame.img.shape[1],
                               frame.img.shape[0]))  # pos in frame.img pixels
            u_r.setEnd(pos)

    def on_scroll(window, x, y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')

    # Helper functions called by the main atb bar
    def start_roi():
        bar.display.value = 1
        bar.draw_roi.value = 2

    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)
            bar.dt.value = dt

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value

    # load session persistent settings
    session_settings = shelve.open(os.path.join(g_pool.user_dir,
                                                'user_settings_eye'),
                                   protocol=2)

    def load(var_name, default):
        return session_settings.get(var_name, default)

    def save(var_name, var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(cap_src, cap_size)

    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    # check if it works
    frame = cap.get_frame()
    if frame.img is None:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height, width = frame.img.shape[:2]
    cap.auto_rewind = False

    u_r = Roi(frame.img.shape)
    u_r.set(load('roi', default=None))

    writer = None

    pupil_detector = Canny_Detector(g_pool)

    atb.init()
    # Create main ATB Controls
    bar = atb.Bar(name="Eye",
                  label="Display",
                  help="Scene controls",
                  color=(50, 50, 50),
                  alpha=100,
                  text='light',
                  position=(10, 10),
                  refresh=.3,
                  size=(200, 100))
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.dt = c_float(0.0)
    bar.sleep = c_float(0.0)
    bar.display = c_int(load('bar.display', 0))
    bar.draw_pupil = c_bool(load('bar.draw_pupil', True))
    bar.draw_roi = c_int(0)

    dispay_mode_enum = atb.enum("Mode", {
        "Camera Image": 0,
        "Region of Interest": 1,
        "Algorithm": 2
    })

    bar.add_var("FPS", bar.fps, step=1., readonly=True)
    bar.add_var("Mode",
                bar.display,
                vtype=dispay_mode_enum,
                help="select the view-mode")
    bar.add_var("Show_Pupil_Point", bar.draw_pupil)
    bar.add_button("Draw_ROI",
                   start_roi,
                   help="drag on screen to select a region of interest")

    bar.add_var("SlowDown", bar.sleep, step=0.01, min=0.0)
    bar.add_var("SaveSettings&Exit", g_pool.quit)

    cap.create_atb_bar(pos=(220, 10))

    # create a bar for the detector
    pupil_detector.create_atb_bar(pos=(10, 120))

    glfwInit()
    window = glfwCreateWindow(width, height, "Eye", None, None)
    glfwMakeContextCurrent(window)

    # Register callbacks window
    glfwSetWindowSizeCallback(window, on_resize)
    glfwSetWindowCloseCallback(window, on_close)
    glfwSetKeyCallback(window, on_key)
    glfwSetCharCallback(window, on_char)
    glfwSetMouseButtonCallback(window, on_button)
    glfwSetCursorPosCallback(window, on_pos)
    glfwSetScrollCallback(window, on_scroll)

    glfwSetWindowPos(window, 800, 0)
    on_resize(window, width, height)

    # gl_state settings
    basic_gl_setup()

    # refresh speed settings
    glfwSwapInterval(0)

    # event loop
    while not g_pool.quit.value:
        frame = cap.get_frame()
        if frame.img is None:
            break
        update_fps()
        sleep(bar.sleep.value)  # for debugging only

        if pupil_detector.should_sleep:
            sleep(16)
            pupil_detector.should_sleep = False

        ###  RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if g_pool.eye_rx.poll():
            command = g_pool.eye_rx.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %(record_path)s")
                video_path = os.path.join(record_path, "eye.avi")
                timestamps_path = os.path.join(record_path,
                                               "eye_timestamps.npy")
                writer = cv2.VideoWriter(
                    video_path, cv2.cv.CV_FOURCC(*'DIVX'), bar.fps.value,
                    (frame.img.shape[1], frame.img.shape[0]))
                timestamps = []
            else:
                logger.info("Done recording eye.")
                writer = None
                np.save(timestamps_path, np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write(frame.img)
            timestamps.append(frame.timestamp)

        # pupil ellipse detection
        result = pupil_detector.detect(frame,
                                       user_roi=u_r,
                                       visualize=bar.display.value == 2)
        # stream the result
        g_pool.pupil_queue.put(result)

        # VISUALIZATION direct visualizations on the frame.img data
        if bar.display.value == 1:
            # draw a solid (white) frame around the user-defined ROI
            r_img = frame.img[u_r.lY:u_r.uY, u_r.lX:u_r.uX]
            r_img[:, 0] = 255, 255, 255
            r_img[:, -1] = 255, 255, 255
            r_img[0, :] = 255, 255, 255
            r_img[-1, :] = 255, 255, 255

        # GL-drawing
        clear_gl_screen()
        draw_gl_texture(frame.img)

        if result['norm_pupil'] is not None and bar.draw_pupil.value:
            if result.has_key('axes'):
                pts = cv2.ellipse2Poly(
                    (int(result['center'][0]), int(result['center'][1])),
                    (int(result["axes"][0] / 2), int(result["axes"][1] / 2)),
                    int(result["angle"]), 0, 360, 15)
                draw_gl_polyline(pts, (1., 0, 0, .5))
            draw_gl_point_norm(result['norm_pupil'], color=(1., 0., 0., 0.5))

        atb.draw()
        glfwSwapBuffers(window)
        glfwPollEvents()

    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path, np.asarray(timestamps))

    # save session persistent settings
    save('roi', u_r.get())
    save('bar.display', bar.display.value)
    save('bar.draw_pupil', bar.draw_pupil.value)
    session_settings.close()

    pupil_detector.cleanup()
    cap.close()
    atb.terminate()
    glfwDestroyWindow(window)
    glfwTerminate()

    # flush the queue in case the world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")