Example #1
    def __init__(self, g_pool, mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        self.order = .2

        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
            raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',
                                        []) != []:
            logger.debug(
                "Found ref surfaces defined or copied in previous session.")
            self.surfaces = [
                Offline_Reference_Surface(self.g_pool, saved_definition=d)
                for d in self.surface_definitions.get(
                    'offline_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',
                                          []) != []:
            logger.debug(
                "Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture."
            )
            self.surfaces = [
                Offline_Reference_Surface(self.g_pool, saved_definition=d)
                for d in self.surface_definitions.get(
                    'realtime_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

        # ui mode settings
        self.mode = mode
        self.min_marker_perimeter = 20  #if we make this a slider we need to invalidate the cache on change.
        # edit surfaces
        self.edit_surfaces = []

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'square_marker_cache'))
        self.cache = Cache_List(
            self.persistent_cache.get('marker_cache',
                                      [False for _ in g_pool.timestamps]))
        logger.debug(
            "Loaded marker cache %s / %s frames had been searched before" %
            (len(self.cache) - self.cache.count(False), len(self.cache)))
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)

        self.img_shape = None
        self.img = None
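
The constructor above follows a pattern that recurs throughout these examples: load the marker cache persisted by a previous session, fall back to an all-False list (one slot per world frame) if none exists, and report how many frames were already searched. Below is a minimal, self-contained sketch of that pattern; the pickled-dict storage and the load_or_init_marker_cache name are illustrative stand-ins for Pupil's Persistent_Dict, not its actual API.

import os
import pickle

def load_or_init_marker_cache(rec_dir, timestamps):
    # hypothetical stand-in for Persistent_Dict: a plain dict pickled to disk
    cache_path = os.path.join(rec_dir, 'square_marker_cache.pkl')
    try:
        with open(cache_path, 'rb') as f:
            persistent = pickle.load(f)
    except (OSError, IOError, EOFError):
        persistent = {}
    # one entry per world frame; False means "this frame has not been searched yet"
    cache = persistent.get('marker_cache', [False for _ in timestamps])
    searched = len(cache) - cache.count(False)
    print("Loaded marker cache %s / %s frames had been searched before" % (searched, len(cache)))
    return persistent, cache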
Example #2
    def __init__(self,g_pool,mode="Show Markers and Surfaces",min_marker_perimeter = 100,invert_image=False,robust_detection=True):
        super(Offline_Surface_Tracker, self).__init__(g_pool,mode,min_marker_perimeter,robust_detection)
        self.order = .2

        if g_pool.app == 'capture':
           raise Exception('For Player only.')

        self.marker_cache_version = 2
        self.min_marker_perimeter_cacher = 20  #find even super small markers. The surface locator will filter using min_marker_perimeter
        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        version = self.persistent_cache.get('version',0)
        cache = self.persistent_cache.get('marker_cache',None)
        if cache is None:
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            self.persistent_cache['version'] = self.marker_cache_version
        elif version != self.marker_cache_version:
            self.persistent_cache['version'] = self.marker_cache_version
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            logger.debug("Marker cache version mismatch. Rebuilding marker cache.")
        else:
            self.cache = Cache_List(cache)
            logger.debug("Loaded marker cache {} / {} frames had been searched before".format(len(self.cache)-self.cache.count(False),len(self.cache)) )

        self.init_marker_cacher()
        for s in self.surfaces:
            s.init_cache(self.cache,self.camera_calibration,self.min_marker_perimeter,self.min_id_confidence)
        self.recalculate()
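
Example #2 adds a cache-format version to the same pattern: a stored cache is only reused if its version matches the version the code expects, otherwise it is rebuilt from scratch. A small sketch of that check, assuming a plain dict in place of Persistent_Dict and an illustrative CACHE_VERSION constant standing in for self.marker_cache_version:

CACHE_VERSION = 2  # assumed counterpart of self.marker_cache_version

def load_versioned_cache(persistent, n_frames):
    version = persistent.get('version', 0)
    cache = persistent.get('marker_cache', None)
    if cache is None or version != CACHE_VERSION:
        # first run, or the stored format is stale: start over with an all-False cache
        persistent['version'] = CACHE_VERSION
        return [False for _ in range(n_frames)]
    return cache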
Example #3
    def __init__(self,g_pool,gui_settings={'pos':(220,200),'size':(300,300),'iconified':False}):
        super(Offline_Marker_Detector, self).__init__()
        self.g_pool = g_pool
        self.gui_settings = gui_settings
        self.order = .2


        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
           raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.rec_dir,'surface_definitions'))
        if self.load('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d,gaze_positions_by_frame=self.g_pool.positions_by_frame) for d in self.load('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.load('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d,gaze_positions_by_frame=self.g_pool.positions_by_frame) for d in self.load('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []


        # ui mode settings
        self.mode = c_int(0)
        # edit surfaces
        self.edit_surfaces = []

        #detector vars
        self.robust_detection = c_bool(1)
        self.aperture = c_int(11)
        self.min_marker_perimeter = 80

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache',[False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)
        self.recent_pupil_positions = []

        self.img_shape = None
        self.img = None
Example #4
    def __init__(self,g_pool,mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        self.order = .2


        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
           raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = None
        self.surfaces = None
        self.load_surface_definitions_from_file()

        # ui mode settings
        self.mode = mode
        self.min_marker_perimeter = 20  #if we make this a slider we need to invalidate the cache on change.
        # edit surfaces
        self.edit_surfaces = []


        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache',[False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)

        self.img_shape = None
        self.img = None
Example #5
 def load_marker_cache(self):
     #check if marker cache is available from last session
     self.persistent_cache = Persistent_Dict(os.path.join(self.g_pool.rec_dir,'square_marker_cache'))
     version = self.persistent_cache.get('version',0)
     cache = self.persistent_cache.get('marker_cache',None)
     if cache is None:
         self.cache = Cache_List([False for _ in self.g_pool.timestamps])
         self.persistent_cache['version'] = self.marker_cache_version
         self.persistent_cache['inverted_markers'] = self.invert_image
     elif version != self.marker_cache_version:
         self.persistent_cache['version'] = self.marker_cache_version
         self.invert_image = self.persistent_cache.get('inverted_markers',False)
         self.cache = Cache_List([False for _ in self.g_pool.timestamps])
         logger.debug("Marker cache version mismatch. Rebuilding marker cache.")
     else:
         self.cache = Cache_List(cache)
         #we overwrite the invert_image setting from init with the one saved in the marker cache.
         self.invert_image = self.persistent_cache.get('inverted_markers',False)
         logger.debug("Loaded marker cache {} / {} frames had been searched before".format(len(self.cache)-self.cache.count(False),len(self.cache)) )
Example #6
 def init_cache(self, marker_cache):
     if self.defined:
         logger.debug("Full update of surface '%s' positions cache" %
                      self.name)
         self.cache = Cache_List([
             self.answer_caching_request(marker_cache, i)
             for i in xrange(len(marker_cache))
         ],
                                 positive_eval_fn=lambda x:
                                 (x != False) and (x != None))
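
Examples #6-#8 pass a positive_eval_fn into Cache_List so that "frame searched but surface not found" (None or False) can be distinguished from "surface found" (a dict). The sketch below is not the real Cache_List, only an illustration of how such an eval function can be used to derive the positive_ranges that the cache bars in Example #13 draw:

def positive_ranges(entries, positive_eval_fn=lambda x: (x is not False) and (x is not None)):
    # collapse per-frame entries into contiguous [start, stop] index ranges
    # for which the eval function is true
    ranges, start = [], None
    for i, entry in enumerate(entries):
        if positive_eval_fn(entry):
            if start is None:
                start = i
        elif start is not None:
            ranges.append([start, i - 1])
            start = None
    if start is not None:
        ranges.append([start, len(entries) - 1])
    return ranges

# e.g. positive_ranges([False, {'m': 1}, {'m': 2}, None, {'m': 3}]) -> [[1, 2], [4, 4]]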
Example #7
 def init_cache(self, marker_cache, min_marker_perimeter,
                min_id_confidence):
     if self.defined:
         logger.debug("Full update of surface '{}' positions cache".format(
             self.name))
         self.cache = Cache_List([
             self.answer_caching_request(
                 marker_cache, i, min_marker_perimeter, min_id_confidence)
             for i in range(len(marker_cache))
         ],
                                 positive_eval_fn=lambda x:
                                 (x is not False) and (x is not None))
Example #8
 def init_cache(self, marker_cache, camera_calibration,
                min_marker_perimeter, min_id_confidence):
     if self.defined:
         logger.debug("Full update of surface '%s' positions cache" %
                      self.name)
         self.cache = Cache_List([
             self.answer_caching_request(
                 marker_cache, i, camera_calibration, min_marker_perimeter,
                 min_id_confidence) for i in xrange(len(marker_cache))
         ],
                                 positive_eval_fn=lambda x:
                                 (x != False) and (x != None))
Example #9
 def load_marker_cache(self):
     #check if marker cache is available from last session
     self.persistent_cache = Persistent_Dict(
         os.path.join(self.g_pool.rec_dir, 'square_marker_cache'))
     version = self.persistent_cache.get('version', 0)
     cache = self.persistent_cache.get('marker_cache', None)
     if cache is None:
         self.cache = Cache_List([False for _ in self.g_pool.timestamps])
         self.persistent_cache['version'] = self.marker_cache_version
         self.persistent_cache['inverted_markers'] = self.invert_image
     elif version != self.marker_cache_version:
         self.persistent_cache['version'] = self.marker_cache_version
         self.invert_image = self.persistent_cache.get(
             'inverted_markers', False)
         self.cache = Cache_List([False for _ in self.g_pool.timestamps])
         logger.debug(
             "Marker cache version mismatch. Rebuilding marker cache.")
     else:
         self.cache = Cache_List(cache)
         #we overwrite the invert_image setting from init with the one saved in the marker cache.
         self.invert_image = self.persistent_cache.get(
             'inverted_markers', False)
         logger.debug(
             "Loaded marker cache {} / {} frames had been searched before".
             format(
                 len(self.cache) - self.cache.count(False),
                 len(self.cache)))
Example #10
    def __init__(self,g_pool,mode="Show Markers and Surfaces",min_marker_perimeter = 100,invert_image=False,robust_detection=True):
        super().__init__(g_pool,mode,min_marker_perimeter,robust_detection)
        self.order = .2

        if g_pool.app == 'capture':
           raise Exception('For Player only.')

        self.marker_cache_version = 2
        self.min_marker_perimeter_cacher = 20  #find even super small markers. The surface locator will filter using min_marker_perimeter
        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        version = self.persistent_cache.get('version',0)
        cache = self.persistent_cache.get('marker_cache',None)
        if cache is None:
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            self.persistent_cache['version'] = self.marker_cache_version
        elif version != self.marker_cache_version:
            self.persistent_cache['version'] = self.marker_cache_version
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            logger.debug("Marker cache version mismatch. Rebuilding marker cache.")
        else:
            self.cache = Cache_List(cache)
            logger.debug("Loaded marker cache {} / {} frames had been searched before".format(len(self.cache)-self.cache.count(False),len(self.cache)) )

        self.init_marker_cacher()
        for s in self.surfaces:
            s.init_cache(self.cache,self.camera_calibration,self.min_marker_perimeter,self.min_id_confidence)
        self.recalculate()
Example #11
    def __init__(self, g_pool, mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        self.order = .2

        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
            raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = None
        self.surfaces = None
        self.load_surface_definitions_from_file()

        # ui mode settings
        self.mode = mode
        self.min_marker_perimeter = 20  #if we make this a slider we need to invalidate the cache on change.
        # edit surfaces
        self.edit_surfaces = []

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'square_marker_cache'))
        self.cache = Cache_List(
            self.persistent_cache.get('marker_cache',
                                      [False for _ in g_pool.timestamps]))
        logger.debug(
            "Loaded marker cache %s / %s frames had been searched before" %
            (len(self.cache) - self.cache.count(False), len(self.cache)))
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)

        self.img_shape = None
        self.img = None
Example #12
    def __init__(self,g_pool,mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        self.order = .2


        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
           raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.rec_dir,'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []


        # ui mode settings
        self.mode = mode
        self.min_marker_perimeter = 20  #if we make this a slider we need to invalidate the cache on change.
        # edit surfaces
        self.edit_surfaces = []


        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache',[False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)

        self.img_shape = None
        self.img = None
Example #13
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with videofile source.
    It uses a separate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """
    def __init__(self, g_pool, mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        self.order = .2

        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
            raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',
                                        []) != []:
            logger.debug(
                "Found ref surfaces defined or copied in previous session.")
            self.surfaces = [
                Offline_Reference_Surface(self.g_pool, saved_definition=d)
                for d in self.surface_definitions.get(
                    'offline_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',
                                          []) != []:
            logger.debug(
                "Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture."
            )
            self.surfaces = [
                Offline_Reference_Surface(self.g_pool, saved_definition=d)
                for d in self.surface_definitions.get(
                    'realtime_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

        # ui mode settings
        self.mode = mode
        # edit surfaces
        self.edit_surfaces = []

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'square_marker_cache'))
        self.cache = Cache_List(
            self.persistent_cache.get('marker_cache',
                                      [False for _ in g_pool.timestamps]))
        logger.debug(
            "Loaded marker cache %s / %s frames had been searched before" %
            (len(self.cache) - self.cache.count(False), len(self.cache)))
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)

        self.img_shape = None
        self.img = None

    def init_gui(self):
        self.menu = ui.Scrolling_Menu('Offline Marker Tracker')
        self.g_pool.gui.append(self.menu)

        self.add_button = ui.Thumb('add_surface',
                                   setter=self.add_surface,
                                   getter=lambda: False,
                                   label='Add Surface',
                                   hotkey='a')
        self.g_pool.quickbar.append(self.add_button)
        self.update_gui_markers()

        self.on_window_resize(glfwGetCurrentContext(),
                              *glfwGetWindowSize(glfwGetCurrentContext()))

    def deinit_gui(self):
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu = None
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
        self.menu.elements[:] = []
        self.menu.append(
            ui.Info_Text(
                'The offline marker tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'
            ))
        self.menu.append(ui.Button('Close', self.close))
        self.menu.append(
            ui.Selector('mode',
                        self,
                        label='Mode',
                        selection=[
                            "Show Markers and Frames", "Show marker IDs",
                            "Surface edit mode", "Show Heatmaps",
                            "Show Metrics"
                        ]))
        self.menu.append(
            ui.Info_Text(
                'To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'
            ))
        self.menu.append(
            ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(
            ui.Button("Export gaze and surface data",
                      self.save_surface_statsics_to_file))
        self.menu.append(
            ui.Button("Add surface", lambda: self.add_surface('_')))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface %s" % idx)
            s_menu.collapsed = True
            s_menu.append(ui.Text_Input('name', s))
            s_menu.append(ui.Text_Input('x', s.real_world_size,
                                        label='X size'))
            s_menu.append(ui.Text_Input('y', s.real_world_size,
                                        label='Y size'))
            s_menu.append(ui.Button('Open Debug Window', s.open_close_window))

            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)

            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove', remove_s))
            self.menu.append(s_menu)

    def close(self):
        self.alive = False

    def on_window_resize(self, window, w, h):
        self.win_size = w, h

    def on_click(self, pos, button, action):
        if self.mode == "Surface edit mode":
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surface verts in edit mode, let's see if the cursor is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0., 0.), (1., 0.), (1., 1.), (0., 1.))
                    x, y = pos
                    for s in self.surfaces:
                        if s.detected and s.defined:
                            for (vx, vy), i in zip(
                                    s.ref_surface_to_img(np.array(surf_verts)),
                                    range(4)):
                                vx, vy = denormalize(
                                    (vx, vy),
                                    (self.img_shape[1], self.img_shape[0]),
                                    flip_y=True)
                                if sqrt((x - vx)**2 +
                                        (y - vy)**2) < 15:  #img pixels
                                    self.edit_surfaces.append((s, i))

    def advance(self):
        pass

    def add_surface(self, _):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.update_gui_markers()

    def remove_surface(self, i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_gui_markers()

    def recalculate(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark, out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc distribution across all surfaces.
        results = []
        for s in self.surfaces:
            gaze_on_srf = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results, dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255. / max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s, c_map in zip(self.surfaces, results_c_maps):
            heatmap = np.ones((1, 1, 4), dtype=np.uint8) * 125
            heatmap[:, :, :3] = c_map
            s.metrics_texture = create_named_texture(heatmap.shape)
            update_named_texture(s.metrics_texture, heatmap)

    def update(self, frame, events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers == False:
            self.markers = []
            self.seek_marker_cacher(
                frame.index
            )  # tell precacher that it better have everything from here on analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                pass
                # events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf})

        if self.mode == "Show marker IDs":
            draw_markers(frame.img, self.markers)

        # edit surfaces by user
        if self.mode == "Surface edit mode":
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window), flip_y=True)

            for s, v_idx in self.edit_surfaces:
                if s.detected:
                    new_pos = s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx, new_pos)
                    s.cache = None
                    self.heatmap = None
        else:
            # update srf with no or invalid cache:
            for s in self.surfaces:
                if s.cache == None:
                    s.init_cache(self.cache)

        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()

    def init_marker_cacher(self):
        forking_enable(0)  #for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path = os.path.join(self.g_pool.rec_dir, 'world.mkv')
        if not os.path.isfile(video_file_path):
            video_file_path = os.path.join(self.g_pool.rec_dir, 'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i', 0)
        self.cacher_run = Value(c_bool, True)
        self.cacher = Process(target=fill_cache,
                              args=(visited_list, video_file_path,
                                    self.cache_queue, self.cacher_seek_idx,
                                    self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx, c_m = self.cache_queue.get()
            self.cache.update(idx, c_m)
            for s in self.surfaces:
                s.update_cache(self.cache, idx=idx)

    def seek_marker_cacher(self, idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()
        for s in self.surfaces:
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.mode == "Show Markers and Frames":
            for m in self.markers:
                hat = np.array([[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]],
                               dtype=np.float32)
                hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
                draw_polyline(hat.reshape((5, 2)),
                              color=RGBA(0.1, 1., 1., .3),
                              line_type=GL_POLYGON)
                draw_polyline(hat.reshape((5, 2)), color=RGBA(0.1, 1., 1., .6))

            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)

        if self.mode == "Surface edit mode":
            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)
                s.gl_draw_corners()

        if self.mode == "Show Heatmaps":
            for s in self.surfaces:
                s.gl_display_heatmap()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges:  # [[0,1],[3,4]]
            cached_ranges += (r[0], 0), (r[1], 0)  #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges:  # [[0,1],[3,4]]
                    found_at += (r[0], 0), (r[1], 0
                                            )  #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width, height = self.win_size
        h_pad = padding * (self.cache.length - 2) / float(width)
        v_pad = padding * 1. / (height - 2)
        glOrtho(
            -h_pad, (self.cache.length - 1) + h_pad, -v_pad, 1 + v_pad, -1, 1
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = RGBA(.8, .6, .2, .8)
        draw_polyline(cached_ranges,
                      color=color,
                      line_type=GL_LINES,
                      thickness=4)

        color = RGBA(0., .7, .3, .8)

        for s in cached_surfaces:
            glTranslatef(0, .02, 0)
            draw_polyline(s, color=color, line_type=GL_LINES, thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

    def save_surface_statsics_to_file(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        """
        between in and out mark

            report: gaze distribution:
                    - total gazepoints
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                fixations_on_name.csv
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        section = slice(in_mark, out_mark)

        metrics_dir = os.path.join(self.g_pool.rec_dir,
                                   "metrics_%s-%s" % (in_mark, out_mark))
        logger.info("exporting metrics to %s" % metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s" % metrics_dir)
                return

        with open(os.path.join(metrics_dir, 'surface_visibility.csv'),
                  'wb') as csvfile:
            csv_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count', frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name', 'visible_frame_count'))
            for s in self.surfaces:
                if s.cache == None:
                    logger.warning(
                        "The surface is not cached. Please wait for the cacher to collect data."
                    )
                    return
                visible_count = s.visible_count_in_section(section)
                csv_writer.writerow((s.name, visible_count))
            logger.info("Created 'surface_visibility.csv' file")

        with open(os.path.join(metrics_dir, 'surface_gaze_distribution.csv'),
                  'wb') as csvfile:
            csv_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(
                chain(*self.g_pool.gaze_positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(
                ('total_gaze_point_count', len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name', 'gaze_count'))

            for s in self.surfaces:
                gaze_on_srf = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set(
                    [gp['base']["timestamp"] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow((s.name, len(gaze_on_srf)))

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf)))
            logger.info("Created 'surface_gaze_distribution.csv' file")

        with open(os.path.join(metrics_dir, 'surface_events.csv'),
                  'wb') as csvfile:
            csv_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)

            # surface events report
            csv_writer.writerow(('frame_number', 'timestamp', 'surface_name',
                                 'surface_uid', 'event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id, exit_frame_id in s.cache.positive_ranges:
                    events.append({
                        'frame_id': enter_frame_id,
                        'srf_name': s.name,
                        'srf_uid': s.uid,
                        'event': 'enter'
                    })
                    events.append({
                        'frame_id': exit_frame_id,
                        'srf_name': s.name,
                        'srf_uid': s.uid,
                        'event': 'exit'
                    })

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow(
                    (e['frame_id'], self.g_pool.timestamps[e['frame_id']],
                     e['srf_name'], e['srf_uid'], e['event']))
            logger.info("Created 'surface_events.csv' file")

        for s in self.surfaces:
            # per surface names:
            surface_name = '_' + s.name.replace('/', '') + '_' + s.uid

            # save surface_positions as pickle file
            save_object(
                s.cache.to_list(),
                os.path.join(metrics_dir, 'srf_positions' + surface_name))

            #save surface_positions as csv
            with open(
                    os.path.join(metrics_dir,
                                 'srf_positions' + surface_name + '.csv'),
                    'wb') as csvfile:
                csv_writer = csv.writer(csvfile,
                                        delimiter='\t',
                                        quotechar='|',
                                        quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx', 'timestamp', 'm_to_screen',
                                     'm_from_screen', 'detected_markers'))
                for idx, ts, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)),
                        self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow(
                                (idx, ts, ref_srf_data['m_to_screen'],
                                 ref_srf_data['m_from_screen'],
                                 ref_srf_data['detected_markers']))

            # save gaze on srf as csv.
            with open(
                    os.path.join(
                        metrics_dir,
                        'gaze_positions_on_surface' + surface_name + '.csv'),
                    'wb') as csvfile:
                csv_writer = csv.writer(csvfile,
                                        delimiter='\t',
                                        quotechar='|',
                                        quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(
                    ('world_timestamp', 'world_frame_idx', 'gaze_timestamp',
                     'x_norm', 'y_norm', 'x_scaled', 'y_scaled', 'on_srf'))
                for idx, ts, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)),
                        self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(
                                    idx, ref_srf_data['m_from_screen']):
                                csv_writer.writerow(
                                    (ts, idx, gp['base']['timestamp'],
                                     gp['norm_pos'][0], gp['norm_pos'][1],
                                     gp['norm_pos'][0] *
                                     s.real_world_size['x'],
                                     gp['norm_pos'][1] *
                                     s.real_world_size['y'], gp['on_srf']))

            # # save fixation on srf as csv.
            with open(
                    os.path.join(
                        metrics_dir,
                        'fixations_on_surface' + surface_name + '.csv'),
                    'wb') as csvfile:
                csv_writer = csv.writer(csvfile,
                                        delimiter='\t',
                                        quotechar='|',
                                        quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(
                    ('id', 'start_timestamp', 'duration', 'start_frame',
                     'end_frame', 'norm_pos_x', 'norm_pos_y', 'x_scaled',
                     'y_scaled', 'on_srf'))
                fixations_on_surface = []
                for idx, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)), s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for f in s.fixations_on_srf_by_frame_idx(
                                    idx, ref_srf_data['m_from_screen']):
                                fixations_on_surface.append(f)

                removed_dublicates = dict([
                    (f['base']['id'], f) for f in fixations_on_surface
                ]).values()
                for f_on_s in removed_dublicates:
                    f = f_on_s['base']
                    f_x, f_y = f_on_s['norm_pos']
                    f_on_srf = f_on_s['on_srf']
                    csv_writer.writerow(
                        (f['id'], f['timestamp'], f['duration'],
                         f['start_frame_index'], f['end_frame_index'], f_x,
                         f_y, f_x * s.real_world_size['x'],
                         f_y * s.real_world_size['y'], f_on_srf))

            logger.info(
                "Saved surface position, gaze and fixation data for '%s' with uid:'%s'"
                % (s.name, s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(
                    os.path.join(metrics_dir,
                                 'heatmap' + surface_name + '.png'), s.heatmap)

        logger.info("Done exporting reference surface data.")
        # if s.detected and self.img is not None:
        #     #let save out the current surface image found in video

        #     #here we get the verts of the surface quad in norm_coords
        #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
        #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
        #     #now we convert to image pixel coods
        #     screen_space[:,1] = 1-screen_space[:,1]
        #     screen_space[:,1] *= self.img.shape[0]
        #     screen_space[:,0] *= self.img.shape[1]
        #     s_0,s_1 = s.real_world_size
        #     #no we need to flip vertically again by setting the mapped_space verts accordingly.
        #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
        #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
        #     #here we do the actual perspactive transform of the image.
        #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
        #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
        #     logger.info("Saved current image as .png file.")
        # else:
        #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)

    def get_init_dict(self):
        return {'mode': self.mode}

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """

        self.surface_definitions["offline_square_marker_surfaces"] = [
            rs.save_to_dict() for rs in self.surfaces if rs.defined
        ]
        self.surface_definitions.close()

        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        for s in self.surfaces:
            s.close_window()
        self.deinit_gui()
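
The init_marker_cacher / update_marker_cache / seek_marker_cacher / close_marker_cacher methods in Example #13 together form a producer-consumer loop: a worker process scans frames and pushes (index, result) pairs through a Queue, while shared Values let the main process tell the worker to jump ahead or stop. The sketch below condenses that pattern into a runnable toy; fill_cache here is a stand-in for illustration, not the real marker_detector_cacher.fill_cache.

from multiprocessing import Process, Queue, Value
from ctypes import c_bool
import time

def fill_cache(visited_list, cache_queue, seek_idx, run_flag):
    # toy stand-in: "search" every unvisited frame and report an empty marker list
    idx = 0
    while run_flag.value and idx < len(visited_list):
        if seek_idx.value:
            idx, seek_idx.value = seek_idx.value, 0  # main process asked us to jump ahead
        if not visited_list[idx]:
            cache_queue.put((idx, []))
            visited_list[idx] = True
        idx += 1
        time.sleep(0.001)

if __name__ == '__main__':
    n_frames = 100
    cache = [False] * n_frames
    cache_queue = Queue()
    cacher_seek_idx = Value('i', 0)
    cacher_run = Value(c_bool, True)
    cacher = Process(target=fill_cache,
                     args=(list(cache), cache_queue, cacher_seek_idx, cacher_run))
    cacher.start()
    time.sleep(0.5)
    while not cache_queue.empty():           # the update_marker_cache() step
        idx, markers = cache_queue.get()
        cache[idx] = markers
    cacher_run.value = False                 # the close_marker_cacher() step
    cacher.join()
    print('frames cached:', sum(1 for entry in cache if entry is not False))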
Example #14
class Offline_Surface_Tracker(Surface_Tracker):
    """
    Special version of surface tracker for use with videofile source.
    It uses a separate process to search all frames in the world video file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """

    def __init__(self,g_pool,mode="Show Markers and Surfaces",min_marker_perimeter = 100,invert_image=False,robust_detection=True):
        super(Offline_Surface_Tracker, self).__init__(g_pool,mode,min_marker_perimeter,robust_detection)
        self.order = .2

        if g_pool.app == 'capture':
           raise Exception('For Player only.')

        self.marker_cache_version = 2
        self.min_marker_perimeter_cacher = 20  #find even super small markers. The surface locator will filter using min_marker_perimeter
        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        version = self.persistent_cache.get('version',0)
        cache = self.persistent_cache.get('marker_cache',None)
        if cache is None:
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            self.persistent_cache['version'] = self.marker_cache_version
        elif version != self.marker_cache_version:
            self.persistent_cache['version'] = self.marker_cache_version
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            logger.debug("Marker cache version mismatch. Rebuilding marker cache.")
        else:
            self.cache = Cache_List(cache)
            logger.debug("Loaded marker cache {} / {} frames had been searched before".format(len(self.cache)-self.cache.count(False),len(self.cache)) )

        self.init_marker_cacher()
        for s in self.surfaces:
            s.init_cache(self.cache,self.camera_calibration,self.min_marker_perimeter,self.min_id_confidence)
        self.recalculate()

    def load_surface_definitions_from_file(self):
        self.surface_definitions = Persistent_Dict(os.path.join(self.g_pool.rec_dir,'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []


    def init_gui(self):
        self.menu = ui.Scrolling_Menu('Offline Surface Tracker')
        self.g_pool.gui.append(self.menu)
        self.add_button = ui.Thumb('add_surface',setter=lambda x: self.add_surface(),getter=lambda:False,label='A',hotkey='a')
        self.g_pool.quickbar.append(self.add_button)
        self.update_gui_markers()

        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))

    def deinit_gui(self):
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu= None
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
        def close():
            self.alive=False

        def set_min_marker_perimeter(val):
            self.min_marker_perimeter = val
            self.notify_all({'subject':'min_marker_perimeter_changed','delay':1})

        self.menu.elements[:] = []
        self.menu.append(ui.Button('Close',close))
        self.menu.append(ui.Slider('min_marker_perimeter',self,min=20,max=500,step=1,setter=set_min_marker_perimeter))
        self.menu.append(ui.Info_Text('The offline surface tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'))
        self.menu.append(ui.Info_Text("Press the export button or type 'e' to start the export."))
        self.menu.append(ui.Selector('mode',self,label='Mode',selection=["Show Markers and Surfaces","Show marker IDs","Show Heatmaps","Show Metrics"] ))
        self.menu.append(ui.Info_Text('To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'))
        self.menu.append(ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(ui.Button("Add surface", lambda:self.add_surface()))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface {}".format(idx))
            s_menu.collapsed=True
            s_menu.append(ui.Text_Input('name',s))
            s_menu.append(ui.Text_Input('x',s.real_world_size,label='X size'))
            s_menu.append(ui.Text_Input('y',s.real_world_size,label='Y size'))
            s_menu.append(ui.Button('Open Debug Window',s.open_close_window))
            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)
            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove',remove_s))
            self.menu.append(s_menu)


    def on_notify(self,notification):
        if notification['subject'] == 'gaze_positions_changed':
            logger.info('Gaze positions changed. Recalculating.')
            self.recalculate()
        if notification['subject'] == 'min_data_confidence_changed':
            logger.info('Min_data_confidence changed. Recalculating.')
            self.recalculate()
        elif notification['subject'] == 'surfaces_changed':
            logger.info('Surfaces changed. Recalculating.')
            self.recalculate()
        elif notification['subject'] == 'min_marker_perimeter_changed':
            logger.info('Min marker perimeter adjusted. Re-detecting surfaces.')
            self.invalidate_surface_caches()
        elif notification['subject'] == "should_export":
            self.save_surface_statsics_to_file(notification['range'],notification['export_dir'])


    def on_window_resize(self,window,w,h):
        self.win_size = w,h


    def add_surface(self):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.update_gui_markers()

    def recalculate(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc distribution across all surfaces.
        results = []
        for s in self.surfaces:
            gaze_on_srf  = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255./max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s,c_map in zip(self.surfaces,results_c_maps):
            heatmap = np.ones((1,1,4),dtype=np.uint8)*125
            heatmap[:,:,:3] = c_map
            s.metrics_texture = Named_Texture()
            s.metrics_texture.update_from_ndarray(heatmap)


    def invalidate_surface_caches(self):
        for s in self.surfaces:
            s.cache = None

    def update(self,frame,events):
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        # self.markers = [m for m in self.cache[frame.index] if m['perimeter'>=self.min_marker_perimeter]
        self.markers = self.cache[frame.index]
        if self.markers == False:
            self.markers = []
            self.seek_marker_cacher(frame.index) # tell precacher that it better have everything from here on analyzed


        events['surfaces'] = []
        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers,self.camera_calibration,self.min_marker_perimeter,self.min_id_confidence)
            if s.detected:
                events['surfaces'].append({'name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp})

        if self.mode == "Show marker IDs":
            draw_markers(frame.img,self.markers)

        elif self.mode == "Show Markers and Surfaces":
            # edit surfaces by user
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)
                for s,v_idx in self.edit_surf_verts:
                    if s.detected:
                        new_pos =  s.img_to_ref_surface(np.array(pos))
                        s.move_vertex(v_idx,new_pos)
            else:
                # update srf with no or invalid cache:
                for s in self.surfaces:
                    if s.cache == None and s not in [s for s,i in self.edit_surf_verts]:
                        s.init_cache(self.cache,self.camera_calibration,self.min_marker_perimeter,self.min_id_confidence)
                        self.notify_all({'subject':'surfaces_changed','delay':1})



        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()


    def init_marker_cacher(self):
        if system() == 'Darwin':
            forking_enable(0)

        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path =  self.g_pool.capture.source_path
        timestamps = self.g_pool.capture.timestamps
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i',0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,timestamps,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,camera_calibration=self.camera_calibration,min_marker_perimeter=self.min_marker_perimeter,min_id_confidence=self.min_id_confidence,idx=idx)
            if not self.cacher_run.value:
                self.recalculate()

    def seek_marker_cacher(self,idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()

        super(Offline_Surface_Tracker,self).gl_display()

        if self.mode == "Show Heatmaps":
            for s in  self.surfaces:
                s.gl_display_heatmap()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        """
        padding = 30.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        glOrtho(-h_pad,  (self.cache.length-1)+h_pad, -v_pad, 1+v_pad,-1,1) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)


        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = RGBA(.8,.6,.2,.8)
        draw_polyline(cached_ranges,color=color,line_type=GL_LINES,thickness=4)

        color = RGBA(0,.7,.3,.8)

        for s in cached_surfaces:
            glTranslatef(0,.02,0)
            draw_polyline(s,color=color,line_type=GL_LINES,thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()


    def save_surface_statsics_to_file(self,export_range,export_dir):
        """
        between in and out mark

            report: gaze distribution:
                    - total gaze points
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                fixations_on_name.csv
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        metrics_dir = os.path.join(export_dir,'surfaces')
        section = export_range
        in_mark = export_range.start
        out_mark = export_range.stop
        logger.info("exporting metrics to {}".format(metrics_dir))
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir {}".format(metrics_dir))
                return


        with open(os.path.join(metrics_dir,'surface_visibility.csv'),'w',encoding='utf-8',newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count',frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','visible_frame_count'))
            for s in self.surfaces:
                if s.cache is None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count  = s.visible_count_in_section(section)
                csv_writer.writerow( (s.name, visible_count) )
            logger.info("Created 'surface_visibility.csv' file")


        with open(os.path.join(metrics_dir,'surface_gaze_distribution.csv'),'w',encoding='utf-8',newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.gaze_positions_by_frame[section]))
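            # start with every gaze timestamp in the section; timestamps that land on a surface are subtracted below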
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(('total_gaze_point_count',len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','gaze_count'))

            for s in self.surfaces:
                gaze_on_srf  = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp['base_data']['timestamp'] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow( (s.name, len(gaze_on_srf)) )

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf) ) )
            logger.info("Created 'surface_gaze_distribution.csv' file")



        with open(os.path.join(metrics_dir,'surface_events.csv'),'w',encoding='utf-8',newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # surface events report
            csv_writer.writerow(('frame_number','timestamp','surface_name','surface_uid','event_type'))

            events = []
            for s in self.surfaces:
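                # each positive range is a contiguous run of frames in which the surface was detected; its bounds become enter/exit events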
                for enter_frame_id,exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id':enter_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'enter'})
                    events.append({'frame_id':exit_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'exit'})

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow( ( e['frame_id'],self.g_pool.timestamps[e['frame_id']],e['srf_name'],e['srf_uid'],e['event'] ) )
            logger.info("Created 'surface_events.csv' file")


        for s in self.surfaces:
            # per surface names:
            surface_name = '_'+s.name.replace('/','')+'_'+s.uid


            # save surface_positions as pickle file
            save_object(s.cache.to_list(),os.path.join(metrics_dir,'srf_positions'+surface_name))

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,'srf_positions'+surface_name+'.csv'),'w',encoding='utf-8',newline='') as csvfile:
                csv_writer =csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(('frame_idx','timestamp','m_to_screen','m_from_screen','detected_markers'))
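                # walk frame index, timestamp, and cached surface datum in lockstep; only frames inside the export range are written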
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow( (idx,ts,ref_srf_data['m_to_screen'],ref_srf_data['m_from_screen'],ref_srf_data['detected_markers']) )


            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir,'gaze_positions_on_surface'+surface_name+'.csv'),'w',encoding='utf-8',newline='') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(('world_timestamp','world_frame_idx','gaze_timestamp','x_norm','y_norm','x_scaled','y_scaled','on_srf'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
                                csv_writer.writerow( (ts,idx,gp['base_data']['timestamp'],gp['norm_pos'][0],gp['norm_pos'][1],gp['norm_pos'][0]*s.real_world_size['x'],gp['norm_pos'][1]*s.real_world_size['y'],gp['on_srf']) )


            # save fixation on srf as csv.
            with open(os.path.join(metrics_dir,'fixations_on_surface'+surface_name+'.csv'),'w',encoding='utf-8',newline='') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(('id','start_timestamp','duration','start_frame','end_frame','norm_pos_x','norm_pos_y','x_scaled','y_scaled','on_srf'))
                fixations_on_surface = []
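                # collect every fixation datum mapped onto this surface; a fixation spanning several frames appears multiple times and is de-duplicated by fixation id below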
                for idx,ref_srf_data in zip(range(len(self.g_pool.timestamps)),s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for f in s.fixations_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
                                fixations_on_surface.append(f)

                removed_duplicates = dict([(f['base_data']['id'],f) for f in fixations_on_surface]).values()
                for f_on_s in removed_duplicates:
                    f = f_on_s['base_data']
                    f_x,f_y = f_on_s['norm_pos']
                    f_on_srf = f_on_s['on_srf']
                    csv_writer.writerow( (f['id'],f['timestamp'],f['duration'],f['start_frame_index'],f['end_frame_index'],f_x,f_y,f_x*s.real_world_size['x'],f_y*s.real_world_size['y'],f_on_srf) )


            logger.info("Saved surface positon gaze and fixation data for '{}' with uid:'{}'".format(s.name,s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir,'heatmap'+surface_name+'.png'),s.heatmap)


        logger.info("Done exporting reference surface data.")
        # if s.detected and self.img is not None:
        #     #let's save out the current surface image found in video

        #     #here we get the verts of the surface quad in norm_coords
        #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
        #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
        #     #now we convert to image pixel coords
        #     screen_space[:,1] = 1-screen_space[:,1]
        #     screen_space[:,1] *= self.img.shape[0]
        #     screen_space[:,0] *= self.img.shape[1]
        #     s_0,s_1 = s.real_world_size
        #     #now we need to flip vertically again by setting the mapped_space verts accordingly.
        #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
        #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
        #     #here we do the actual perspective transform of the image.
        #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
        #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
        #     logger.info("Saved current image as .png file.")
        # else:
        #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)


    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """

        self.surface_definitions["offline_square_marker_surfaces"] = [rs.save_to_dict() for rs in self.surfaces if rs.defined]
        self.surface_definitions.close()

        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        for s in self.surfaces:
            s.close_window()
        self.deinit_gui()
Example #15
0
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with a video file source.
    It uses a separate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """

    def __init__(self,g_pool,menu_conf={'pos':(300,200),'size':(300,300),'collapsed':False},mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        self.menu_conf = menu_conf
        self.order = .2


        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
           raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.rec_dir,'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []


        # ui mode settings
        self.mode = mode
        # edit surfaces
        self.edit_surfaces = []


        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache',[False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)

        self.img_shape = None
        self.img = None



    def init_gui(self):
        self.menu = ui.Scrolling_Menu('Offline Marker Tracker')
        self.menu.configuration = self.menu_conf
        self.g_pool.gui.append(self.menu)


        self.add_button = ui.Thumb('add_surface',setter=self.add_surface,getter=lambda:False,label='Add Surface',hotkey='a')
        self.g_pool.quickbar.append(self.add_button)
        self.update_gui_markers()

        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))

    def deinit_gui(self):
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu_conf= self.menu.configuration
            self.menu= None
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
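        # rebuild the plugin menu from scratch: global controls first, then one collapsible sub-menu per surface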
        self.menu.elements[:] = []
        self.menu.append(ui.Info_Text('The offline marker tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'))
        self.menu.append(ui.Button('Close',self.close))
        self.menu.append(ui.Selector('mode',self,label='Mode',selection=["Show Markers and Frames","Show marker IDs", "Surface edit mode","Show Heatmaps","Show Metrics"] ))
        self.menu.append(ui.Info_Text('To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'))
        self.menu.append(ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(ui.Button("Export gaze and surface data", self.save_surface_statsics_to_file))
        self.menu.append(ui.Button("Add surface", lambda:self.add_surface('_')))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface %s"%idx)
            s_menu.collapsed=True
            s_menu.append(ui.Text_Input('name',s))
            #     self._bar.add_var("%s_markers"%i,create_string_buffer(512), getter=s.atb_marker_status,group=str(i),label='found/registered markers' )
            s_menu.append(ui.Text_Input('x',s.real_world_size,label='X size'))
            s_menu.append(ui.Text_Input('y',s.real_world_size,label='Y size'))
            s_menu.append(ui.Button('Open Debug Window',s.open_close_window))
            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)
            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove',remove_s))
            self.menu.append(s_menu)



    def close(self):
        self.alive = False

    def on_window_resize(self,window,w,h):
        self.win_size = w,h

    def on_click(self,pos,button,action):
        if self.mode=="Surface edit mode":
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surface verts in edit mode; let's see if the cursor is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0.,0.),(1.,0.),(1.,1.),(0.,1.))
                    x,y = pos
                    for s in self.surfaces:
                        if s.detected and s.defined:
                            for (vx,vy),i in zip(s.ref_surface_to_img(np.array(surf_verts)),range(4)):
                                vx,vy = denormalize((vx,vy),(self.img_shape[1],self.img_shape[0]),flip_y=True)
                                if sqrt((x-vx)**2 + (y-vy)**2) <15: #img pixels
                                    self.edit_surfaces.append((s,i))

    def advance(self):
        pass

    def add_surface(self,_):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.update_gui_markers()

    def remove_surface(self,i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_gui_markers()


    def recalculate(self):
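        # recompute per-surface heatmaps and gaze-count metrics for the currently trimmed section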

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc metrics:
        results = []
        for s in self.surfaces:
            gaze_on_srf  = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if not results:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255./max_res
        results = np.uint8(results)
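        # map the normalized gaze counts to a JET false-color scale, one color per surface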
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s,c_map in zip(self.surfaces,results_c_maps):
            heatmap = np.ones((1,1,4),dtype=np.uint8)*125
            heatmap[:,:,:3] = c_map
            s.metrics_texture = create_named_texture(heatmap.shape)
            update_named_texture(s.metrics_texture,heatmap)




    def update(self,frame,events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers is False:
            self.markers = []
            self.seek_marker_cacher(frame.index)  # tell the precacher to make sure everything from here onwards gets analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                pass
                # events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf})

        if self.mode == "Show marker IDs":
            draw_markers(frame.img,self.markers)

        # edit surfaces by user
        if self.mode == "Surface edit mode":
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)

            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    new_pos =  s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)
                    s.cache = None
                    self.heatmap = None
        else:
            # update surfaces with no or invalid cache:
            for s in self.surfaces:
                if s.cache is None:
                    s.init_cache(self.cache)


        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()


    def init_marker_cacher(self):
        forking_enable(0) #required on macOS only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path =  os.path.join(self.g_pool.rec_dir,'world.mkv')
        if not os.path.isfile(video_file_path):
            video_file_path =  os.path.join(self.g_pool.rec_dir,'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i',0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
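        # drain the cacher's result queue and merge freshly detected markers into the session cache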
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,idx=idx)

    def seek_marker_cacher(self,idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()
        for s in self.surfaces:
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.mode == "Show Markers and Frames":
            for m in self.markers:
                hat = np.array([[[0,0],[0,1],[1,1],[1,0],[0,0]]],dtype=np.float32)
                hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
                draw_polyline(hat.reshape((5,2)),color=RGBA(0.1,1.,1.,.3),line_type=GL_POLYGON)
                draw_polyline(hat.reshape((5,2)),color=RGBA(0.1,1.,1.,.6))

            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)

        if self.mode == "Surface edit mode":
            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)
                s.gl_draw_corners()

        if self.mode == "Show Heatmaps":
            for s in  self.surfaces:
                s.gl_display_heatmap()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        glOrtho(-h_pad,  (self.cache.length-1)+h_pad, -v_pad, 1+v_pad,-1,1) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)


        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = RGBA(.8,.6,.2,.8)
        draw_polyline(cached_ranges,color=color,line_type=GL_LINES,thickness=4)

        color = RGBA(0.,.7,.3,.8)

        for s in cached_surfaces:
            glTranslatef(0,.02,0)
            draw_polyline(s,color=color,line_type=GL_LINES,thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()


    def save_surface_statsics_to_file(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark


        """
        between in and out mark

            report: gaze distribution:
                    - total gaze points
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        section = slice(in_mark,out_mark)


        metrics_dir = os.path.join(self.g_pool.rec_dir,"metrics_%s-%s"%(in_mark,out_mark))
        logger.info("exporting metrics to %s"%metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s"%metrics_dir)
                return


        with open(os.path.join(metrics_dir,'surface_visibility.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count',frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','visible_frame_count'))
            for s in self.surfaces:
                if s.cache is None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count  = s.visible_count_in_section(section)
                csv_writer.writerow( (s.name, visible_count) )
            logger.info("Created 'surface_visibility.csv' file")


        with open(os.path.join(metrics_dir,'surface_gaze_distribution.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.gaze_positions_by_frame[section]))
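            # start with every gaze timestamp in the section; timestamps that land on a surface are subtracted below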
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(('total_gaze_point_count',len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','gaze_count'))

            for s in self.surfaces:
                gaze_on_srf  = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp["timestamp"] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow( (s.name, len(gaze_on_srf)) )

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf) ) )
            logger.info("Created 'surface_gaze_distribution.csv' file")



        with open(os.path.join(metrics_dir,'surface_events.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface events report
            csv_writer.writerow(('frame_number','timestamp','surface_name','surface_uid','event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id,exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id':enter_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'enter'})
                    events.append({'frame_id':exit_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'exit'})

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow( ( e['frame_id'],self.g_pool.timestamps[e['frame_id']],e['srf_name'],e['srf_uid'],e['event'] ) )
            logger.info("Created 'surface_events.csv' file")


        for s in self.surfaces:
            # per surface names:
            surface_name = '_'+s.name.replace('/','')+'_'+s.uid


            # save surface_positions as pickle file
            save_object(s.cache.to_list(),os.path.join(metrics_dir,'srf_positions'+surface_name))

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,'srf_positions'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer =csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx','timestamp','m_to_screen','m_from_screen','detected_markers'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow( (idx,ts,ref_srf_data['m_to_screen'],ref_srf_data['m_from_screen'],ref_srf_data['detected_markers']) )


            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir,'gaze_positions_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('world_frame_idx','world_timestamp','eye_timestamp','x_norm','y_norm','x_scaled','y_scaled','on_srf'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
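                                # gaze counts as on the surface when its normalized coordinates fall inside the unit square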
                                gp_x,gp_y = gp['norm_pos']
                                on_srf = (0 <= gp_x <= 1) and (0 <= gp_y <= 1)
                                csv_writer.writerow( (idx,ts,gp['timestamp'],gp_x,gp_y,gp_x*s.real_world_size['x'],gp_y*s.real_world_size['y'],on_srf) )

            logger.info("Saved surface positon data and gaze on surface data for '%s' with uid:'%s'"%(s.name,s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir,'heatmap'+surface_name+'.png'),s.heatmap)

            # if s.detected and self.img is not None:
            #     #let's save out the current surface image found in video

            #     #here we get the verts of the surface quad in norm_coords
            #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
            #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
            #     #now we convert to image pixel coords
            #     screen_space[:,1] = 1-screen_space[:,1]
            #     screen_space[:,1] *= self.img.shape[0]
            #     screen_space[:,0] *= self.img.shape[1]
            #     s_0,s_1 = s.real_world_size
            #     #now we need to flip vertically again by setting the mapped_space verts accordingly.
            #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
            #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
            #     #here we do the actual perspective transform of the image.
            #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
            #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
            #     logger.info("Saved current image as .png file.")
            # else:
            #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)


    def get_init_dict(self):
        if self.menu:
            d = {'menu_conf':self.menu.configuration,'mode':self.mode}
        else:
            d = {'menu_conf':self.menu_conf,'mode':self.mode}
        return d



    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """

        self.surface_definitions["offline_square_marker_surfaces"] = [rs.save_to_dict() for rs in self.surfaces if rs.defined]
        self.surface_definitions.close()

        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        for s in self.surfaces:
            s.close_window()
        self.deinit_gui()
Example #16
0
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with a video file source.
    It uses a separate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """

    def __init__(self,g_pool,gui_settings={'pos':(220,200),'size':(300,300),'iconified':False}):
        super(Offline_Marker_Detector, self).__init__()
        self.g_pool = g_pool
        self.gui_settings = gui_settings
        self.order = .2


        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
           raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.rec_dir,'surface_definitions'))
        if self.load('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d,gaze_positions_by_frame=self.g_pool.positions_by_frame) for d in self.load('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.load('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d,gaze_positions_by_frame=self.g_pool.positions_by_frame) for d in self.load('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []


        # ui mode settings
        self.mode = c_int(0)
        # edit surfaces
        self.edit_surfaces = []

        #detector vars
        self.robust_detection = c_bool(1)
        self.aperture = c_int(11)
        self.min_marker_perimeter = 80

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache',[False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)
        self.recent_pupil_positions = []

        self.img_shape = None
        self.img = None


    def init_gui(self):
        import atb
        pos = self.gui_settings['pos']
        atb_label = "Marker Detector"
        self._bar = atb.Bar(name =self.__class__.__name__+str(id(self)), label=atb_label,
            help="circle", color=(50, 150, 50), alpha=50,
            text='light', position=pos,refresh=.1, size=self.gui_settings['size'])
        self._bar.iconified = self.gui_settings['iconified']
        self.update_bar_markers()

        #set up bar display padding
        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))


    def unset_alive(self):
        self.alive = False

    def load(self, var_name, default):
        return self.surface_definitions.get(var_name,default)

    def save(self, var_name, var):
        self.surface_definitions[var_name] = var

    def on_window_resize(self,window,w,h):
        self.win_size = w,h


    def on_click(self,pos,button,action):
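        # mode value 1 corresponds to "Surface edit mode" in the ATB mode enum (see update_bar_markers)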
        if self.mode.value == 1:
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surface verts in edit mode; let's see if the cursor is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0.,0.),(1.,0.),(1.,1.),(0.,1.))
                    x,y = pos
                    for s in self.surfaces:
                        if s.detected:
                            for (vx,vy),i in zip(s.ref_surface_to_img(np.array(surf_verts)),range(4)):
                                vx,vy = denormalize((vx,vy),(self.img_shape[1],self.img_shape[0]),flip_y=True)
                                if sqrt((x-vx)**2 + (y-vy)**2) <15: #img pixels
                                    self.edit_surfaces.append((s,i))

    def advance(self):
        pass

    def add_surface(self):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool,gaze_positions_by_frame=self.g_pool.positions_by_frame))
        self.update_bar_markers()

    def remove_surface(self,i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_bar_markers()

    def update_bar_markers(self):
        self._bar.clear()
        self._bar.add_button('close',self.unset_alive)
        self._bar.add_button("  add surface   ", self.add_surface, key='a')
        # rebuild whenever the cache is updated, a surface is edited, or the trim marks change.
        # dropdown menu: markers and surfaces, surface edit mode, heatmaps, metrics
        self._bar.mode_enum = atb.enum("Mode",{"Show Markers and Frames":0,"Show Marker Id's":4, "Surface edit mode":1,"Show Heatmaps":2,"Show Metrics":3})
        self._bar.add_var("Mode",self.mode,vtype=self._bar.mode_enum)
        self._bar.add_button("  (re)-calculate gaze distributions   ", self.recalculate)
        self._bar.add_button("   Export Gaze and Surface Data   ", self.save_surface_statsics_to_file)

        for s,i in zip(self.surfaces,range(len(self.surfaces)))[::-1]:
            self._bar.add_var("%s_name"%i,create_string_buffer(512),getter=s.atb_get_name,setter=s.atb_set_name,group=str(i),label='name')
            self._bar.add_var("%s_markers"%i,create_string_buffer(512), getter=s.atb_marker_status,group=str(i),label='found/registered markers' )
            self._bar.add_var("%s_x_scale"%i,vtype=c_float, getter=s.atb_get_scale_x, min=1,setter=s.atb_set_scale_x,group=str(i),label='real width', help='this scale factor is used to adjust the coordinate space for your needs (think photo pixels or mm or whatever)' )
            self._bar.add_var("%s_y_scale"%i,vtype=c_float, getter=s.atb_get_scale_y,min=1,setter=s.atb_set_scale_y,group=str(i),label='real height',help='defining x and y scale factor you atumatically set the correct aspect ratio.' )
            self._bar.add_var("%s_window"%i,setter=s.toggle_window,getter=s.window_open,group=str(i),label='open in window')
            # self._bar.add_button("%s_hm"%i, s.generate_heatmap, label='generate_heatmap',group=str(i))
            # self._bar.add_button("%s_export"%i, self.save_surface_positions_to_file,data=i, label='export surface data',group=str(i))
            self._bar.add_button("%s_remove"%i, self.remove_surface,data=i,label='remove',group=str(i))


    def recalculate(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc metrics:
        gaze_in_section = list(chain(*self.g_pool.positions_by_frame[section]))
        results = []
        for s in self.surfaces:
            gaze_on_srf  = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if not results:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255./max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s,c_map in zip(self.surfaces,results_c_maps):
            heatmap = np.ones((1,1,4),dtype=np.uint8)*125
            heatmap[:,:,:3] = c_map
            s.metrics_texture = create_named_texture(heatmap)




    def update(self,frame,recent_pupil_positions,events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers is False:
            self.markers = []
            self.seek_marker_cacher(frame.index)  # tell the precacher to make sure everything from here onwards gets analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf})

        if self.mode.value == 4:
            draw_markers(frame.img,self.markers)

        # edit surfaces by user
        if self.mode.value == 1:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window))
            pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels

            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    pos = normalize(pos,(self.img_shape[1],self.img_shape[0]),flip_y=True)
                    new_pos =  s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)
                    s.cache = None
                    self.heatmap = None
        else:
            # update surfaces with no or invalid cache:
            for s in self.surfaces:
                if s.cache is None:
                    s.init_cache(self.cache)


        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()


    def init_marker_cacher(self):
        forking_enable(0) #required on macOS only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path =  os.path.join(self.g_pool.rec_dir,'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value(c_int,0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
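        # pull finished frames from the cacher queue and fold the detected markers into the session cache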
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,idx=idx)

    def seek_marker_cacher(self,idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()
        for s in self.surfaces:
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.mode.value in (0,1):
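            # modes 0 and 1 ("Show Markers and Frames" / "Surface edit mode") draw marker outlines and surface frames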
            for m in self.markers:
                hat = np.array([[[0,0],[0,1],[1,1],[1,0],[0,0]]],dtype=np.float32)
                hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
                draw_gl_polyline(hat.reshape((5,2)),(0.1,1.,1.,.3),type='Polygon')
                draw_gl_polyline(hat.reshape((5,2)),(0.1,1.,1.,.6))

            for s in self.surfaces:
                s.gl_draw_frame()

        if self.mode.value == 1:
            for s in  self.surfaces:
                s.gl_draw_corners()
        if self.mode.value == 2:
            for s in  self.surfaces:
                s.gl_display_heatmap()
        if self.mode.value == 3:
            #draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        gluOrtho2D(-h_pad,  (self.cache.length-1)+h_pad, -v_pad, 1+v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)


        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = (.8,.6,.2,.8)
        draw_gl_polyline(cached_ranges,color=color,type='Lines',thickness=4)

        color = (0.,.7,.3,.8)

        for s in cached_surfaces:
            glTranslatef(0,.02,0)
            draw_gl_polyline(s,color=color,type='Lines',thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()


    def save_surface_statsics_to_file(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark


        """
        between in and out mark

            report: gaze distribution:
                    - total gaze points
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        section = slice(in_mark,out_mark)


        metrics_dir = os.path.join(self.g_pool.rec_dir,"metrics_%s-%s"%(in_mark,out_mark))
        logger.info("exporting metrics to %s"%metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s"%metrics_dir)
                return


        with open(os.path.join(metrics_dir,'surface_visibility.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count',frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','visible_frame_count'))
            for s in self.surfaces:
                if s.cache is None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count  = s.visible_count_in_section(section)
                csv_writer.writerow( (s.name, visible_count) )
            logger.info("Created 'surface_visibility.csv' file")


        with open(os.path.join(metrics_dir,'surface_gaze_distribution.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(('total_gaze_point_count',len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','gaze_count'))

            for s in self.surfaces:
                gaze_on_srf  = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp["timestamp"] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow( (s.name, len(gaze_on_srf)) )

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf) ) )
            logger.info("Created 'surface_gaze_distribution.csv' file")



        with open(os.path.join(metrics_dir,'surface_events.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface events report
            csv_writer.writerow(('frame_number','timestamp','surface_name','surface_uid','event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id,exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id':enter_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'enter'})
                    events.append({'frame_id':exit_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'exit'})

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow( ( e['frame_id'],self.g_pool.timestamps[e['frame_id']],e['srf_name'],e['srf_uid'],e['event'] ) )
            logger.info("Created 'surface_events.csv' file")


        for s in self.surfaces:
            # per surface names:
            surface_name = '_'+s.name.replace('/','')+'_'+s.uid


            # save surface_positions as pickle file
            save_object(s.cache.to_list(),os.path.join(metrics_dir,'srf_positions'+surface_name))

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,'srf_positions'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer =csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx','timestamp','m_to_screen','m_from_screen','detected_markers'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow( (idx,ts,ref_srf_data['m_to_screen'],ref_srf_data['m_from_screen'],ref_srf_data['detected_markers']) )


            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir,'gaze_positions_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('world_frame_idx','world_timestamp','eye_timestamp','x_norm','y_norm','x_scaled','y_scaled','on_srf'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in ref_srf_data['gaze_on_srf']:
                                gp_x,gp_y = gp['norm_gaze_on_srf']
                                on_srf = (0 <= gp_x <= 1) and (0 <= gp_y <= 1)
                                csv_writer.writerow( (idx,ts,gp['timestamp'],gp_x,gp_y,gp_x*s.scale_factor[0],gp_y*s.scale_factor[1],on_srf) )

            logger.info("Saved surface positon data and gaze on surface data for '%s' with uid:'%s'"%(s.name,s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir,'heatmap'+surface_name+'.png'),s.heatmap)

            # if s.detected and self.img is not None:
            #     #let's save out the current surface image found in video

            #     #here we get the verts of the surface quad in norm_coords
            #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
            #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
            #     #now we convert to image pixel coords
            #     screen_space[:,1] = 1-screen_space[:,1]
            #     screen_space[:,1] *= self.img.shape[0]
            #     screen_space[:,0] *= self.img.shape[1]
            #     s_0,s_1 = s.scale_factor
            #     #now we need to flip vertically again by setting the mapped_space verts accordingly.
            #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
            #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
            #     #here we do the actual perspective transform of the image.
            #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.scale_factor[0]),int(s.scale_factor[1])) )
            #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
            #     logger.info("Saved current image as .png file.")
            # else:
            #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)


    def get_init_dict(self):
        d = {}
        if hasattr(self,'_bar'):
            gui_settings = {'pos':self._bar.position,'size':self._bar.size,'iconified':self._bar.iconified}
            d['gui_settings'] = gui_settings

        return d

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        If you have an atb bar or glfw window, destroy it here.
        """

        self.save("offline_square_marker_surfaces",[rs.save_to_dict() for rs in self.surfaces if rs.defined])
        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        self.surface_definitions.close()

        for s in self.surfaces:
            s.close_window()
        self._bar.destroy()
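
The exporter above writes tab-delimited rows with a '|' quote character, so any script that reads the files back has to use the same CSV dialect. Below is a minimal sketch (Python 3, with a hypothetical file path) of loading one of the exported gaze_positions_on_surface*.csv files and counting how many gaze points landed on the surface:

import csv

# Illustrative path; the real file name embeds the surface name and uid.
export_path = 'metrics/gaze_positions_on_surface_my_surface_1234.csv'

with open(export_path, 'r', newline='') as csvfile:
    # Match the dialect the exporter used: tab delimiter, '|' quote char.
    reader = csv.DictReader(csvfile, delimiter='\t', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
    total = 0
    on_surface = 0
    for row in reader:
        total += 1
        if row['on_srf'] == 'True':  # booleans were written via str()
            on_surface += 1

print('{} of {} gaze points fell on the surface'.format(on_surface, total))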
Example #17
0
class Offline_Surface_Tracker(Surface_Tracker, Analysis_Plugin_Base):
    """
    Special version of surface tracker for use with videofile source.
    It uses a separate process to search all frames in the world video file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """
    def __init__(self,
                 g_pool,
                 mode="Show Markers and Surfaces",
                 min_marker_perimeter=100,
                 invert_image=False,
                 robust_detection=True):
        super().__init__(
            g_pool,
            mode,
            min_marker_perimeter,
            invert_image,
            robust_detection,
        )
        self.order = .2
        self.marker_cache_version = 2
        self.min_marker_perimeter_cacher = 20  # find even super small markers. The surface locator will filter using min_marker_perimeter
        self.timeline_line_height = 16

        self.load_marker_cache()
        self.init_marker_cacher()
        for s in self.surfaces:
            s.init_cache(self.cache, self.min_marker_perimeter,
                         self.min_id_confidence)
        self.recalculate()

    def load_marker_cache(self):
        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(
            os.path.join(self.g_pool.rec_dir, 'square_marker_cache'))
        version = self.persistent_cache.get('version', 0)
        cache = self.persistent_cache.get('marker_cache', None)
        if cache is None:
            self.cache = Cache_List([False for _ in self.g_pool.timestamps])
            self.persistent_cache['version'] = self.marker_cache_version
            self.persistent_cache['inverted_markers'] = self.invert_image
        elif version != self.marker_cache_version:
            self.persistent_cache['version'] = self.marker_cache_version
            self.invert_image = self.persistent_cache.get(
                'inverted_markers', False)
            self.cache = Cache_List([False for _ in self.g_pool.timestamps])
            logger.debug(
                "Marker cache version missmatch. Rebuilding marker cache.")
        else:
            self.cache = Cache_List(cache)
            # we overwrite the invert_image setting from init with the one saved in the marker cache.
            self.invert_image = self.persistent_cache.get(
                'inverted_markers', False)
            logger.debug(
                "Loaded marker cache {} / {} frames had been searched before".
                format(
                    len(self.cache) - self.cache.count(False),
                    len(self.cache)))

    def clear_marker_cache(self):
        self.cache = Cache_List([False for _ in self.g_pool.timestamps])
        self.persistent_cache['version'] = self.marker_cache_version

    def load_surface_definitions_from_file(self):
        self.surface_definitions = Persistent_Dict(
            os.path.join(self.g_pool.rec_dir, 'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',
                                        []) != []:
            logger.debug(
                "Found ref surfaces defined or copied in previous session.")
            self.surfaces = [
                Offline_Reference_Surface(self.g_pool, saved_definition=d)
                for d in self.surface_definitions.get(
                    'offline_square_marker_surfaces', [])
            ]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',
                                          []) != []:
            logger.debug(
                "Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture."
            )
            self.surfaces = [
                Offline_Reference_Surface(self.g_pool, saved_definition=d)
                for d in self.surface_definitions.get(
                    'realtime_square_marker_surfaces', [])
            ]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

    def init_ui(self):
        self.add_menu()
        self.menu.label = 'Offline Surface Tracker'
        self.add_button = ui.Thumb('add_surface',
                                   setter=lambda x: self.add_surface(),
                                   getter=lambda: False,
                                   label='A',
                                   hotkey='a')
        self.g_pool.quickbar.append(self.add_button)

        self.glfont = fontstash.Context()
        self.glfont.add_font('opensans', ui.get_opensans_font_path())
        self.glfont.set_color_float((1., 1., 1., .8))
        self.glfont.set_align_string(v_align='right', h_align='top')

        self.timeline = ui.Timeline(
            'Surface Tracker', self.gl_display_cache_bars, self.draw_labels,
            self.timeline_line_height * (len(self.surfaces) + 1))
        self.g_pool.user_timelines.append(self.timeline)

        self.update_gui_markers()

    def deinit_ui(self):
        self.g_pool.user_timelines.remove(self.timeline)
        self.timeline = None
        self.glfont = None
        self.remove_menu()
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
        def set_min_marker_perimeter(val):
            self.min_marker_perimeter = val
            self.notify_all({
                'subject': 'min_marker_perimeter_changed',
                'delay': 1
            })

        def set_invert_image(val):
            self.invert_image = val
            self.invalidate_marker_cache()
            self.invalidate_surface_caches()

        self.menu.elements[:] = []
        self.menu.append(
            ui.Switch('invert_image',
                      self,
                      setter=set_invert_image,
                      label='Use inverted markers'))
        self.menu.append(
            ui.Slider('min_marker_perimeter',
                      self,
                      min=20,
                      max=500,
                      step=1,
                      setter=set_min_marker_perimeter))
        self.menu.append(
            ui.Info_Text(
                'The offline surface tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'
            ))
        self.menu.append(
            ui.Info_Text(
                "Press the export button or type 'e' to start the export."))
        self.menu.append(
            ui.Selector('mode',
                        self,
                        label='Mode',
                        selection=[
                            "Show Markers and Surfaces", "Show marker IDs",
                            "Show Heatmaps", "Show Metrics"
                        ]))
        self.menu.append(
            ui.Info_Text(
                'To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'
            ))
        self.menu.append(
            ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(ui.Button("Add surface", lambda: self.add_surface()))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface {}".format(idx))
            s_menu.collapsed = True
            s_menu.append(ui.Text_Input('name', s))
            s_menu.append(ui.Text_Input('x', s.real_world_size,
                                        label='X size'))
            s_menu.append(ui.Text_Input('y', s.real_world_size,
                                        label='Y size'))
            s_menu.append(ui.Button('Open Debug Window', s.open_close_window))

            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)

            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove', remove_s))
            self.menu.append(s_menu)

    def on_notify(self, notification):
        if notification['subject'] == 'gaze_positions_changed':
            logger.info('Gaze positions changed. Recalculating.')
            self.recalculate()
        if notification['subject'] == 'min_data_confidence_changed':
            logger.info('Min_data_confidence changed. Recalculating.')
            self.recalculate()
        elif notification['subject'] == 'surfaces_changed':
            logger.info('Surfaces changed. Recalculating.')
            self.recalculate()
        elif notification['subject'] == 'min_marker_perimeter_changed':
            logger.info(
                'Min marker perimeter adjusted. Re-detecting surfaces.')
            self.invalidate_surface_caches()
        elif notification['subject'] == "should_export":
            self.save_surface_statsics_to_file(notification['range'],
                                               notification['export_dir'])

    def add_surface(self):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.timeline.height += self.timeline_line_height
        self.update_gui_markers()

    def remove_surface(self, i):
        super().remove_surface(i)
        self.timeline.height -= self.timeline_line_height

    def recalculate(self):

        in_mark = self.g_pool.seek_control.trim_left
        out_mark = self.g_pool.seek_control.trim_right
        section = slice(in_mark, out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc distribution across all surfaces.
        results = []
        for s in self.surfaces:
            gaze_on_srf = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results, dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255. / max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s, c_map in zip(self.surfaces, results_c_maps):
            heatmap = np.ones((1, 1, 4), dtype=np.uint8) * 125
            heatmap[:, :, :3] = c_map
            s.metrics_texture = Named_Texture()
            s.metrics_texture.update_from_ndarray(heatmap)

    def invalidate_surface_caches(self):
        for s in self.surfaces:
            s.cache = None

    def recent_events(self, events):
        frame = events.get('frame')
        if not frame:
            return
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        # self.markers = [m for m in self.cache[frame.index] if m['perimeter'] >= self.min_marker_perimeter]
        self.markers = self.cache[frame.index]
        if self.markers is False:
            self.markers = []
            # tell the precacher that it better have everything from here on analyzed
            self.seek_marker_cacher(frame.index)

        events['surfaces'] = []
        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers, self.min_marker_perimeter,
                         self.min_id_confidence)
            if s.detected:
                events['surfaces'].append({
                    'name':
                    s.name,
                    'uid':
                    s.uid,
                    'm_to_screen':
                    s.m_to_screen.tolist(),
                    'm_from_screen':
                    s.m_from_screen.tolist(),
                    'gaze_on_srf':
                    s.gaze_on_srf,
                    'timestamp':
                    frame.timestamp,
                    'camera_pose_3d':
                    s.camera_pose_3d.tolist()
                    if s.camera_pose_3d is not None else None
                })

        if self.mode == "Show marker IDs":
            draw_markers(frame.img, self.markers)

        elif self.mode == "Show Markers and Surfaces":
            # edit surfaces by user
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,
                                self.g_pool.camera_render_size,
                                flip_y=True)
                for s, v_idx in self.edit_surf_verts:
                    if s.detected:
                        new_pos = s.img_to_ref_surface(np.array(pos))
                        s.move_vertex(v_idx, new_pos)
            else:
                # update srf with no or invalid cache:
                for s in self.surfaces:
                    if s.cache is None:
                        s.init_cache(self.cache, self.min_marker_perimeter,
                                     self.min_id_confidence)
                        self.notify_all({
                            'subject': 'surfaces_changed',
                            'delay': 1
                        })

        # allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()

    def invalidate_marker_cache(self):
        self.close_marker_cacher()
        self.clear_marker_cache()
        self.init_marker_cacher()

    def init_marker_cacher(self):
        from marker_detector_cacher import fill_cache
        visited_list = [False if x is False else True for x in self.cache]
        video_file_path = self.g_pool.capture.source_path
        self.cache_queue = mp.Queue()
        self.cacher_seek_idx = mp.Value('i', 0)
        self.cacher_run = mp.Value(c_bool, True)
        self.cacher = mp.Process(
            target=fill_cache,
            args=(visited_list, video_file_path, self.cache_queue,
                  self.cacher_seek_idx, self.cacher_run,
                  self.min_marker_perimeter_cacher, self.invert_image))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx, c_m = self.cache_queue.get()
            self.cache.update(idx, c_m)

            for s in self.surfaces:
                s.update_cache(self.cache,
                               min_marker_perimeter=self.min_marker_perimeter,
                               min_id_confidence=self.min_id_confidence,
                               idx=idx)
            if self.cacher_run.value is False:
                self.recalculate()
            if self.timeline:
                self.timeline.refresh()

    def seek_marker_cacher(self, idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join(1.0)
        if self.cacher.is_alive():
            logger.error("Marker cacher unresponsive - terminating.")
            self.cacher.terminate()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        super().gl_display()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self, width, height, scale):
        """
        """
        with gl_utils.Coord_System(0, self.cache.length - 1, height, 0):
            # Lines for areas that have been cached
            cached_ranges = []
            for r in self.cache.visited_ranges:  # [[0,1],[3,4]]
                cached_ranges += (r[0], 0), (r[1], 0
                                             )  # [(0,0),(1,0),(3,0),(4,0)]

            glTranslatef(0, scale * self.timeline_line_height / 2, 0)
            color = RGBA(.8, .6, .2, .8)
            draw_polyline(cached_ranges,
                          color=color,
                          line_type=GL_LINES,
                          thickness=scale * 4)

            # Lines where surfaces have been found in video
            cached_surfaces = []
            for s in self.surfaces:
                found_at = []
                if s.cache is not None:
                    for r in s.cache.positive_ranges:  # [[0,1],[3,4]]
                        found_at += (r[0], 0), (r[1], 0
                                                )  # [(0,0),(1,0),(3,0),(4,0)]
                    cached_surfaces.append(found_at)

            color = RGBA(0, .7, .3, .8)

            for s in cached_surfaces:
                glTranslatef(0, scale * self.timeline_line_height, 0)
                draw_polyline(s,
                              color=color,
                              line_type=GL_LINES,
                              thickness=scale * 2)

    def draw_labels(self, width, height, scale):
        self.glfont.set_size(self.timeline_line_height * .8 * scale)
        self.glfont.draw_text(width, 0, 'Marker Cache')
        for idx, s in enumerate(self.surfaces):
            glTranslatef(0, self.timeline_line_height * scale, 0)
            self.glfont.draw_text(width, 0, s.name)

    def save_surface_statsics_to_file(self, export_range, export_dir):
        """
        between in and out mark

            report: gaze distribution:
                    - total gazepoints
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                fixations_on_name.csv
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        metrics_dir = os.path.join(export_dir, 'surfaces')
        section = slice(*export_range)
        in_mark = section.start
        out_mark = section.stop
        logger.info("exporting metrics to {}".format(metrics_dir))
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except OSError:
                logger.warning(
                    "Could not make metrics dir {}".format(metrics_dir))
                return

        with open(os.path.join(metrics_dir, 'surface_visibility.csv'),
                  'w',
                  encoding='utf-8',
                  newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count', frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name', 'visible_frame_count'))
            for s in self.surfaces:
                if s.cache is None:
                    logger.warning(
                        "The surface is not cached. Please wait for the cacher to collect data."
                    )
                    return
                visible_count = s.visible_count_in_section(section)
                csv_writer.writerow((s.name, visible_count))
            logger.info("Created 'surface_visibility.csv' file")

        with open(os.path.join(metrics_dir, 'surface_gaze_distribution.csv'),
                  'w',
                  encoding='utf-8',
                  newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # gaze distribution report
            gaze_in_section = list(
                chain(*self.g_pool.gaze_positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(
                ('total_gaze_point_count', len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name', 'gaze_count'))

            for s in self.surfaces:
                gaze_on_srf = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set(
                    [gp['base_data']['timestamp'] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow((s.name, len(gaze_on_srf)))

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf)))
            logger.info("Created 'surface_gaze_distribution.csv' file")

        with open(os.path.join(metrics_dir, 'surface_events.csv'),
                  'w',
                  encoding='utf-8',
                  newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # surface events report
            csv_writer.writerow(('frame_number', 'timestamp', 'surface_name',
                                 'surface_uid', 'event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id, exit_frame_id in s.cache.positive_ranges:
                    events.append({
                        'frame_id': enter_frame_id,
                        'srf_name': s.name,
                        'srf_uid': s.uid,
                        'event': 'enter'
                    })
                    events.append({
                        'frame_id': exit_frame_id,
                        'srf_name': s.name,
                        'srf_uid': s.uid,
                        'event': 'exit'
                    })

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow(
                    (e['frame_id'], self.g_pool.timestamps[e['frame_id']],
                     e['srf_name'], e['srf_uid'], e['event']))
            logger.info("Created 'surface_events.csv' file")

        for s in self.surfaces:
            # per surface names:
            surface_name = '_' + s.name.replace('/', '') + '_' + s.uid

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,
                                   'srf_positons' + surface_name + '.csv'),
                      'w',
                      encoding='utf-8',
                      newline='') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(('frame_idx', 'timestamp', 'm_to_screen',
                                     'm_from_screen', 'detected_markers'))
                for idx, ts, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)),
                        self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow(
                                (idx, ts, ref_srf_data['m_to_screen'],
                                 ref_srf_data['m_from_screen'],
                                 ref_srf_data['detected_markers']))

            # save gaze on srf as csv.
            with open(os.path.join(
                    metrics_dir,
                    'gaze_positions_on_surface' + surface_name + '.csv'),
                      'w',
                      encoding='utf-8',
                      newline='') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(
                    ('world_timestamp', 'world_frame_idx', 'gaze_timestamp',
                     'x_norm', 'y_norm', 'x_scaled', 'y_scaled', 'on_srf'))
                for idx, ts, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)),
                        self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(
                                    idx, ref_srf_data['m_from_screen']):
                                csv_writer.writerow(
                                    (ts, idx, gp['base_data']['timestamp'],
                                     gp['norm_pos'][0], gp['norm_pos'][1],
                                     gp['norm_pos'][0] *
                                     s.real_world_size['x'],
                                     gp['norm_pos'][1] *
                                     s.real_world_size['y'], gp['on_srf']))

            # save fixation on srf as csv.
            with open(os.path.join(
                    metrics_dir,
                    'fixations_on_surface' + surface_name + '.csv'),
                      'w',
                      encoding='utf-8',
                      newline='') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(
                    ('id', 'start_timestamp', 'duration', 'start_frame',
                     'end_frame', 'norm_pos_x', 'norm_pos_y', 'x_scaled',
                     'y_scaled', 'on_srf'))
                fixations_on_surface = []
                for idx, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)), s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for f in s.fixations_on_srf_by_frame_idx(
                                    idx, ref_srf_data['m_from_screen']):
                                fixations_on_surface.append(f)

                removed_duplicates = dict([
                    (f['base_data']['id'], f) for f in fixations_on_surface
                ]).values()
                for f_on_s in removed_duplicates:
                    f = f_on_s['base_data']
                    f_x, f_y = f_on_s['norm_pos']
                    f_on_srf = f_on_s['on_srf']
                    csv_writer.writerow(
                        (f['id'], f['timestamp'], f['duration'],
                         f['start_frame_index'], f['end_frame_index'], f_x,
                         f_y, f_x * s.real_world_size['x'],
                         f_y * s.real_world_size['y'], f_on_srf))

            logger.info(
                "Saved surface positon gaze and fixation data for '{}' with uid:'{}'"
                .format(s.name, s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(
                    os.path.join(metrics_dir,
                                 'heatmap' + surface_name + '.png'), s.heatmap)

        logger.info("Done exporting reference surface data.")
        # if s.detected and self.img is not None:
        #     # let's save out the current surface image found in the video

        #     #here we get the verts of the surface quad in norm_coords
        #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
        #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
        #     #now we convert to image pixel coords
        #     screen_space[:,1] = 1-screen_space[:,1]
        #     screen_space[:,1] *= self.img.shape[0]
        #     screen_space[:,0] *= self.img.shape[1]
        #     s_0,s_1 = s.real_world_size
        #     #now we need to flip vertically again by setting the mapped_space verts accordingly.
        #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
        #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
        #     #here we do the actual perspective transform of the image.
        #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
        #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
        #     logger.info("Saved current image as .png file.")
        # else:
        #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        If you have a GUI or glfw window, destroy it here.
        """

        self.surface_definitions["offline_square_marker_surfaces"] = [
            rs.save_to_dict() for rs in self.surfaces if rs.defined
        ]
        self.surface_definitions.close()

        self.close_marker_cacher()
        self.persistent_cache['inverted_markers'] = self.invert_image
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        for s in self.surfaces:
            s.close_window()
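
Offline_Surface_Tracker hands marker detection to a background process: init_marker_cacher starts a worker that pushes (frame_index, markers) tuples onto an mp.Queue, and update_marker_cache drains that queue once per rendered frame. The following stripped-down sketch shows the same producer/consumer pattern; cache_worker is a hypothetical stand-in for marker_detector_cacher.fill_cache and performs no real detection:

import multiprocessing as mp
import time
from ctypes import c_bool


def cache_worker(n_frames, queue, run_flag):
    # Stand-in for fill_cache: walk the video frame by frame, detect markers,
    # and push (frame_index, markers) back to the main process.
    for idx in range(n_frames):
        if not run_flag.value:
            break
        markers = []  # placeholder; the real worker runs marker detection here
        queue.put((idx, markers))


def drain(queue, cache):
    # Mirrors update_marker_cache(): empty the queue without blocking.
    while not queue.empty():
        idx, markers = queue.get()
        cache[idx] = markers


if __name__ == '__main__':
    n_frames = 100
    cache = [False] * n_frames  # False marks frames that have not been searched
    queue = mp.Queue()
    run_flag = mp.Value(c_bool, True)
    worker = mp.Process(target=cache_worker, args=(n_frames, queue, run_flag))
    worker.start()
    # The plugin drains once per frame; here we simply poll until the worker is done.
    while worker.is_alive():
        drain(queue, cache)
        time.sleep(0.01)
    worker.join()
    drain(queue, cache)
    searched = sum(m is not False for m in cache)
    print('searched {} / {} frames'.format(searched, n_frames))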
Example #18
0
 def clear_marker_cache(self):
     self.cache = Cache_List([False for _ in self.g_pool.timestamps])
     self.persistent_cache['version'] = self.marker_cache_version
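
clear_marker_cache only resets the in-memory cache and stamps the current version; the companion logic in load_marker_cache additionally compares the stored version number and rebuilds when it does not match. A minimal sketch of that version check, using a plain dict as a stand-in for Persistent_Dict:

MARKER_CACHE_VERSION = 2


def load_or_rebuild_cache(persistent, n_frames):
    """Return (cache, rebuilt) following the version check used above."""
    version = persistent.get('version', 0)
    cache = persistent.get('marker_cache', None)
    if cache is None or version != MARKER_CACHE_VERSION:
        # No cache from a previous session, or it was produced by an
        # incompatible detector version: start over with an empty cache.
        persistent['version'] = MARKER_CACHE_VERSION
        return [False] * n_frames, True
    return cache, False


# usage with an in-memory stand-in for Persistent_Dict
store = {'version': 1, 'marker_cache': [False, False, False]}
cache, rebuilt = load_or_rebuild_cache(store, 3)
assert rebuilt  # version 1 != 2, so the cache is rebuilt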
Example #20
0
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with videofile source.
    It uses a separate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """
    def __init__(self,
                 g_pool,
                 gui_settings={
                     'pos': (220, 200),
                     'size': (300, 300),
                     'iconified': False
                 }):
        super(Offline_Marker_Detector, self).__init__()
        self.g_pool = g_pool
        self.gui_settings = gui_settings
        self.order = .2

        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
            raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'surface_definitions'))
        if self.load('offline_square_marker_surfaces', []) != []:
            logger.debug(
                "Found ref surfaces defined or copied in previous session.")
            self.surfaces = [
                Offline_Reference_Surface(
                    self.g_pool,
                    saved_definition=d,
                    gaze_positions_by_frame=self.g_pool.positions_by_frame)
                for d in self.load('offline_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        elif self.load('realtime_square_marker_surfaces', []) != []:
            logger.debug(
                "Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture."
            )
            self.surfaces = [
                Offline_Reference_Surface(
                    self.g_pool,
                    saved_definition=d,
                    gaze_positions_by_frame=self.g_pool.positions_by_frame)
                for d in self.load('realtime_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

        # edit surfaces
        self.surface_edit_mode = c_bool(0)
        self.edit_surfaces = []

        #detector vars
        self.robust_detection = c_bool(1)
        self.aperture = c_int(11)
        self.min_marker_perimeter = 80

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'square_marker_cache'))
        self.cache = Cache_List(
            self.persistent_cache.get('marker_cache',
                                      [False for _ in g_pool.timestamps]))
        logger.debug(
            "Loaded marker cache %s / %s frames had been searched before" %
            (len(self.cache) - self.cache.count(False), len(self.cache)))
        self.init_marker_cacher()

        #debug vars
        self.draw_markers = c_bool(0)
        self.show_surface_idx = c_int(0)
        self.recent_pupil_positions = []

        self.img_shape = None
        self.img = None

    def init_gui(self):
        import atb
        pos = self.gui_settings['pos']
        atb_label = "Marker Detector"
        self._bar = atb.Bar(name=self.__class__.__name__ + str(id(self)),
                            label=atb_label,
                            help="circle",
                            color=(50, 150, 50),
                            alpha=50,
                            text='light',
                            position=pos,
                            refresh=.1,
                            size=self.gui_settings['size'])
        self._bar.iconified = self.gui_settings['iconified']
        self.update_bar_markers()

        #set up bar display padding
        self.on_window_resize(glfwGetCurrentContext(),
                              *glfwGetWindowSize(glfwGetCurrentContext()))

    def unset_alive(self):
        self.alive = False

    def load(self, var_name, default):
        return self.surface_definitions.get(var_name, default)

    def save(self, var_name, var):
        self.surface_definitions[var_name] = var

    def on_window_resize(self, window, w, h):
        self.win_size = w, h

    def on_click(self, pos, button, action):
        if self.surface_edit_mode.value:
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surface verts in edit mode, let's see if the cursor is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0., 0.), (1., 0.), (1., 1.), (0., 1.))
                    x, y = pos
                    for s in self.surfaces:
                        if s.detected:
                            for (vx, vy), i in zip(
                                    s.ref_surface_to_img(np.array(surf_verts)),
                                    range(4)):
                                vx, vy = denormalize(
                                    (vx, vy),
                                    (self.img_shape[1], self.img_shape[0]),
                                    flip_y=True)
                                if sqrt((x - vx)**2 +
                                        (y - vy)**2) < 15:  #img pixels
                                    self.edit_surfaces.append((s, i))

    def advance(self):
        pass

    def add_surface(self):
        self.surfaces.append(
            Offline_Reference_Surface(
                self.g_pool,
                gaze_positions_by_frame=self.g_pool.positions_by_frame))
        self.update_bar_markers()

    def remove_surface(self, i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_bar_markers()

    def update_bar_markers(self):
        self._bar.clear()
        self._bar.add_button('close', self.unset_alive)
        self._bar.add_var("draw markers", self.draw_markers)
        self._bar.add_button("  add surface   ", self.add_surface, key='a')
        self._bar.add_var("  edit mode   ", self.surface_edit_mode)
        for s, i in zip(self.surfaces, range(len(self.surfaces)))[::-1]:
            self._bar.add_var("%s_name" % i,
                              create_string_buffer(512),
                              getter=s.atb_get_name,
                              setter=s.atb_set_name,
                              group=str(i),
                              label='name')
            self._bar.add_var("%s_markers" % i,
                              create_string_buffer(512),
                              getter=s.atb_marker_status,
                              group=str(i),
                              label='found/registered markers')
            self._bar.add_var(
                "%s_x_scale" % i,
                vtype=c_float,
                getter=s.atb_get_scale_x,
                min=1,
                setter=s.atb_set_scale_x,
                group=str(i),
                label='real width',
                help=
                'this scale factor is used to adjust the coordinate space for your needs (think photo pixels or mm or whatever)'
            )
            self._bar.add_var(
                "%s_y_scale" % i,
                vtype=c_float,
                getter=s.atb_get_scale_y,
                min=1,
                setter=s.atb_set_scale_y,
                group=str(i),
                label='real height',
                help=
                'by defining x and y scale factors you automatically set the correct aspect ratio.'
            )
            self._bar.add_var("%s_window" % i,
                              setter=s.toggle_window,
                              getter=s.window_open,
                              group=str(i),
                              label='open in window')
            self._bar.add_button("%s_hm" % i,
                                 s.generate_heatmap,
                                 label='generate_heatmap',
                                 group=str(i))
            self._bar.add_button("%s_export" % i,
                                 self.save_surface_positions_to_file,
                                 data=i,
                                 label='export surface data',
                                 group=str(i))
            self._bar.add_button("%s_remove" % i,
                                 self.remove_surface,
                                 data=i,
                                 label='remove',
                                 group=str(i))

    def update(self, frame, recent_pupil_positions, events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers is False:
            # locate markers because the precacher has not analyzed this frame yet. Most likely a seek event
            self.markers = []
            self.seek_marker_cacher(
                frame.index
            )  # tell the precacher that it better have everything from here analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                events.append({
                    'type': 'marker_ref_surface',
                    'name': s.name,
                    'uid': s.uid,
                    'm_to_screen': s.m_to_screen,
                    'm_from_screen': s.m_from_screen,
                    'timestamp': frame.timestamp,
                    'gaze_on_srf': s.gaze_on_srf
                })

        if self.draw_markers.value:
            draw_markers(frame.img, self.markers)

        # edit surfaces by user
        if self.surface_edit_mode:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window))
            pos = denormalize(pos,
                              (frame.img.shape[1],
                               frame.img.shape[0]))  # Position in img pixels

            for s, v_idx in self.edit_surfaces:
                if s.detected:
                    pos = normalize(pos,
                                    (self.img_shape[1], self.img_shape[0]),
                                    flip_y=True)
                    new_pos = s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx, new_pos)
                    s.cache = None
        else:
            # update srf with no or invalid cache:
            for s in self.surfaces:
                if s.cache is None:
                    s.init_cache(self.cache)

        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()

    def init_marker_cacher(self):
        forking_enable(0)  # for macOS only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path = os.path.join(self.g_pool.rec_dir, 'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value(c_int, 0)
        self.cacher_run = Value(c_bool, True)
        self.cacher = Process(target=fill_cache,
                              args=(visited_list, video_file_path,
                                    self.cache_queue, self.cacher_seek_idx,
                                    self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx, c_m = self.cache_queue.get()
            self.cache.update(idx, c_m)
            for s in self.surfaces:
                s.update_cache(self.cache, idx=idx)

    def seek_marker_cacher(self, idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()

        for m in self.markers:
            hat = np.array(
                [[[0, 0], [0, 1], [.5, 1.3], [1, 1], [1, 0], [0, 0]]],
                dtype=np.float32)
            hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
            draw_gl_polyline(hat.reshape((6, 2)), (0.1, 1., 1., .5))

        for s in self.surfaces:
            s.gl_draw_frame()
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.surface_edit_mode.value:
            for s in self.surfaces:
                s.gl_draw_corners()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges:  # [[0,1],[3,4]]
            cached_ranges += (r[0], 0), (r[1], 0)  #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges:  # [[0,1],[3,4]]
                    found_at += (r[0], 0), (r[1], 0
                                            )  #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width, height = self.win_size
        h_pad = padding * (self.cache.length - 2) / float(width)
        v_pad = padding * 1. / (height - 2)
        gluOrtho2D(
            -h_pad, (self.cache.length - 1) + h_pad, -v_pad, 1 + v_pad
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = (.8, .6, .2, .8)
        draw_gl_polyline(cached_ranges, color=color, type='Lines', thickness=4)

        color = (0., .7, .3, .8)

        for s in cached_surfaces:
            glTranslatef(0, .02, 0)
            draw_gl_polyline(s, color=color, type='Lines', thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

    def save_surface_positions_to_file(self, i):
        s = self.surfaces[i]

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark

        if s.cache == None:
            logger.warning(
                "The surface is not cached. Please wait for the cacher to collect data."
            )
            return

        srf_dir = os.path.join(
            self.g_pool.rec_dir,
            'surface_data' + '_' + s.name.replace('/', '') + '_' + s.uid)
        logger.info("exporting surface gaze data to %s" % srf_dir)
        if os.path.isdir(srf_dir):
            logger.info(
                "Will overwrite previous export for this referece surface")
        else:
            try:
                os.mkdir(srf_dir)
            except OSError:
                logger.warning("Could name make export dir %s" % srf_dir)
                return

        #save surface_positions as pickle file
        save_object(s.cache.to_list(), os.path.join(srf_dir, 'srf_positons'))

        #save surface_positions as csv
        with open(os.path.join(srf_dir, 'srf_positons.csv'), 'wb') as csvfile:
            csw_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)
            csw_writer.writerow(('frame_idx', 'timestamp', 'm_to_screen',
                                 'm_from_screen', 'detected_markers'))
            for idx, ts, ref_srf_data in zip(
                    range(len(self.g_pool.timestamps)), self.g_pool.timestamps,
                    s.cache):
                if in_mark <= idx <= out_mark:
                    if ref_srf_data is not None and ref_srf_data is not False:
                        csw_writer.writerow(
                            (idx, ts, ref_srf_data['m_to_screen'],
                             ref_srf_data['m_from_screen'],
                             ref_srf_data['detected_markers']))

        #save gaze on srf as csv.
        with open(os.path.join(srf_dir, 'gaze_positions_on_surface.csv'),
                  'wb') as csvfile:
            csw_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)
            csw_writer.writerow(
                ('world_frame_idx', 'world_timestamp', 'eye_timestamp',
                 'x_norm', 'y_norm', 'x_scaled', 'y_scaled', 'on_srf'))
            for idx, ts, ref_srf_data in zip(
                    range(len(self.g_pool.timestamps)), self.g_pool.timestamps,
                    s.cache):
                if in_mark <= idx <= out_mark:
                    if ref_srf_data is not None and ref_srf_data is not False:
                        for gp in ref_srf_data['gaze_on_srf']:
                            gp_x, gp_y = gp['norm_gaze_on_srf']
                            on_srf = (0 <= gp_x <= 1) and (0 <= gp_y <= 1)
                            csw_writer.writerow(
                                (idx, ts, gp['timestamp'], gp_x, gp_y,
                                 gp_x * s.scale_factor[0],
                                 gp_y * s.scale_factor[1], on_srf))

        logger.info(
            "Saved surface positon data and gaze on surface data for '%s' with uid:'%s'"
            % (s.name, s.uid))

        if s.heatmap is not None:
            logger.info("Saved Heatmap as .png file.")
            cv2.imwrite(os.path.join(srf_dir, 'heatmap.png'), s.heatmap)

        if s.detected and self.img is not None:
            # let's save out the current surface image found in the video

            #here we get the verts of the surface quad in norm_coords
            mapped_space_one = np.array(((0, 0), (1, 0), (1, 1), (0, 1)),
                                        dtype=np.float32).reshape(-1, 1, 2)
            screen_space = cv2.perspectiveTransform(mapped_space_one,
                                                    s.m_to_screen).reshape(
                                                        -1, 2)
            #now we convert to image pixel coords
            screen_space[:, 1] = 1 - screen_space[:, 1]
            screen_space[:, 1] *= self.img.shape[0]
            screen_space[:, 0] *= self.img.shape[1]
            s_0, s_1 = s.scale_factor
            #now we need to flip vertically again by setting the mapped_space verts accordingly.
            mapped_space_scaled = np.array(
                ((0, s_1), (s_0, s_1), (s_0, 0), (0, 0)), dtype=np.float32)
            M = cv2.getPerspectiveTransform(screen_space, mapped_space_scaled)
            #here we do the actual perspective transform of the image.
            srf_in_video = cv2.warpPerspective(
                self.img, M, (int(s.scale_factor[0]), int(s.scale_factor[1])))
            cv2.imwrite(os.path.join(srf_dir, 'surface.png'), srf_in_video)
            logger.info("Saved current image as .png file.")

    def get_init_dict(self):
        d = {}
        if hasattr(self, '_bar'):
            gui_settings = {
                'pos': self._bar.position,
                'size': self._bar.size,
                'iconified': self._bar.iconified
            }
            d['gui_settings'] = gui_settings

        return d

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        If you have an atb bar or glfw window, destroy it here.
        """

        self.save("offline_square_marker_surfaces",
                  [rs.save_to_dict() for rs in self.surfaces if rs.defined])
        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        self.surface_definitions.close()

        for s in self.surfaces:
            s.close_window()
        self._bar.destroy()
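
The image export at the end of save_surface_positions_to_file is essentially a perspective crop: the surface quad found in the video frame is mapped onto an axis-aligned rectangle of the surface's scaled size via cv2.getPerspectiveTransform and cv2.warpPerspective. A self-contained sketch of that deskew step on a synthetic frame (the corner coordinates and output size are made up for illustration):

import cv2
import numpy as np

# Synthetic frame with a filled rectangle standing in for the detected surface.
frame = np.full((480, 640, 3), 64, dtype=np.uint8)
cv2.rectangle(frame, (200, 120), (440, 360), (0, 255, 0), -1)

# Corners of the surface quad in image pixels (clockwise from top-left).
quad_px = np.array([[200, 120], [440, 120], [440, 360], [200, 360]],
                   dtype=np.float32)

# Target rectangle: the surface's scaled size in output pixels.
w, h = 300, 200
target = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=np.float32)

# Same calls the exporter uses: compute the homography, then warp.
M = cv2.getPerspectiveTransform(quad_px, target)
surface_crop = cv2.warpPerspective(frame, M, (w, h))
cv2.imwrite('surface_crop.png', surface_crop)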