Example #1
def _clone_task(clone_func: Callable[[], None], errors: Queue) -> None:
    try:
        clone_func()
    except Exception as e:
        exc_buffer = io.StringIO()
        traceback.print_exc(file=exc_buffer)
        errors.put_nowait(exc_buffer.getvalue())
        raise e
Example #2
def main():
    timePipe, sigPipe = Pipe()
    q = Queue()
    clock = Process(target=tickTock, args=(timePipe, ))
    testSignal = Process(target=signal, args=(q, sigPipe, [1]))
    testSignal.start()
    while True:
        print(q.get())
Example #3
def init_marker_cacher(self):
    forking_enable(0) #for MacOs only
    from marker_detector_cacher import fill_cache
    visited_list = [False if x == False else True for x in self.cache]
    video_file_path = os.path.join(self.g_pool.rec_dir,'world.avi')
    self.cache_queue = Queue()
    self.cacher_seek_idx = Value(c_int,0)
    self.cacher_run = Value(c_bool,True)
    self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run))
    self.cacher.start()
Example #4
def test_set_pdeathsig(self):
    success = "done"
    q = Queue()
    p = Process(target=parent_task, args=(q, success))
    p.start()
    child_proc = psutil.Process(q.get(timeout=3))
    try:
        p.terminate()
        assert q.get(timeout=3) == success
    finally:
        child_proc.terminate()
Example #5
def init_marker_cacher(self):
    forking_enable(0) #for MacOs only
    from marker_detector_cacher import fill_cache
    visited_list = [False if x == False else True for x in self.cache]
    video_file_path = self.g_pool.capture.src
    timestamps = self.g_pool.capture.timestamps
    self.cache_queue = Queue()
    self.cacher_seek_idx = Value('i',0)
    self.cacher_run = Value(c_bool,True)
    self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,timestamps,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher))
    self.cacher.start()
Example #6
def __init__(self, fun, args, postprocess, job):
    """ Build multiprocessing queues and start worker. """
    super(LongCalculation, self).__init__(job, "Cancel", 0, 0)
    self.setModal(True)
    self.input = Queue()
    self.output = Queue()
    self.input.put((fun, args, postprocess))
    self.proc = Process(target=worker, args=(self.input, self.output))
    self.proc.start()
    self.timer = QTimer()
    self.timer.timeout.connect(self.update)
    self.timer.start(10)
Example #7
class LongCalculation(QProgressDialog):
    """
    Multiprocessing based worker for mesh and eigenvalue calculations.

    This is necessary to make sure GUI is not blocked while mesh is built,
    or when eigenvalue calculations are performed.

    Transformations do not need as much time, unless there is one implemented
    without numpy vectorized coordinate calculations.
    """

    res = None

    def __init__(self, fun, args, postprocess, job):
        """ Build multiprocessing queues and start worker. """
        super(LongCalculation, self).__init__(job, "Cancel", 0, 0)
        self.setModal(True)
        self.input = Queue()
        self.output = Queue()
        self.input.put((fun, args, postprocess))
        self.proc = Process(target=worker, args=(self.input, self.output))
        self.proc.start()
        self.timer = QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(10)

    def update(self):
        """ Check if worker is done, and close dialog. """
        try:
            out = self.output.get(block=False)
            if isinstance(out, basestring):
                self.setLabelText(out)
                return
            if out is None:
                self.done(0)
                return
            self.res = out
            self.timer.stop()
            self.proc.join()
            del self.proc
            self.done(1)
        except:
            pass

    def cleanUp(self):
        """ Kill the running processes if cancelled/failed. """
        if self.proc:
            while self.proc.is_alive():
                self.proc.terminate()
            del self.proc
        self.timer.stop()
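The LongCalculation dialog above hands everything to a module-level worker target that is not shown in this example. Judging from how update() interprets the output queue (a string updates the progress label, None means failure or cancellation, anything else is taken as the final result), the worker could look roughly like the sketch below; this body is an assumption, not the project's actual implementation.

def worker(input_queue, output_queue):
    # Hypothetical worker matching the queue protocol read by LongCalculation.update():
    # strings -> progress text, None -> failure/cancel, anything else -> final result.
    fun, args, postprocess = input_queue.get()
    try:
        output_queue.put("calculating...")      # shown in the dialog label
        result = fun(*args)
        output_queue.put(postprocess(result))   # picked up as self.res, dialog closes with done(1)
    except Exception:
        output_queue.put(None)                  # dialog closes with done(0)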
Example #8
def main():
    q = Queue()
    x = [1, 2, 3, 4, 5]
    pl = []
    for i in x:
        pl.append(Process(target=f, args=(
            q,
            i,
        )))
    for p in pl:
        p.start()
        p.join()
    for i in x:
        print(q.get())
Example #9
def run_in_childprocess(target, codec=None, *args, **kwargs):
    assert codec is None or len(codec) == 2, codec
    queue = Queue()
    p = Process(target=_wrapper, args=(target, codec, queue, args, kwargs))
    p.start()
    e, r = queue.get()
    p.join()

    if e:
        raise e

    if codec:
        r = codec[1](r)

    return r
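run_in_childprocess() relies on a _wrapper helper that is not part of this snippet. From the caller's side it must put an (exception, result) pair on the queue and, when a codec is given, encode the result with codec[0] before it crosses the process boundary (the caller decodes with codec[1]). A minimal sketch under those assumptions:

def _wrapper(target, codec, queue, args, kwargs):
    # Hypothetical helper: run the target in the child and report (exception, result).
    try:
        result = target(*args, **kwargs)
        if codec:
            result = codec[0](result)  # encode before sending across processes
        queue.put((None, result))
    except Exception as exc:
        queue.put((exc, None))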
Example #10
def clone_with_timeout(src: str, dest: str, clone_func: Callable[[], None],
                       timeout: float) -> None:
    """Clone a repository with timeout.

    Args:
        src: clone source
        dest: clone destination
        clone_func: callable that does the actual cloning
        timeout: timeout in seconds
    """
    errors: Queue = Queue()
    process = Process(target=_clone_task, args=(clone_func, errors))
    process.start()
    process.join(timeout)

    if process.is_alive():
        process.terminate()
        # Give it literally a second (in successive steps of 0.1 second),
        # then kill it.
        # Can't use `process.join(1)` here, billiard appears to be bugged
        # https://github.com/celery/billiard/issues/270
        killed = False
        for _ in range(10):
            time.sleep(0.1)
            if not process.is_alive():
                break
        else:
            killed = True
            os.kill(process.pid, signal.SIGKILL)
        raise CloneTimeout(src, timeout, killed)

    if not errors.empty():
        raise CloneFailure(src, dest, errors.get())
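A hypothetical call site for clone_with_timeout(): the repository URL, destination path, and the subprocess-based clone_func below are placeholders, and CloneTimeout / CloneFailure (as well as the _clone_task helper from Example #1) are assumed to be defined alongside the function above.

import subprocess

src = "https://example.org/some/repo.git"   # placeholder URL
dest = "/tmp/repo-clone"                    # placeholder destination

def do_clone() -> None:
    # Any callable that performs the clone works; here a plain `git clone` subprocess.
    subprocess.run(["git", "clone", src, dest], check=True)

clone_with_timeout(src, dest, do_clone, timeout=120.0)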
Example #11
def main():
    # To assign camera by name: put string(s) in list

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='GUI for gaze tracking and pupillometry')
    parser.add_argument('-eye', dest='eye_file', type=str, help="Work with existing video recording, instead of live feed", default='')
    parser.add_argument('-world', dest='world_file', type=str, help="Work with existing video recording, instead of live feed", default='')

    args = parser.parse_args()

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    if args.eye_file == '':
        eye_src = ["UI154xLE-M", "USB Camera-B4.09.24.1", "FaceTime Camera (Built-in)", "Microsoft", "6000","Integrated Camera"]
        # to assign cameras directly, using integers as demonstrated below
        # eye_src = 1
    else:
#        print "Using provided file: %s" % args.filename
        eye_src = args.eye_file

    if args.world_file == '':
        world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]
        # to assign cameras directly, using integers as demonstrated below
        # world_src = 0
    else:
        world_src = args.world_file

    # Camera video size in pixels (width,height)
    eye_size = (260,216) #(1280,1024)
    world_size = (640,480)


    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double,0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool,world_src,world_size)

    # Exit / clean-up
    p_eye.join()
Example #12
def main():

    # To assign camera by name: put string(s) in list
    eye_cam_names = [
        "USB 2.0 Camera", "Microsoft", "6000", "Integrated Camera",
        "HD USB Camera"
    ]
    world_src = [
        "Logitech Camera", "(046d:081d)", "C510", "B525", "C525", "C615",
        "C920", "C930e"
    ]
    # first match for eye0 and second match for eye1
    eye_src = (eye_cam_names, 0), (eye_cam_names, 1)

    # to assign cameras directly, using integers as demonstrated below
    # eye_src =  4 , 5 #second arg will be ignored for monocular eye trackers
    # world_src = 1

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Downloads/000/eye0.mkv' , '/Users/mkassner/Downloads/eye.avi'
    # world_src = "/Users/mkassner/Downloads/000/world.mkv"

    # Camera video size in pixels (width,height)
    eye_size = (640, 480)
    world_size = (1280, 720)

    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # g_pool holds variables. Only variables added here are shared across processes.
    g_pool = Global_Container()

    # Create and initialize IPC
    g_pool.pupil_queue = Queue()
    g_pool.quit = Value(c_bool, 0)
    g_pool.timebase = Value(c_double, 0)
    g_pool.eye_tx = []
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.version = get_version(version_file)
    g_pool.app = 'capture'
    g_pool.binocular = binocular

    p_eye = []
    for eye_id in range(1 + 1 * binocular):
        rx, tx = Pipe(False)
        p_eye += [
            Process(target=eye,
                    args=(g_pool, eye_src[eye_id], eye_size, rx, eye_id))
        ]
        g_pool.eye_tx += [tx]
        p_eye[-1].start()

    world(g_pool, world_src, world_size)

    # Exit / clean-up
    for p in p_eye:
        p.join()
Example #13
def init_marker_cacher(self):
    forking_enable(0) #for MacOs only
    from marker_detector_cacher import fill_cache
    visited_list = [False if x == False else True for x in self.cache]
    video_file_path = self.g_pool.capture.src
    self.cache_queue = Queue()
    self.cacher_seek_idx = Value('i',0)
    self.cacher_run = Value(c_bool,True)
    self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter))
    self.cacher.start()
Example #14
def main():
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000", "Integrated Camera"]
    world_src = [
        "Logitech Camera", "(046d:081d)", "C510", "B525", "C525", "C615",
        "C920", "C930e"
    ]

    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Pupil/datasets/p1-left/frames/test.avi'
    # world_src = "/Users/mkassner/Desktop/2014_01_21/000/world.avi"

    # Camera video size in pixels (width,height)
    eye_size = (640, 360)
    world_size = (1280, 720)

    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool, 0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double, 0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool, eye_src, eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool, world_src, world_size)

    # Exit / clean-up
    p_eye.join()
Example #15
File: main.py Project: zenithlight/pupil
def main():

    #IPC
    pupil_queue = Queue()
    timebase = Value(c_double, 0)

    cmd_world_end, cmd_launcher_end = Pipe()
    com0 = Pipe(True)
    eyes_are_alive = Value(c_bool, 0), Value(c_bool, 0)
    com1 = Pipe(True)
    com_world_ends = com0[0], com1[0]
    com_eye_ends = com0[1], com1[1]

    p_world = Process(target=world,
                      args=(pupil_queue, timebase, cmd_world_end,
                            com_world_ends, eyes_are_alive, user_dir,
                            app_version, video_sources['world']))
    p_world.start()

    while True:
        #block and listen for commands from world process.
        cmd = cmd_launcher_end.recv()
        if cmd == "Exit":
            break
        else:
            eye_id = cmd
            p_eye = Process(target=eye,
                            args=(pupil_queue, timebase, com_eye_ends[eye_id],
                                  eyes_are_alive[eye_id], user_dir,
                                  app_version, eye_id,
                                  video_sources['eye%s' % eye_id]))
            p_eye.start()

    for p in active_children():
        p.join()
    logger.debug('Launcher exit')
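The launcher loop above blocks on cmd_launcher_end.recv() and expects either an eye id (an integer) or the string "Exit"; the world process presumably holds cmd_world_end and drives it. A sketch of that sending side, assuming only the names visible in the snippet:

def request_eye_process(cmd_world_end, eye_id):
    # Ask the launcher to spawn the eye process with this id (0 or 1).
    cmd_world_end.send(eye_id)

def request_exit(cmd_world_end):
    # Break the launcher loop so it joins remaining children and exits.
    cmd_world_end.send("Exit")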
Example #16
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with videofile source.
    It uses a separate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """
    def __init__(self,
                 g_pool,
                 gui_settings={
                     'pos': (220, 200),
                     'size': (300, 300),
                     'iconified': False
                 }):
        super(Offline_Marker_Detector, self).__init__()
        self.g_pool = g_pool
        self.gui_settings = gui_settings
        self.order = .2

        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
            raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'surface_definitions'))
        if self.load('offline_square_marker_surfaces', []) != []:
            logger.debug(
                "Found ref surfaces defined or copied in previous session.")
            self.surfaces = [
                Offline_Reference_Surface(
                    self.g_pool,
                    saved_definition=d,
                    gaze_positions_by_frame=self.g_pool.positions_by_frame)
                for d in self.load('offline_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        elif self.load('realtime_square_marker_surfaces', []) != []:
            logger.debug(
                "Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture."
            )
            self.surfaces = [
                Offline_Reference_Surface(
                    self.g_pool,
                    saved_definition=d,
                    gaze_positions_by_frame=self.g_pool.positions_by_frame)
                for d in self.load('realtime_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

        # edit surfaces
        self.surface_edit_mode = c_bool(0)
        self.edit_surfaces = []

        #detector vars
        self.robust_detection = c_bool(1)
        self.aperture = c_int(11)
        self.min_marker_perimeter = 80

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'square_marker_cache'))
        self.cache = Cache_List(
            self.persistent_cache.get('marker_cache',
                                      [False for _ in g_pool.timestamps]))
        logger.debug(
            "Loaded marker cache %s / %s frames had been searched before" %
            (len(self.cache) - self.cache.count(False), len(self.cache)))
        self.init_marker_cacher()

        #debug vars
        self.draw_markers = c_bool(0)
        self.show_surface_idx = c_int(0)
        self.recent_pupil_positions = []

        self.img_shape = None
        self.img = None

    def init_gui(self):
        import atb
        pos = self.gui_settings['pos']
        atb_label = "Marker Detector"
        self._bar = atb.Bar(name=self.__class__.__name__ + str(id(self)),
                            label=atb_label,
                            help="circle",
                            color=(50, 150, 50),
                            alpha=50,
                            text='light',
                            position=pos,
                            refresh=.1,
                            size=self.gui_settings['size'])
        self._bar.iconified = self.gui_settings['iconified']
        self.update_bar_markers()

        #set up bar display padding
        self.on_window_resize(glfwGetCurrentContext(),
                              *glfwGetWindowSize(glfwGetCurrentContext()))

    def unset_alive(self):
        self.alive = False

    def load(self, var_name, default):
        return self.surface_definitions.get(var_name, default)

    def save(self, var_name, var):
        self.surface_definitions[var_name] = var

    def on_window_resize(self, window, w, h):
        self.win_size = w, h

    def on_click(self, pos, button, action):
        if self.surface_edit_mode.value:
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surface verts in edit mode, let's see if the cursor is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0., 0.), (1., 0.), (1., 1.), (0., 1.))
                    x, y = pos
                    for s in self.surfaces:
                        if s.detected:
                            for (vx, vy), i in zip(
                                    s.ref_surface_to_img(np.array(surf_verts)),
                                    range(4)):
                                vx, vy = denormalize(
                                    (vx, vy),
                                    (self.img_shape[1], self.img_shape[0]),
                                    flip_y=True)
                                if sqrt((x - vx)**2 +
                                        (y - vy)**2) < 15:  #img pixels
                                    self.edit_surfaces.append((s, i))

    def advance(self):
        pass

    def add_surface(self):
        self.surfaces.append(
            Offline_Reference_Surface(
                self.g_pool,
                gaze_positions_by_frame=self.g_pool.positions_by_frame))
        self.update_bar_markers()

    def remove_surface(self, i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_bar_markers()

    def update_bar_markers(self):
        self._bar.clear()
        self._bar.add_button('close', self.unset_alive)
        self._bar.add_var("draw markers", self.draw_markers)
        self._bar.add_button("  add surface   ", self.add_surface, key='a')
        self._bar.add_var("  edit mode   ", self.surface_edit_mode)
        for s, i in zip(self.surfaces, range(len(self.surfaces)))[::-1]:
            self._bar.add_var("%s_name" % i,
                              create_string_buffer(512),
                              getter=s.atb_get_name,
                              setter=s.atb_set_name,
                              group=str(i),
                              label='name')
            self._bar.add_var("%s_markers" % i,
                              create_string_buffer(512),
                              getter=s.atb_marker_status,
                              group=str(i),
                              label='found/registered markers')
            self._bar.add_var(
                "%s_x_scale" % i,
                vtype=c_float,
                getter=s.atb_get_scale_x,
                min=1,
                setter=s.atb_set_scale_x,
                group=str(i),
                label='real width',
                help=
                'this scale factor is used to adjust the coordinate space for your needs (think photo pixels or mm or whatever)'
            )
            self._bar.add_var(
                "%s_y_scale" % i,
                vtype=c_float,
                getter=s.atb_get_scale_y,
                min=1,
                setter=s.atb_set_scale_y,
                group=str(i),
                label='real height',
                help=
                'by defining x and y scale factors you automatically set the correct aspect ratio.'
            )
            self._bar.add_var("%s_window" % i,
                              setter=s.toggle_window,
                              getter=s.window_open,
                              group=str(i),
                              label='open in window')
            self._bar.add_button("%s_hm" % i,
                                 s.generate_heatmap,
                                 label='generate_heatmap',
                                 group=str(i))
            self._bar.add_button("%s_export" % i,
                                 self.save_surface_positions_to_file,
                                 data=i,
                                 label='export surface data',
                                 group=str(i))
            self._bar.add_button("%s_remove" % i,
                                 self.remove_surface,
                                 data=i,
                                 label='remove',
                                 group=str(i))

    def update(self, frame, recent_pupil_positions, events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers == False:
            # locate markers because precacher has not analyzed this frame yet. Most likely a seek event
            self.markers = []
            self.seek_marker_cacher(frame.index)  # tell precacher that it better have everything from here analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                events.append({
                    'type': 'marker_ref_surface',
                    'name': s.name,
                    'uid': s.uid,
                    'm_to_screen': s.m_to_screen,
                    'm_from_screen': s.m_from_screen,
                    'timestamp': frame.timestamp,
                    'gaze_on_srf': s.gaze_on_srf
                })

        if self.draw_markers.value:
            draw_markers(frame.img, self.markers)

        # edit surfaces by user
        if self.surface_edit_mode:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window))
            pos = denormalize(pos,
                              (frame.img.shape[1],
                               frame.img.shape[0]))  # Position in img pixels

            for s, v_idx in self.edit_surfaces:
                if s.detected:
                    pos = normalize(pos,
                                    (self.img_shape[1], self.img_shape[0]),
                                    flip_y=True)
                    new_pos = s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx, new_pos)
                    s.cache = None
        else:
            # update srf with no or invalid cache:
            for s in self.surfaces:
                if s.cache == None:
                    s.init_cache(self.cache)

        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()

    def init_marker_cacher(self):
        forking_enable(0)  #for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path = os.path.join(self.g_pool.rec_dir, 'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value(c_int, 0)
        self.cacher_run = Value(c_bool, True)
        self.cacher = Process(target=fill_cache,
                              args=(visited_list, video_file_path,
                                    self.cache_queue, self.cacher_seek_idx,
                                    self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx, c_m = self.cache_queue.get()
            self.cache.update(idx, c_m)
            for s in self.surfaces:
                s.update_cache(self.cache, idx=idx)

    def seek_marker_cacher(self, idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()

        for m in self.markers:
            hat = np.array(
                [[[0, 0], [0, 1], [.5, 1.3], [1, 1], [1, 0], [0, 0]]],
                dtype=np.float32)
            hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
            draw_gl_polyline(hat.reshape((6, 2)), (0.1, 1., 1., .5))

        for s in self.surfaces:
            s.gl_draw_frame()
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.surface_edit_mode.value:
            for s in self.surfaces:
                s.gl_draw_corners()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges:  # [[0,1],[3,4]]
            cached_ranges += (r[0], 0), (r[1], 0)  #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges:  # [[0,1],[3,4]]
                    found_at += (r[0], 0), (r[1], 0)  #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width, height = self.win_size
        h_pad = padding * (self.cache.length - 2) / float(width)
        v_pad = padding * 1. / (height - 2)
        gluOrtho2D(
            -h_pad, (self.cache.length - 1) + h_pad, -v_pad, 1 + v_pad
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = (.8, .6, .2, .8)
        draw_gl_polyline(cached_ranges, color=color, type='Lines', thickness=4)

        color = (0., .7, .3, .8)

        for s in cached_surfaces:
            glTranslatef(0, .02, 0)
            draw_gl_polyline(s, color=color, type='Lines', thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

    def save_surface_positions_to_file(self, i):
        s = self.surfaces[i]

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark

        if s.cache == None:
            logger.warning(
                "The surface is not cached. Please wait for the cacher to collect data."
            )
            return

        srf_dir = os.path.join(
            self.g_pool.rec_dir,
            'surface_data' + '_' + s.name.replace('/', '') + '_' + s.uid)
        logger.info("exporting surface gaze data to %s" % srf_dir)
        if os.path.isdir(srf_dir):
            logger.info(
                "Will overwrite previous export for this referece surface")
        else:
            try:
                os.mkdir(srf_dir)
            except:
                logger.warning("Could name make export dir %s" % srf_dir)
                return

        #save surface_positions as pickle file
        save_object(s.cache.to_list(), os.path.join(srf_dir, 'srf_positons'))

        #save surface_positions as csv
        with open(os.path.join(srf_dir, 'srf_positons.csv'), 'wb') as csvfile:
            csw_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)
            csw_writer.writerow(('frame_idx', 'timestamp', 'm_to_screen',
                                 'm_from_screen', 'detected_markers'))
            for idx, ts, ref_srf_data in zip(
                    range(len(self.g_pool.timestamps)), self.g_pool.timestamps,
                    s.cache):
                if in_mark <= idx <= out_mark:
                    if ref_srf_data is not None and ref_srf_data is not False:
                        csw_writer.writerow(
                            (idx, ts, ref_srf_data['m_to_screen'],
                             ref_srf_data['m_from_screen'],
                             ref_srf_data['detected_markers']))

        #save gaze on srf as csv.
        with open(os.path.join(srf_dir, 'gaze_positions_on_surface.csv'),
                  'wb') as csvfile:
            csw_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)
            csw_writer.writerow(
                ('world_frame_idx', 'world_timestamp', 'eye_timestamp',
                 'x_norm', 'y_norm', 'x_scaled', 'y_scaled', 'on_srf'))
            for idx, ts, ref_srf_data in zip(
                    range(len(self.g_pool.timestamps)), self.g_pool.timestamps,
                    s.cache):
                if in_mark <= idx <= out_mark:
                    if ref_srf_data is not None and ref_srf_data is not False:
                        for gp in ref_srf_data['gaze_on_srf']:
                            gp_x, gp_y = gp['norm_gaze_on_srf']
                            on_srf = (0 <= gp_x <= 1) and (0 <= gp_y <= 1)
                            csw_writer.writerow(
                                (idx, ts, gp['timestamp'], gp_x, gp_y,
                                 gp_x * s.scale_factor[0],
                                 gp_y * s.scale_factor[1], on_srf))

        logger.info(
            "Saved surface positon data and gaze on surface data for '%s' with uid:'%s'"
            % (s.name, s.uid))

        if s.heatmap is not None:
            logger.info("Saved Heatmap as .png file.")
            cv2.imwrite(os.path.join(srf_dir, 'heatmap.png'), s.heatmap)

        if s.detected and self.img is not None:
            # let's save out the current surface image found in video

            #here we get the verts of the surface quad in norm_coords
            mapped_space_one = np.array(((0, 0), (1, 0), (1, 1), (0, 1)),
                                        dtype=np.float32).reshape(-1, 1, 2)
            screen_space = cv2.perspectiveTransform(mapped_space_one,
                                                    s.m_to_screen).reshape(
                                                        -1, 2)
            # now we convert to image pixel coords
            screen_space[:, 1] = 1 - screen_space[:, 1]
            screen_space[:, 1] *= self.img.shape[0]
            screen_space[:, 0] *= self.img.shape[1]
            s_0, s_1 = s.scale_factor
            # now we need to flip vertically again by setting the mapped_space verts accordingly.
            mapped_space_scaled = np.array(
                ((0, s_1), (s_0, s_1), (s_0, 0), (0, 0)), dtype=np.float32)
            M = cv2.getPerspectiveTransform(screen_space, mapped_space_scaled)
            # here we do the actual perspective transform of the image.
            srf_in_video = cv2.warpPerspective(
                self.img, M, (int(s.scale_factor[0]), int(s.scale_factor[1])))
            cv2.imwrite(os.path.join(srf_dir, 'surface.png'), srf_in_video)
            logger.info("Saved current image as .png file.")

    def get_init_dict(self):
        d = {}
        if hasattr(self, '_bar'):
            gui_settings = {
                'pos': self._bar.position,
                'size': self._bar.size,
                'iconified': self._bar.iconified
            }
            d['gui_settings'] = gui_settings

        return d

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have an atb bar or glfw window destroy it here.
        """

        self.save("offline_square_marker_surfaces",
                  [rs.save_to_dict() for rs in self.surfaces if rs.defined])
        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        self.surface_definitions.close()

        for s in self.surfaces:
            s.close_window()
        self._bar.destroy()
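init_marker_cacher() in the class above starts a fill_cache worker from marker_detector_cacher that is not included in this example. All the consumer side requires is that it push (frame_index, markers) tuples onto cache_queue and honor the shared cacher_run flag. A rough sketch of that producer, with the actual marker detection stubbed out and the cacher_seek_idx handling omitted, might look like this:

import cv2

def fill_cache(visited_list, video_file_path, q, seek_idx, run):
    # Hypothetical producer: walk the video, queue (frame_index, markers) for frames
    # that have not been visited yet, and stop when the shared run flag is cleared.
    # Seek handling via seek_idx is omitted for brevity.
    cap = cv2.VideoCapture(video_file_path)
    for idx, visited in enumerate(visited_list):
        if not run.value:
            break
        ok, frame = cap.read()
        if not ok:
            break
        if not visited:
            markers = []  # placeholder: the real worker runs the square-marker detector here
            q.put((idx, markers))
    run.value = False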
Example #17
from billiard import Process, Queue


def f(q):
    q.put([42, None, 'hello'])


if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q, ))
    p.start()
    print(q.get())  # prints "[42, None, 'hello']"
    p.join()
Example #18
def main():

    # To assign camera by name: put string(s) in list
    world_src = [
        "Pupil Cam1 ID2", "Logitech Camera", "(046d:081d)", "C510", "B525",
        "C525", "C615", "C920", "C930e"
    ]
    eye0 = [
        "Pupil Cam1 ID0", "HD-6000", "Integrated Camera", "HD USB Camera",
        "USB 2.0 Camera"
    ]
    eye1 = ["Pupil Cam1 ID1", "HD-6000", "Integrated Camera"]
    eye_src = eye0, eye1

    # to assign cameras directly, using integers as demonstrated below
    # eye_src =  1 , 1 #second arg will be ignored for monocular eye trackers
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Downloads/eye0.mkv' , '/Users/mkassner/Downloads/eye.avi'
    # world_src = "/Users/mkassner/Downloads/000/world.mkv"

    # Default camera video size in pixels (width,height)
    eye_size = (640, 480)
    world_size = (1280, 720)

    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # g_pool holds variables. Only variables added here are shared across processes.
    g_pool = Global_Container()

    # Create and initialize IPC
    g_pool.pupil_queue = Queue()
    g_pool.quit = Value(c_bool, 0)
    g_pool.timebase = Value(c_double, 0)
    g_pool.eye_tx = []
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.version = get_version(version_file)
    g_pool.app = 'capture'
    g_pool.binocular = binocular

    p_eye = []
    for eye_id in range(1 + 1 * binocular):
        eye_end, world_end = Pipe(True)
        p_eye += [
            Process(target=eye,
                    args=(g_pool, eye_src[eye_id], eye_size, eye_end, eye_id))
        ]
        p_eye[-1].start()
        #wait for ready message from eye to sequentialize startup
        logger.debug(world_end.recv())
        g_pool.eye_tx += [world_end]

    world(g_pool, world_src, world_size)

    # Exit / clean-up
    for p in p_eye:
        p.join()
Example #19
class Offline_Surface_Tracker(Surface_Tracker):
    """
    Special version of surface tracker for use with videofile source.
    It uses a separate process to search all frames in the world video file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """

    def __init__(self,g_pool,mode="Show Markers and Surfaces",min_marker_perimeter = 100):
        super(Offline_Surface_Tracker, self).__init__(g_pool,mode,min_marker_perimeter)
        self.order = .2

        if g_pool.app == 'capture':
            raise Exception('For Player only.')

        self.marker_cache_version = 1
        self.min_marker_perimeter_cacher = 20  #find even super small markers. The surface locator will filter using min_marker_perimeter
        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        version = self.persistent_cache.get('version',0)
        cache = self.persistent_cache.get('marker_cache',None)
        if cache is None:
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            self.persistent_cache['version'] = self.marker_cache_version
        elif version != self.marker_cache_version:
            self.persistent_cache['version'] = self.marker_cache_version
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            logger.debug("Marker cache version missmatch. Rebuilding marker cache.")
        else:
            self.cache = Cache_List(cache)
            logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )

        self.init_marker_cacher()
        for s in self.surfaces:
            s.init_cache(self.cache,self.camera_calibration,self.min_marker_perimeter)
        self.recalculate()

    def load_surface_definitions_from_file(self):
        self.surface_definitions = Persistent_Dict(os.path.join(self.g_pool.rec_dir,'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []


    def init_gui(self):
        self.menu = ui.Scrolling_Menu('Offline Surface Tracker')
        self.g_pool.gui.append(self.menu)
        self.add_button = ui.Thumb('add_surface',setter=lambda x: self.add_surface(),getter=lambda:False,label='Add Surface',hotkey='a')
        self.g_pool.quickbar.append(self.add_button)
        self.update_gui_markers()

        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))

    def deinit_gui(self):
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu= None
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
        def close():
            self.alive=False

        def set_min_marker_perimeter(val):
            self.min_marker_perimeter = val
            self.notify_all_delayed({'subject':'min_marker_perimeter_changed'},delay=1)

        self.menu.elements[:] = []
        self.menu.append(ui.Button('Close',close))
        self.menu.append(ui.Slider('min_marker_perimeter',self,min=20,max=500,step=1,setter=set_min_marker_perimeter))
        self.menu.append(ui.Info_Text('The offline surface tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'))
        self.menu.append(ui.Info_Text("Press the export button or type 'e' to start the export."))
        self.menu.append(ui.Selector('mode',self,label='Mode',selection=["Show Markers and Surfaces","Show marker IDs","Show Heatmaps","Show Metrics"] ))
        self.menu.append(ui.Info_Text('To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'))
        self.menu.append(ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(ui.Button("Add surface", lambda:self.add_surface()))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface %s"%idx)
            s_menu.collapsed=True
            s_menu.append(ui.Text_Input('name',s))
            s_menu.append(ui.Text_Input('x',s.real_world_size,label='X size'))
            s_menu.append(ui.Text_Input('y',s.real_world_size,label='Y size'))
            s_menu.append(ui.Button('Open Debug Window',s.open_close_window))
            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)
            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove',remove_s))
            self.menu.append(s_menu)


    def on_notify(self,notification):
        if notification['subject'] == 'gaze_positions_changed':
            logger.info('Gaze positions changed. Recalculating.')
            self.recalculate()
        elif notification['subject'] == 'surfaces_changed':
            logger.info('Surfaces changed. Recalculating.')
            self.recalculate()
        elif notification['subject'] == 'min_marker_perimeter_changed':
            logger.info('Min marker perimeter adjusted. Re-detecting surfaces.')
            self.invalidate_surface_caches()
        elif notification['subject'] == "should_export":
            self.save_surface_statsics_to_file(notification['range'],notification['export_dir'])


    def on_window_resize(self,window,w,h):
        self.win_size = w,h


    def add_surface(self):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.update_gui_markers()

    def recalculate(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc distribution across all surfaces.
        results = []
        for s in self.surfaces:
            gaze_on_srf  = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255./max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s,c_map in zip(self.surfaces,results_c_maps):
            heatmap = np.ones((1,1,4),dtype=np.uint8)*125
            heatmap[:,:,:3] = c_map
            s.metrics_texture = Named_Texture()
            s.metrics_texture.update_from_ndarray(heatmap)


    def invalidate_surface_caches(self):
        for s in self.surfaces:
            s.cache = None

    def update(self,frame,events):
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        # self.markers = [m for m in self.cache[frame.index] if m['perimeter'>=self.min_marker_perimeter]
        self.markers = self.cache[frame.index]
        if self.markers == False:
            self.markers = []
            self.seek_marker_cacher(frame.index) # tell precacher that it better have everything from here on analyzed


        events['surfaces'] = []
        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers,self.camera_calibration,self.min_marker_perimeter)
            if s.detected:
                events['surfaces'].append({'name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp})

        if self.mode == "Show marker IDs":
            draw_markers(frame.img,self.markers)

        elif self.mode == "Show Markers and Surfaces":
            # edit surfaces by user
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)
                for s,v_idx in self.edit_surf_verts:
                    if s.detected:
                        new_pos =  s.img_to_ref_surface(np.array(pos))
                        s.move_vertex(v_idx,new_pos)
            else:
                # update srf with no or invalid cache:
                for s in self.surfaces:
                    if s.cache == None and s not in [s for s,i in self.edit_surf_verts]:
                        s.init_cache(self.cache,self.camera_calibration,self.min_marker_perimeter)
                        self.notify_all_delayed({'subject':'surfaces_changed'})



        #map recent gaze onto detected surfaces used for pupil server
        for s in self.surfaces:
            if s.detected:
                s.gaze_on_srf = []
                for p in events.get('gaze_positions',[]):
                    gp_on_s = tuple(s.img_to_ref_surface(np.array(p['norm_pos'])))
                    p['realtime gaze on ' + s.name] = gp_on_s
                    s.gaze_on_srf.append(gp_on_s)


        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()


    def init_marker_cacher(self):
        forking_enable(0) #for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path =  self.g_pool.capture.src
        timestamps = self.g_pool.capture.timestamps
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i',0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,timestamps,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,camera_calibration=self.camera_calibration,min_marker_perimeter=self.min_marker_perimeter,idx=idx)
            if self.cacher_run.value == False:
                self.recalculate()

    def seek_marker_cacher(self,idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()

        super(Offline_Surface_Tracker,self).gl_display()

        if self.mode == "Show Heatmaps":
            for s in  self.surfaces:
                s.gl_display_heatmap()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        glOrtho(-h_pad,  (self.cache.length-1)+h_pad, -v_pad, 1+v_pad,-1,1) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)


        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = RGBA(.8,.6,.2,.8)
        draw_polyline(cached_ranges,color=color,line_type=GL_LINES,thickness=4)

        color = RGBA(0,.7,.3,.8)

        for s in cached_surfaces:
            glTranslatef(0,.02,0)
            draw_polyline(s,color=color,line_type=GL_LINES,thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()


    def save_surface_statsics_to_file(self,export_range,export_dir):
        """
        between in and out mark

            report: gaze distribution:
                    - total gazepoints
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                fixations_on_name.csv
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        metrics_dir = os.path.join(export_dir,'surfaces')
        section = export_range
        in_mark = export_range.start
        out_mark = export_range.stop
        logger.info("exporting metrics to %s"%metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s"%metrics_dir)
                return


        with open(os.path.join(metrics_dir,'surface_visibility.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count',frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','visible_frame_count'))
            for s in self.surfaces:
                if s.cache == None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count  = s.visible_count_in_section(section)
                csv_writer.writerow( (s.name, visible_count) )
            logger.info("Created 'surface_visibility.csv' file")


        with open(os.path.join(metrics_dir,'surface_gaze_distribution.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.gaze_positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(('total_gaze_point_count',len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','gaze_count'))

            for s in self.surfaces:
                gaze_on_srf  = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp['base']['timestamp'] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow( (s.name, len(gaze_on_srf)) )

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf) ) )
            logger.info("Created 'surface_gaze_distribution.csv' file")



        with open(os.path.join(metrics_dir,'surface_events.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface events report
            csv_writer.writerow(('frame_number','timestamp','surface_name','surface_uid','event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id,exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id':enter_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'enter'})
                    events.append({'frame_id':exit_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'exit'})

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow( ( e['frame_id'],self.g_pool.timestamps[e['frame_id']],e['srf_name'],e['srf_uid'],e['event'] ) )
            logger.info("Created 'surface_events.csv' file")


        for s in self.surfaces:
            # per surface names:
            surface_name = '_'+s.name.replace('/','')+'_'+s.uid


            # save surface_positions as pickle file
            save_object(s.cache.to_list(),os.path.join(metrics_dir,'srf_positions'+surface_name))

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,'srf_positons'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer =csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx','timestamp','m_to_screen','m_from_screen','detected_markers'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow( (idx,ts,ref_srf_data['m_to_screen'],ref_srf_data['m_from_screen'],ref_srf_data['detected_markers']) )


            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir,'gaze_positions_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('world_timestamp','world_frame_idx','gaze_timestamp','x_norm','y_norm','x_scaled','y_scaled','on_srf'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
                                csv_writer.writerow( (ts,idx,gp['base']['timestamp'],gp['norm_pos'][0],gp['norm_pos'][1],gp['norm_pos'][0]*s.real_world_size['x'],gp['norm_pos'][1]*s.real_world_size['y'],gp['on_srf']) )


            # save fixation on srf as csv.
            with open(os.path.join(metrics_dir,'fixations_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('id','start_timestamp','duration','start_frame','end_frame','norm_pos_x','norm_pos_y','x_scaled','y_scaled','on_srf'))
                fixations_on_surface = []
                for idx,ref_srf_data in zip(range(len(self.g_pool.timestamps)),s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for f in s.fixations_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
                                fixations_on_surface.append(f)

                removed_duplicates = dict([(f['base']['id'],f) for f in fixations_on_surface]).values()
                for f_on_s in removed_duplicates:
                    f = f_on_s['base']
                    f_x,f_y = f_on_s['norm_pos']
                    f_on_srf = f_on_s['on_srf']
                    csv_writer.writerow( (f['id'],f['timestamp'],f['duration'],f['start_frame_index'],f['end_frame_index'],f_x,f_y,f_x*s.real_world_size['x'],f_y*s.real_world_size['y'],f_on_srf) )


            logger.info("Saved surface positon gaze and fixation data for '%s' with uid:'%s'"%(s.name,s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir,'heatmap'+surface_name+'.png'),s.heatmap)


        logger.info("Done exporting reference surface data.")
        # if s.detected and self.img is not None:
        #     #let save out the current surface image found in video

        #     #here we get the verts of the surface quad in norm_coords
        #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
        #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
        #     #now we convert to image pixel coods
        #     screen_space[:,1] = 1-screen_space[:,1]
        #     screen_space[:,1] *= self.img.shape[0]
        #     screen_space[:,0] *= self.img.shape[1]
        #     s_0,s_1 = s.real_world_size
        #     #no we need to flip vertically again by setting the mapped_space verts accordingly.
        #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
        #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
        #     #here we do the actual perspactive transform of the image.
        #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
        #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
        #     logger.info("Saved current image as .png file.")
        # else:
        #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)


    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """

        self.surface_definitions["offline_square_marker_surfaces"] = [rs.save_to_dict() for rs in self.surfaces if rs.defined]
        self.surface_definitions.close()

        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        for s in self.surfaces:
            s.close_window()
        self.deinit_gui()
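
The surface_events.csv export above turns each surface cache's positive_ranges into enter/exit rows sorted by frame index. A minimal standalone sketch of that idea, assuming (as comments elsewhere in these examples suggest) that positive_ranges is a list of [start_frame, end_frame] pairs; surface_events_rows and the sample data are illustrative, not part of the plugin:

def surface_events_rows(positive_ranges_by_surface, timestamps):
    """positive_ranges_by_surface: {surface_name: [[enter_idx, exit_idx], ...]}"""
    events = []
    for name, ranges in positive_ranges_by_surface.items():
        for enter_idx, exit_idx in ranges:
            events.append({'frame_id': enter_idx, 'srf_name': name, 'event': 'enter'})
            events.append({'frame_id': exit_idx, 'srf_name': name, 'event': 'exit'})
    # interleave enter/exit events of all surfaces in frame order, as the export does
    events.sort(key=lambda e: e['frame_id'])
    return [(e['frame_id'], timestamps[e['frame_id']], e['srf_name'], e['event'])
            for e in events]

# e.g. one surface visible during frames 2-5 and 8-9 of a 12-frame recording:
timestamps = [i / 30. for i in range(12)]
for row in surface_events_rows({'screen': [[2, 5], [8, 9]]}, timestamps):
    print(row)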
Example #20
0
            pages.append(new_url[:-5] + "_" + str(n) + ".html")
    return pages


def scra_list_page(pages):
    ret = list()
    for page_url in pages:
        pq = PQ(url=page_url)
        ret.extend(
            re.findall(
                r"(?P<ip>\d+\.\d+\.\d+\.\d+)\:(?P<port>\d+)@(?P<pro>\w+)#",
                pq.text()))
    return ret


queue = Queue()
all = list()


def test_proxy():
    while 1:
        try:
            p = queue.get_nowait()
        except:
            break
        proxies = {
            p[2].lower(): '%s:%s' % (p[0], p[1]),
        }
        try:
            begin = time.time()
            code = requests.get("http://www.google.com.hk/",
Example #21
0
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with videofile source.
    It uses a separate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """

    def __init__(self,g_pool,gui_settings={'pos':(220,200),'size':(300,300),'iconified':False}):
        super(Offline_Marker_Detector, self).__init__()
        self.g_pool = g_pool
        self.gui_settings = gui_settings
        self.order = .2


        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
           raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.rec_dir,'surface_definitions'))
        if self.load('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d,gaze_positions_by_frame=self.g_pool.positions_by_frame) for d in self.load('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.load('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d,gaze_positions_by_frame=self.g_pool.positions_by_frame) for d in self.load('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []


        # ui mode settings
        self.mode = c_int(0)
        # edit surfaces
        self.edit_surfaces = []

        #detector vars
        self.robust_detection = c_bool(1)
        self.aperture = c_int(11)
        self.min_marker_perimeter = 80

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache',[False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)
        self.recent_pupil_positions = []

        self.img_shape = None
        self.img = None


    def init_gui(self):
        import atb
        pos = self.gui_settings['pos']
        atb_label = "Marker Detector"
        self._bar = atb.Bar(name =self.__class__.__name__+str(id(self)), label=atb_label,
            help="circle", color=(50, 150, 50), alpha=50,
            text='light', position=pos,refresh=.1, size=self.gui_settings['size'])
        self._bar.iconified = self.gui_settings['iconified']
        self.update_bar_markers()

        #set up bar display padding
        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))


    def unset_alive(self):
        self.alive = False

    def load(self, var_name, default):
        return self.surface_definitions.get(var_name,default)
    def save(self, var_name, var):
            self.surface_definitions[var_name] = var

    def on_window_resize(self,window,w,h):
        self.win_size = w,h


    def on_click(self,pos,button,action):
        if self.mode.value == 1:
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surface verts in edit mode, let's see if the cursor is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0.,0.),(1.,0.),(1.,1.),(0.,1.))
                    x,y = pos
                    for s in self.surfaces:
                        if s.detected:
                            for (vx,vy),i in zip(s.ref_surface_to_img(np.array(surf_verts)),range(4)):
                                vx,vy = denormalize((vx,vy),(self.img_shape[1],self.img_shape[0]),flip_y=True)
                                if sqrt((x-vx)**2 + (y-vy)**2) <15: #img pixels
                                    self.edit_surfaces.append((s,i))

    def advance(self):
        pass

    def add_surface(self):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool,gaze_positions_by_frame=self.g_pool.positions_by_frame))
        self.update_bar_markers()

    def remove_surface(self,i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_bar_markers()

    def update_bar_markers(self):
        self._bar.clear()
        self._bar.add_button('close',self.unset_alive)
        self._bar.add_button("  add surface   ", self.add_surface, key='a')
        # when cache is updated, when surface is edited, when trimmarks are changed.
        # dropdown menu: markers and surfaces, surface edit mode, heatmaps, metrics
        self._bar.mode_enum = atb.enum("Mode",{"Show Markers and Frames":0,"Show Marker Id's":4, "Surface edit mode":1,"Show Heatmaps":2,"Show Metrics":3})
        self._bar.add_var("Mode",self.mode,vtype=self._bar.mode_enum)
        self._bar.add_button("  (re)-calculate gaze distributions   ", self.recalculate)
        self._bar.add_button("   Export Gaze and Surface Data   ", self.save_surface_statsics_to_file)

        for s,i in zip(self.surfaces,range(len(self.surfaces)))[::-1]:
            self._bar.add_var("%s_name"%i,create_string_buffer(512),getter=s.atb_get_name,setter=s.atb_set_name,group=str(i),label='name')
            self._bar.add_var("%s_markers"%i,create_string_buffer(512), getter=s.atb_marker_status,group=str(i),label='found/registered markers' )
            self._bar.add_var("%s_x_scale"%i,vtype=c_float, getter=s.atb_get_scale_x, min=1,setter=s.atb_set_scale_x,group=str(i),label='real width', help='this scale factor is used to adjust the coordinate space for your needs (think photo pixels or mm or whatever)' )
            self._bar.add_var("%s_y_scale"%i,vtype=c_float, getter=s.atb_get_scale_y,min=1,setter=s.atb_set_scale_y,group=str(i),label='real height',help='defining x and y scale factor you atumatically set the correct aspect ratio.' )
            self._bar.add_var("%s_window"%i,setter=s.toggle_window,getter=s.window_open,group=str(i),label='open in window')
            # self._bar.add_button("%s_hm"%i, s.generate_heatmap, label='generate_heatmap',group=str(i))
            # self._bar.add_button("%s_export"%i, self.save_surface_positions_to_file,data=i, label='export surface data',group=str(i))
            self._bar.add_button("%s_remove"%i, self.remove_surface,data=i,label='remove',group=str(i))


    def recalculate(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc metrics:
        gaze_in_section = list(chain(*self.g_pool.positions_by_frame[section]))
        results = []
        for s in self.surfaces:
            gaze_on_srf  = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255./max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s,c_map in zip(self.surfaces,results_c_maps):
            heatmap = np.ones((1,1,4),dtype=np.uint8)*125
            heatmap[:,:,:3] = c_map
            s.metrics_texture = create_named_texture(heatmap)




    def update(self,frame,recent_pupil_positions,events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers == False:
            self.markers = []
            self.seek_marker_cacher(frame.index) # tell the precacher to make sure everything from here on is analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf})

        if self.mode.value == 4:
            draw_markers(frame.img,self.markers)

        # edit surfaces by user
        if self.mode.value == 1:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window))
            pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels

            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    pos = normalize(pos,(self.img_shape[1],self.img_shape[0]),flip_y=True)
                    new_pos =  s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)
                    s.cache = None
                    self.heatmap = None
        else:
            # update srf with no or invalid cache:
            for s in self.surfaces:
                if s.cache == None:
                    s.init_cache(self.cache)


        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()


    def init_marker_cacher(self):
        forking_enable(0) #for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path =  os.path.join(self.g_pool.rec_dir,'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value(c_int,0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,idx=idx)

    def seek_marker_cacher(self,idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()
        for s in self.surfaces:
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.mode.value in (0,1):
            for m in self.markers:
                hat = np.array([[[0,0],[0,1],[1,1],[1,0],[0,0]]],dtype=np.float32)
                hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
                draw_gl_polyline(hat.reshape((5,2)),(0.1,1.,1.,.3),type='Polygon')
                draw_gl_polyline(hat.reshape((5,2)),(0.1,1.,1.,.6))

            for s in self.surfaces:
                s.gl_draw_frame()

        if self.mode.value == 1:
            for s in  self.surfaces:
                s.gl_draw_corners()
        if self.mode.value == 2:
            for s in  self.surfaces:
                s.gl_display_heatmap()
        if self.mode.value == 3:
            #draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        gluOrtho2D(-h_pad,  (self.cache.length-1)+h_pad, -v_pad, 1+v_pad) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)


        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = (8.,.6,.2,8.)
        draw_gl_polyline(cached_ranges,color=color,type='Lines',thickness=4)

        color = (0.,.7,.3,8.)

        for s in cached_surfaces:
            glTranslatef(0,.02,0)
            draw_gl_polyline(s,color=color,type='Lines',thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()


    def save_surface_statsics_to_file(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark


        """
        between in and out mark

            report: gaze distribution:
                    - total gazepoints
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        section = slice(in_mark,out_mark)


        metrics_dir = os.path.join(self.g_pool.rec_dir,"metrics_%s-%s"%(in_mark,out_mark))
        logger.info("exporting metrics to %s"%metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s"%metrics_dir)
                return


        with open(os.path.join(metrics_dir,'surface_visibility.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count',frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','visible_frame_count'))
            for s in self.surfaces:
                if s.cache == None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count  = s.visible_count_in_section(section)
                csv_writer.writerow( (s.name, visible_count) )
            logger.info("Created 'surface_visibility.csv' file")


        with open(os.path.join(metrics_dir,'surface_gaze_distribution.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(('total_gaze_point_count',len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','gaze_count'))

            for s in self.surfaces:
                gaze_on_srf  = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp["timestamp"] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow( (s.name, len(gaze_on_srf)) )

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf) ) )
            logger.info("Created 'surface_gaze_distribution.csv' file")



        with open(os.path.join(metrics_dir,'surface_events.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface events report
            csv_writer.writerow(('frame_number','timestamp','surface_name','surface_uid','event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id,exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id':enter_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'enter'})
                    events.append({'frame_id':exit_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'exit'})

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow( ( e['frame_id'],self.g_pool.timestamps[e['frame_id']],e['srf_name'],e['srf_uid'],e['event'] ) )
            logger.info("Created 'surface_events.csv' file")


        for s in self.surfaces:
            # per surface names:
            surface_name = '_'+s.name.replace('/','')+'_'+s.uid


            # save surface_positions as pickle file
            save_object(s.cache.to_list(),os.path.join(metrics_dir,'srf_positions'+surface_name))

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,'srf_positons'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer =csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx','timestamp','m_to_screen','m_from_screen','detected_markers'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow( (idx,ts,ref_srf_data['m_to_screen'],ref_srf_data['m_from_screen'],ref_srf_data['detected_markers']) )


            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir,'gaze_positions_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('world_frame_idx','world_timestamp','eye_timestamp','x_norm','y_norm','x_scaled','y_scaled','on_srf'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in ref_srf_data['gaze_on_srf']:
                                gp_x,gp_y = gp['norm_gaze_on_srf']
                                on_srf = (0 <= gp_x <= 1) and (0 <= gp_y <= 1)
                                csv_writer.writerow( (idx,ts,gp['timestamp'],gp_x,gp_y,gp_x*s.scale_factor[0],gp_y*s.scale_factor[1],on_srf) )

            logger.info("Saved surface positon data and gaze on surface data for '%s' with uid:'%s'"%(s.name,s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir,'heatmap'+surface_name+'.png'),s.heatmap)

            # if s.detected and self.img is not None:
            #     #let save out the current surface image found in video

            #     #here we get the verts of the surface quad in norm_coords
            #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
            #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
            #     #now we convert to image pixel coods
            #     screen_space[:,1] = 1-screen_space[:,1]
            #     screen_space[:,1] *= self.img.shape[0]
            #     screen_space[:,0] *= self.img.shape[1]
            #     s_0,s_1 = s.scale_factor
            #     #no we need to flip vertically again by setting the mapped_space verts accordingly.
            #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
            #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
            #     #here we do the actual perspactive transform of the image.
            #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.scale_factor[0]),int(s.scale_factor[1])) )
            #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
            #     logger.info("Saved current image as .png file.")
            # else:
            #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)


    def get_init_dict(self):
        d = {}
        if hasattr(self,'_bar'):
            gui_settings = {'pos':self._bar.position,'size':self._bar.size,'iconified':self._bar.iconified}
            d['gui_settings'] = gui_settings

        return d

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have an atb bar or glfw window destroy it here.
        """

        self.save("offline_square_marker_surfaces",[rs.save_to_dict() for rs in self.surfaces if rs.defined])
        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        self.surface_definitions.close()

        for s in self.surfaces:
            s.close_window()
        self._bar.destroy()
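
init_marker_cacher() above hands the heavy work to a fill_cache worker process, but the marker_detector_cacher module itself is not included in these examples. Below is a minimal sketch of what such a worker could look like, assuming the argument order used above; the -1 "no pending seek" sentinel and detect_markers_placeholder are assumptions, standing in for the real frame decoding and marker detection:

from multiprocessing import Process, Queue, Value
from ctypes import c_bool, c_int
import time

def detect_markers_placeholder(frame_index):
    # stand-in for decoding the video frame and running marker detection
    return []

def fill_cache_sketch(visited_list, video_file_path, cache_queue, seek_idx, run):
    idx = 0
    while run.value:
        if seek_idx.value != -1:
            # the GUI asked us to prioritize a specific frame (seek_marker_cacher)
            idx = seek_idx.value
            seek_idx.value = -1
        if idx >= len(visited_list):
            time.sleep(.01)  # everything analyzed; idle until seeked or stopped
            continue
        if not visited_list[idx]:
            markers = detect_markers_placeholder(idx)
            visited_list[idx] = True  # local copy; the parent learns about it via the queue
            cache_queue.put((idx, markers))  # consumed by update_marker_cache()
        idx += 1

if __name__ == '__main__':
    cache_queue = Queue()
    seek_idx = Value(c_int, -1)
    run = Value(c_bool, True)
    cacher = Process(target=fill_cache_sketch,
                     args=([False] * 50, 'world.avi', cache_queue, seek_idx, run))
    cacher.start()
    time.sleep(.5)
    run.value = False
    cacher.join()
    while not cache_queue.empty():
        print(cache_queue.get())

The main-process side of this protocol is exactly what update_marker_cache(), seek_marker_cacher() and close_marker_cacher() above implement.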
Example #22
0
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with videofile source.
    It uses a separate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """
    def __init__(self, g_pool, mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        self.order = .2

        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
            raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',
                                        []) != []:
            logger.debug(
                "Found ref surfaces defined or copied in previous session.")
            self.surfaces = [
                Offline_Reference_Surface(self.g_pool, saved_definition=d)
                for d in self.surface_definitions.get(
                    'offline_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',
                                          []) != []:
            logger.debug(
                "Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture."
            )
            self.surfaces = [
                Offline_Reference_Surface(self.g_pool, saved_definition=d)
                for d in self.surface_definitions.get(
                    'realtime_square_marker_surfaces', [])
                if isinstance(d, dict)
            ]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

        # ui mode settings
        self.mode = mode
        # edit surfaces
        self.edit_surfaces = []

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(
            os.path.join(g_pool.rec_dir, 'square_marker_cache'))
        self.cache = Cache_List(
            self.persistent_cache.get('marker_cache',
                                      [False for _ in g_pool.timestamps]))
        logger.debug(
            "Loaded marker cache %s / %s frames had been searched before" %
            (len(self.cache) - self.cache.count(False), len(self.cache)))
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)

        self.img_shape = None
        self.img = None

    def init_gui(self):
        self.menu = ui.Scrolling_Menu('Offline Marker Tracker')
        self.g_pool.gui.append(self.menu)

        self.add_button = ui.Thumb('add_surface',
                                   setter=self.add_surface,
                                   getter=lambda: False,
                                   label='Add Surface',
                                   hotkey='a')
        self.g_pool.quickbar.append(self.add_button)
        self.update_gui_markers()

        self.on_window_resize(glfwGetCurrentContext(),
                              *glfwGetWindowSize(glfwGetCurrentContext()))

    def deinit_gui(self):
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu = None
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
        self.menu.elements[:] = []
        self.menu.append(
            ui.Info_Text(
                'The offline marker tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'
            ))
        self.menu.append(ui.Button('Close', self.close))
        self.menu.append(
            ui.Selector('mode',
                        self,
                        label='Mode',
                        selection=[
                            "Show Markers and Frames", "Show marker IDs",
                            "Surface edit mode", "Show Heatmaps",
                            "Show Metrics"
                        ]))
        self.menu.append(
            ui.Info_Text(
                'To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'
            ))
        self.menu.append(
            ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(
            ui.Button("Export gaze and surface data",
                      self.save_surface_statsics_to_file))
        self.menu.append(
            ui.Button("Add surface", lambda: self.add_surface('_')))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface %s" % idx)
            s_menu.collapsed = True
            s_menu.append(ui.Text_Input('name', s))
            s_menu.append(ui.Text_Input('x', s.real_world_size,
                                        label='X size'))
            s_menu.append(ui.Text_Input('y', s.real_world_size,
                                        label='Y size'))
            s_menu.append(ui.Button('Open Debug Window', s.open_close_window))

            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)

            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove', remove_s))
            self.menu.append(s_menu)

    def close(self):
        self.alive = False

    def on_window_resize(self, window, w, h):
        self.win_size = w, h

    def on_click(self, pos, button, action):
        if self.mode == "Surface edit mode":
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surface verts in edit mode, let's see if the cursor is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0., 0.), (1., 0.), (1., 1.), (0., 1.))
                    x, y = pos
                    for s in self.surfaces:
                        if s.detected and s.defined:
                            for (vx, vy), i in zip(
                                    s.ref_surface_to_img(np.array(surf_verts)),
                                    range(4)):
                                vx, vy = denormalize(
                                    (vx, vy),
                                    (self.img_shape[1], self.img_shape[0]),
                                    flip_y=True)
                                if sqrt((x - vx)**2 +
                                        (y - vy)**2) < 15:  #img pixels
                                    self.edit_surfaces.append((s, i))

    def advance(self):
        pass

    def add_surface(self, _):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.update_gui_markers()

    def remove_surface(self, i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_gui_markers()

    def recalculate(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark, out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc distribution across all surfaces.
        results = []
        for s in self.surfaces:
            gaze_on_srf = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results, dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255. / max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s, c_map in zip(self.surfaces, results_c_maps):
            heatmap = np.ones((1, 1, 4), dtype=np.uint8) * 125
            heatmap[:, :, :3] = c_map
            s.metrics_texture = create_named_texture(heatmap.shape)
            update_named_texture(s.metrics_texture, heatmap)

    def update(self, frame, events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers == False:
            self.markers = []
            self.seek_marker_cacher(
                frame.index
            )  # tell the precacher to make sure everything from here on is analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                pass
                # events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf})

        if self.mode == "Show marker IDs":
            draw_markers(frame.img, self.markers)

        # edit surfaces by user
        if self.mode == "Surface edit mode":
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window), flip_y=True)

            for s, v_idx in self.edit_surfaces:
                if s.detected:
                    new_pos = s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx, new_pos)
                    s.cache = None
                    self.heatmap = None
        else:
            # update srf with no or invalid cache:
            for s in self.surfaces:
                if s.cache == None:
                    s.init_cache(self.cache)

        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()

    def init_marker_cacher(self):
        forking_enable(0)  #for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path = os.path.join(self.g_pool.rec_dir, 'world.mkv')
        if not os.path.isfile(video_file_path):
            video_file_path = os.path.join(self.g_pool.rec_dir, 'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i', 0)
        self.cacher_run = Value(c_bool, True)
        self.cacher = Process(target=fill_cache,
                              args=(visited_list, video_file_path,
                                    self.cache_queue, self.cacher_seek_idx,
                                    self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx, c_m = self.cache_queue.get()
            self.cache.update(idx, c_m)
            for s in self.surfaces:
                s.update_cache(self.cache, idx=idx)

    def seek_marker_cacher(self, idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()
        for s in self.surfaces:
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.mode == "Show Markers and Frames":
            for m in self.markers:
                hat = np.array([[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]],
                               dtype=np.float32)
                hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
                draw_polyline(hat.reshape((5, 2)),
                              color=RGBA(0.1, 1., 1., .3),
                              line_type=GL_POLYGON)
                draw_polyline(hat.reshape((5, 2)), color=RGBA(0.1, 1., 1., .6))

            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)

        if self.mode == "Surface edit mode":
            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)
                s.gl_draw_corners()

        if self.mode == "Show Heatmaps":
            for s in self.surfaces:
                s.gl_display_heatmap()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges:  # [[0,1],[3,4]]
            cached_ranges += (r[0], 0), (r[1], 0)  #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges:  # [[0,1],[3,4]]
                    found_at += (r[0], 0), (r[1], 0
                                            )  #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width, height = self.win_size
        h_pad = padding * (self.cache.length - 2) / float(width)
        v_pad = padding * 1. / (height - 2)
        glOrtho(
            -h_pad, (self.cache.length - 1) + h_pad, -v_pad, 1 + v_pad, -1, 1
        )  # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = RGBA(8., .6, .2, 8.)
        draw_polyline(cached_ranges,
                      color=color,
                      line_type=GL_LINES,
                      thickness=4)

        color = RGBA(0., .7, .3, 8.)

        for s in cached_surfaces:
            glTranslatef(0, .02, 0)
            draw_polyline(s, color=color, line_type=GL_LINES, thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

    def save_surface_statsics_to_file(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        """
        between in and out mark

            report: gaze distribution:
                    - total gazepoints
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                fixations_on_name.csv
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        section = slice(in_mark, out_mark)

        metrics_dir = os.path.join(self.g_pool.rec_dir,
                                   "metrics_%s-%s" % (in_mark, out_mark))
        logger.info("exporting metrics to %s" % metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s" % metrics_dir)
                return

        with open(os.path.join(metrics_dir, 'surface_visibility.csv'),
                  'wb') as csvfile:
            csv_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count', frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name', 'visible_frame_count'))
            for s in self.surfaces:
                if s.cache == None:
                    logger.warning(
                        "The surface is not cached. Please wait for the cacher to collect data."
                    )
                    return
                visible_count = s.visible_count_in_section(section)
                csv_writer.writerow((s.name, visible_count))
            logger.info("Created 'surface_visibility.csv' file")

        with open(os.path.join(metrics_dir, 'surface_gaze_distribution.csv'),
                  'wb') as csvfile:
            csv_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(
                chain(*self.g_pool.gaze_positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(
                ('total_gaze_point_count', len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name', 'gaze_count'))

            for s in self.surfaces:
                gaze_on_srf = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set(
                    [gp['base']["timestamp"] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow((s.name, len(gaze_on_srf)))

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf)))
            logger.info("Created 'surface_gaze_distribution.csv' file")

        with open(os.path.join(metrics_dir, 'surface_events.csv'),
                  'wb') as csvfile:
            csv_writer = csv.writer(csvfile,
                                    delimiter='\t',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)

            # surface events report
            csv_writer.writerow(('frame_number', 'timestamp', 'surface_name',
                                 'surface_uid', 'event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id, exit_frame_id in s.cache.positive_ranges:
                    events.append({
                        'frame_id': enter_frame_id,
                        'srf_name': s.name,
                        'srf_uid': s.uid,
                        'event': 'enter'
                    })
                    events.append({
                        'frame_id': exit_frame_id,
                        'srf_name': s.name,
                        'srf_uid': s.uid,
                        'event': 'exit'
                    })

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow(
                    (e['frame_id'], self.g_pool.timestamps[e['frame_id']],
                     e['srf_name'], e['srf_uid'], e['event']))
            logger.info("Created 'surface_events.csv' file")

        for s in self.surfaces:
            # per surface names:
            surface_name = '_' + s.name.replace('/', '') + '_' + s.uid

            # save surface_positions as pickle file
            save_object(
                s.cache.to_list(),
                os.path.join(metrics_dir, 'srf_positions' + surface_name))

            #save surface_positions as csv
            with open(
                    os.path.join(metrics_dir,
                                 'srf_positons' + surface_name + '.csv'),
                    'wb') as csvfile:
                csv_writer = csv.writer(csvfile,
                                        delimiter='\t',
                                        quotechar='|',
                                        quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx', 'timestamp', 'm_to_screen',
                                     'm_from_screen', 'detected_markers'))
                for idx, ts, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)),
                        self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow(
                                (idx, ts, ref_srf_data['m_to_screen'],
                                 ref_srf_data['m_from_screen'],
                                 ref_srf_data['detected_markers']))

            # save gaze on srf as csv.
            with open(
                    os.path.join(
                        metrics_dir,
                        'gaze_positions_on_surface' + surface_name + '.csv'),
                    'wb') as csvfile:
                csv_writer = csv.writer(csvfile,
                                        delimiter='\t',
                                        quotechar='|',
                                        quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(
                    ('world_timestamp', 'world_frame_idx', 'gaze_timestamp',
                     'x_norm', 'y_norm', 'x_scaled', 'y_scaled', 'on_srf'))
                for idx, ts, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)),
                        self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(
                                    idx, ref_srf_data['m_from_screen']):
                                csv_writer.writerow(
                                    (ts, idx, gp['base']['timestamp'],
                                     gp['norm_pos'][0], gp['norm_pos'][1],
                                     gp['norm_pos'][0] *
                                     s.real_world_size['x'],
                                     gp['norm_pos'][1] *
                                     s.real_world_size['y'], gp['on_srf']))

            # # save fixation on srf as csv.
            with open(
                    os.path.join(
                        metrics_dir,
                        'fixations_on_surface' + surface_name + '.csv'),
                    'wb') as csvfile:
                csv_writer = csv.writer(csvfile,
                                        delimiter='\t',
                                        quotechar='|',
                                        quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(
                    ('id', 'start_timestamp', 'duration', 'start_frame',
                     'end_frame', 'norm_pos_x', 'norm_pos_y', 'x_scaled',
                     'y_scaled', 'on_srf'))
                fixations_on_surface = []
                for idx, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)), s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for f in s.fixations_on_srf_by_frame_idx(
                                    idx, ref_srf_data['m_from_screen']):
                                fixations_on_surface.append(f)

                removed_duplicates = dict([
                    (f['base']['id'], f) for f in fixations_on_surface
                ]).values()
                for f_on_s in removed_duplicates:
                    f = f_on_s['base']
                    f_x, f_y = f_on_s['norm_pos']
                    f_on_srf = f_on_s['on_srf']
                    csv_writer.writerow(
                        (f['id'], f['timestamp'], f['duration'],
                         f['start_frame_index'], f['end_frame_index'], f_x,
                         f_y, f_x * s.real_world_size['x'],
                         f_y * s.real_world_size['y'], f_on_srf))

            logger.info(
                "Saved surface positon gaze and fixation data for '%s' with uid:'%s'"
                % (s.name, s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(
                    os.path.join(metrics_dir,
                                 'heatmap' + surface_name + '.png'), s.heatmap)

        logger.info("Done exporting reference surface data.")
        # if s.detected and self.img is not None:
        #     #let save out the current surface image found in video

        #     #here we get the verts of the surface quad in norm_coords
        #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
        #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
        #     #now we convert to image pixel coods
        #     screen_space[:,1] = 1-screen_space[:,1]
        #     screen_space[:,1] *= self.img.shape[0]
        #     screen_space[:,0] *= self.img.shape[1]
        #     s_0,s_1 = s.real_world_size
        #     #no we need to flip vertically again by setting the mapped_space verts accordingly.
        #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
        #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
        #     #here we do the actual perspactive transform of the image.
        #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
        #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
        #     logger.info("Saved current image as .png file.")
        # else:
        #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)

    def get_init_dict(self):
        return {'mode': self.mode}

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """

        self.surface_definitions["offline_square_marker_surfaces"] = [
            rs.save_to_dict() for rs in self.surfaces if rs.defined
        ]
        self.surface_definitions.close()

        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        for s in self.surfaces:
            s.close_window()
        self.deinit_gui()
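
recalculate() above turns per-surface gaze counts into false-color 1x1 tiles by normalizing the counts to 0-255 and running them through cv2.applyColorMap. A standalone sketch of that step; metrics_tiles and the sample counts are illustrative only:

import numpy as np
import cv2

def metrics_tiles(gaze_counts, alpha=125):
    if not gaze_counts:
        return []
    counts = np.array(gaze_counts, dtype=np.float32)
    max_count = counts.max()
    if max_count:
        counts *= 255. / max_count  # surface with the most gaze maps to full scale
    counts = np.uint8(counts).reshape(-1, 1)  # one-column 8-bit image, one row per surface
    colors = cv2.applyColorMap(counts, cv2.COLORMAP_JET)  # shape (N, 1, 3), BGR
    tiles = []
    for c in colors:
        tile = np.ones((1, 1, 4), dtype=np.uint8) * alpha  # constant alpha channel
        tile[:, :, :3] = c
        tiles.append(tile)
    return tiles

# e.g. three surfaces receiving 10, 40 and 0 gaze points in the trimmed section:
for tile in metrics_tiles([10, 40, 0]):
    print(tile[0, 0].tolist())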
Example #23
0
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with videofile source.
    It uses a separate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """

    def __init__(self,g_pool,menu_conf={'pos':(300,200),'size':(300,300),'collapsed':False},mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        self.menu_conf = menu_conf
        self.order = .2


        # all markers that are detected in the most recent frame
        self.markers = []
        # all registered surfaces

        if g_pool.app == 'capture':
           raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.rec_dir,'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []


        # ui mode settings
        self.mode = mode
        # edit surfaces
        self.edit_surfaces = []


        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache',[False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)

        self.img_shape = None
        self.img = None



    def init_gui(self):
        self.menu = ui.Scrolling_Menu('Offline Marker Tracker')
        self.menu.configuration = self.menu_conf
        self.g_pool.gui.append(self.menu)


        self.add_button = ui.Thumb('add_surface',setter=self.add_surface,getter=lambda:False,label='Add Surface',hotkey='a')
        self.g_pool.quickbar.append(self.add_button)
        self.update_gui_markers()

        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))

    def deinit_gui(self):
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu_conf= self.menu.configuration
            self.menu= None
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
        self.menu.elements[:] = []
        self.menu.append(ui.Info_Text('The offline marker tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'))
        self.menu.append(ui.Button('Close',self.close))
        self.menu.append(ui.Selector('mode',self,label='Mode',selection=["Show Markers and Frames","Show marker IDs", "Surface edit mode","Show Heatmaps","Show Metrics"] ))
        self.menu.append(ui.Info_Text('To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'))
        self.menu.append(ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(ui.Button("Export gaze and surface data", self.save_surface_statsics_to_file))
        self.menu.append(ui.Button("Add surface", lambda:self.add_surface('_')))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface %s"%idx)
            s_menu.collapsed=True
            s_menu.append(ui.Text_Input('name',s))
            #     self._bar.add_var("%s_markers"%i,create_string_buffer(512), getter=s.atb_marker_status,group=str(i),label='found/registered markers' )
            s_menu.append(ui.Text_Input('x',s.real_world_size,label='X size'))
            s_menu.append(ui.Text_Input('y',s.real_world_size,label='Y size'))
            s_menu.append(ui.Button('Open Debug Window',s.open_close_window))
            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)
            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove',remove_s))
            self.menu.append(s_menu)



    def close(self):
        self.alive = False

    def on_window_resize(self,window,w,h):
        self.win_size = w,h

    def on_click(self,pos,button,action):
        if self.mode=="Surface edit mode":
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surface verts in edit mode; let's see if the cursor is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0.,0.),(1.,0.),(1.,1.),(0.,1.))
                    x,y = pos
                    for s in self.surfaces:
                        if s.detected and s.defined:
                            for (vx,vy),i in zip(s.ref_surface_to_img(np.array(surf_verts)),range(4)):
                                vx,vy = denormalize((vx,vy),(self.img_shape[1],self.img_shape[0]),flip_y=True)
                                if sqrt((x-vx)**2 + (y-vy)**2) <15: #img pixels
                                    self.edit_surfaces.append((s,i))

    def advance(self):
        pass

    def add_surface(self,_):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.update_gui_markers()

    def remove_surface(self,i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_gui_markers()


    def recalculate(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc metrics:
        results = []
        for s in self.surfaces:
            gaze_on_srf  = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255./max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s,c_map in zip(self.surfaces,results_c_maps):
            heatmap = np.ones((1,1,4),dtype=np.uint8)*125
            heatmap[:,:,:3] = c_map
            s.metrics_texture = create_named_texture(heatmap.shape)
            update_named_texture(s.metrics_texture,heatmap)




    def update(self,frame,events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers is False:
            self.markers = []
            self.seek_marker_cacher(frame.index) # tell the cacher to prioritize analyzing frames from here onward

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                pass
                # events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf})

        if self.mode == "Show marker IDs":
            draw_markers(frame.img,self.markers)

        # edit surfaces by user
        if self.mode == "Surface edit mode":
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)

            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    new_pos =  s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)
                    s.cache = None
                    self.heatmap = None
        else:
            # update surfaces with no or invalid cache:
            for s in self.surfaces:
                if s.cache is None:
                    s.init_cache(self.cache)


        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()


    def init_marker_cacher(self):
        forking_enable(0) #for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path =  os.path.join(self.g_pool.rec_dir,'world.mkv')
        if not os.path.isfile(video_file_path):
            video_file_path =  os.path.join(self.g_pool.rec_dir,'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i',0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,idx=idx)

    def seek_marker_cacher(self,idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()
        for s in self.surfaces:
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.mode == "Show Markers and Frames":
            for m in self.markers:
                hat = np.array([[[0,0],[0,1],[1,1],[1,0],[0,0]]],dtype=np.float32)
                hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
                draw_polyline(hat.reshape((5,2)),color=RGBA(0.1,1.,1.,.3),line_type=GL_POLYGON)
                draw_polyline(hat.reshape((5,2)),color=RGBA(0.1,1.,1.,.6))

            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)

        if self.mode == "Surface edit mode":
            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)
                s.gl_draw_corners()

        if self.mode == "Show Heatmaps":
            for s in  self.surfaces:
                s.gl_display_heatmap()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        """
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        glOrtho(-h_pad,  (self.cache.length-1)+h_pad, -v_pad, 1+v_pad,-1,1) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)


        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = RGBA(.8,.6,.2,.8)
        draw_polyline(cached_ranges,color=color,line_type=GL_LINES,thickness=4)

        color = RGBA(0.,.7,.3,.8)

        for s in cached_surfaces:
            glTranslatef(0,.02,0)
            draw_polyline(s,color=color,line_type=GL_LINES,thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()


    def save_surface_statsics_to_file(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark


        """
        between in and out mark

            report: gaze distribution:
                    - total gazepoints
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visibility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        section = slice(in_mark,out_mark)


        metrics_dir = os.path.join(self.g_pool.rec_dir,"metrics_%s-%s"%(in_mark,out_mark))
        logger.info("exporting metrics to %s"%metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s"%metrics_dir)
                return


        with open(os.path.join(metrics_dir,'surface_visibility.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count',frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','visible_frame_count'))
            for s in self.surfaces:
                if s.cache is None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count  = s.visible_count_in_section(section)
                csv_writer.writerow( (s.name, visible_count) )
            logger.info("Created 'surface_visibility.csv' file")


        with open(os.path.join(metrics_dir,'surface_gaze_distribution.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.gaze_positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(('total_gaze_point_count',len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','gaze_count'))

            for s in self.surfaces:
                gaze_on_srf  = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp["timestamp"] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow( (s.name, len(gaze_on_srf)) )

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf) ) )
            logger.info("Created 'surface_gaze_distribution.csv' file")



        with open(os.path.join(metrics_dir,'surface_events.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface events report
            csv_writer.writerow(('frame_number','timestamp','surface_name','surface_uid','event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id,exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id':enter_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'enter'})
                    events.append({'frame_id':exit_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'exit'})

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow( ( e['frame_id'],self.g_pool.timestamps[e['frame_id']],e['srf_name'],e['srf_uid'],e['event'] ) )
            logger.info("Created 'surface_events.csv' file")


        for s in self.surfaces:
            # per surface names:
            surface_name = '_'+s.name.replace('/','')+'_'+s.uid


            # save surface_positions as pickle file
            save_object(s.cache.to_list(),os.path.join(metrics_dir,'srf_positions'+surface_name))

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,'srf_positions'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer =csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx','timestamp','m_to_screen','m_from_screen','detected_markers'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow( (idx,ts,ref_srf_data['m_to_screen'],ref_srf_data['m_from_screen'],ref_srf_data['detected_markers']) )


            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir,'gaze_positions_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('world_frame_idx','world_timestamp','eye_timestamp','x_norm','y_norm','x_scaled','y_scaled','on_srf'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
                                gp_x,gp_y = gp['norm_pos']
                                on_srf = (0 <= gp_x <= 1) and (0 <= gp_y <= 1)
                                csv_writer.writerow( (idx,ts,gp['timestamp'],gp_x,gp_y,gp_x*s.real_world_size['x'],gp_y*s.real_world_size['y'],on_srf) )

            logger.info("Saved surface positon data and gaze on surface data for '%s' with uid:'%s'"%(s.name,s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir,'heatmap'+surface_name+'.png'),s.heatmap)

            # if s.detected and self.img is not None:
            #     #let save out the current surface image found in video

            #     #here we get the verts of the surface quad in norm_coords
            #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
            #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
            #     #now we convert to image pixel coods
            #     screen_space[:,1] = 1-screen_space[:,1]
            #     screen_space[:,1] *= self.img.shape[0]
            #     screen_space[:,0] *= self.img.shape[1]
            #     s_0,s_1 = s.real_world_size
            #     #now we need to flip vertically again by setting the mapped_space verts accordingly.
            #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
            #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
            #     #here we do the actual perspective transform of the image.
            #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
            #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
            #     logger.info("Saved current image as .png file.")
            # else:
            #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)


    def get_init_dict(self):
        if self.menu:
            d = {'menu_conf':self.menu.configuration,'mode':self.mode}
        else:
            d = {'menu_conf':self.menu_conf,'mode':self.mode}
        return d



    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """

        self.surface_definitions["offline_square_marker_surfaces"] = [rs.save_to_dict() for rs in self.surfaces if rs.defined]
        self.surface_definitions.close()

        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        for s in self.surfaces:
            s.close_window()
        self.deinit_gui()
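
# --- Illustrative sketch (not part of the original plugin) -------------------
# The class above fills its marker cache with a background worker: a Process
# pushes (frame_index, result) pairs through a multiprocessing Queue, the GUI
# drains that queue without blocking, a shared integer lets the GUI re-seek the
# worker, and a shared bool plus join() shuts it down (see init_marker_cacher,
# update_marker_cache, seek_marker_cacher and close_marker_cacher). Below is a
# minimal, self-contained version of that pattern; fake_detect and
# fill_cache_sketch are illustrative stand-ins, not functions from the plugin.
from multiprocessing import Process, Queue, Value
from ctypes import c_bool, c_int
import time

def fake_detect(frame_idx):
    # stand-in for the real per-frame marker detection
    return ['marker@%d' % frame_idx]

def fill_cache_sketch(n_frames, result_queue, seek_idx, run_flag):
    idx = 0
    while run_flag.value and idx < n_frames:
        if seek_idx.value != -1:
            # honor seek requests coming from the consumer
            idx = seek_idx.value
            seek_idx.value = -1
        result_queue.put((idx, fake_detect(idx)))
        idx += 1
        time.sleep(0.001)  # simulate per-frame work

if __name__ == '__main__':
    cache = [False] * 100  # False means "this frame has not been searched yet"
    q = Queue()
    seek = Value(c_int, -1)
    run = Value(c_bool, True)
    cacher = Process(target=fill_cache_sketch, args=(len(cache), q, seek, run))
    cacher.start()
    time.sleep(0.05)
    while not q.empty():          # non-blocking drain, as in update_marker_cache
        i, markers = q.get()
        cache[i] = markers
    run.value = False             # ask the worker to stop, as in close_marker_cacher
    cacher.join()
    print('%d frames cached so far' % sum(1 for c in cache if c is not False))
# ------------------------------------------------------------------------------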
Example #24
0
def output_monitor_queue(out):
    q = Queue()
    p = Process(target=enqueue_output, args=(out, q))
    p.start()
    return q, p
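# --- Illustrative sketch (not part of the original snippet) ------------------
# output_monitor_queue() above relies on an enqueue_output() helper that is
# not shown. A minimal version of that pattern is sketched below, assuming
# "out" is a readable binary stream such as a subprocess pipe: the worker
# copies lines into the Queue so the parent can poll it without blocking.
# Note that handing an open pipe to a multiprocessing.Process only works with
# fork-based start methods; a threading.Thread is the portable alternative.
# enqueue_output_sketch and read_available are illustrative names only.
try:
    from Queue import Empty      # Python 2
except ImportError:
    from queue import Empty      # Python 3

def enqueue_output_sketch(out, q):
    # push every line of the stream into the queue, then signal EOF with None
    for line in iter(out.readline, b''):
        q.put(line)
    out.close()
    q.put(None)

def read_available(q):
    # drain whatever has arrived so far without blocking
    lines = []
    while True:
        try:
            lines.append(q.get_nowait())
        except Empty:
            break
    return lines
# ------------------------------------------------------------------------------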
class Offline_Screen_Tracker(Offline_Surface_Tracker,Screen_Tracker):
    """
    Special version of screen tracker for use with videofile source.
    It uses a separate process to search all frames of the world video file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame.
    Both caches are built up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """
    def __init__(self,g_pool,mode="Show Markers and Surfaces", min_marker_perimeter = 100,robust_detection=True, matrix=None):
        #self.g_pool = g_pool
        Trim_Marks_Extended_Exist = False
        for p in g_pool.plugins:
            if p.class_name == 'Trim_Marks_Extended':
                Trim_Marks_Extended_Exist = True
                break

        if not Trim_Marks_Extended_Exist:
            from trim_marks_patch import Trim_Marks_Extended
            g_pool.plugins.add(Trim_Marks_Extended)
            del Trim_Marks_Extended

        # heatmap
        self.matrix = matrix
        self.heatmap_blur = True
        self.heatmap_blur_gradation = 0.12
        self.heatmap_colormap = "viridis"
        self.gaze_correction_block_size = '1000'
        self.gaze_correction_min_confidence = 0.98
        self.gaze_correction_k = 2
        self.heatmap_use_kdata = False
        super(Offline_Screen_Tracker, self).__init__(g_pool,mode,min_marker_perimeter,robust_detection)

    def load_surface_definitions_from_file(self):
        self.surface_definitions = Persistent_Dict(os.path.join(self.g_pool.rec_dir,'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface_Extended(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface_Extended(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

    def init_gui(self):
        self.menu = ui.Scrolling_Menu('Offline Screen Tracker')
        self.g_pool.gui.append(self.menu)
        self.update_gui_markers()

        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))

    def init_marker_cacher(self):
        forking_enable(0) #for MacOs only
        from screen_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path =  self.g_pool.capture.source_path
        timestamps = self.g_pool.capture.timestamps
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i',0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,timestamps,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher))
        self.cacher.start()

    def update_marker_cache(self):
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,camera_calibration=self.camera_calibration,min_marker_perimeter=self.min_marker_perimeter,min_id_confidence=self.min_id_confidence,idx=idx)
            # if self.cacher_run.value == False:
            #     self.recalculate()

# function TMatrixForm.GetMatrix(AMonitor: integer): TStmMatrix;
# var
#   i,j,
#   LRowCount,LColCount,SHeight,SWidth,SYGap,SXGap,SLeft,STop: integer;
# begin
#   SHeight := 150;
#   SWidth := 150;
#   SYGap := 100;
#   SXGap := 100;
#   SLeft := 0;
#   STop := 0;
#   LRowCount := 3;
#   LColCount := 3;
#   SetLength(Result, LRowCount,LColCount);
#   for i := Low(Result) to High(Result) do
#     begin
#       SLeft := ((SWidth+SXGap)*i)+(Screen.Monitors[AMonitor].Width div 2)
#                   -(((SWidth+SXGap)*LColCount) div 2)+((SXGap) div 2);
#       for j:= Low(Result[i]) to High(Result[i]) do
#         begin
#           STop := ((SHeight+SYGap)*j)+(Screen.Monitors[AMonitor].Height div 2)
#                -(((SHeight+SYGap)*LRowCount) div 2)+(SYGap div 2);
#           Result[i][j].Left := SLeft;
#           Result[i][j].Top := STop;
#           Result[i][j].Width := SWidth;
#           Result[i][j].Height := SHeight;
#         end;
#     end;
# end;

    def matrix_segmentation(self):
        if self.mode != 'Show Markers and Surfaces':
            logger.error('Please select the "Show Markers and Surfaces" option in the Mode selector.')
            return
        screen_width = 1280
        screen_height = -768

        def move_srf_to_stm(s,p):
            """
            ######### #########
            # 0 . 1 # # lt.rt #
            # .   . # # .   . #
            # 3 . 2 # # lb.rb #
            # uv #### #########

            #########
            # 3 . 2 #
            # .   . #
            # 0 . 1 #
            # sv #### 
            """
            sw = 150./screen_width
            sh = 150./screen_height

            before = s.markers.values()[0].uv_coords
            #before = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32)
            after = before.copy() 
            after[0] = p
            after[1] = p + np.array([sw,0])
            after[2] = p + np.array([sw,sh])
            after[3] = p + np.array([0,sh])

            transform = cv2.getPerspectiveTransform(after,before)
            for m in s.markers.values():
                m.uv_coords = cv2.perspectiveTransform(m.uv_coords,transform)

        n = 3
        namei = 0
        for i in xrange(0,n):
            for j in xrange(0,n):
                namei += 1
                sname = 'S'+str(namei)
                for s in self.surfaces:
                    if s.name == sname:
                        move_srf_to_stm(s, self.matrix[i][j])

        for s in self.surfaces:        
            s.invalidate()
        
        self.update_gui_markers()
             
    def add_matrix_surfaces(self):
        if self.mode != 'Show Markers and Surfaces':
            logger.error('Please select the "Show Markers and Surfaces" option in the Mode selector.')
            return

        screen_width = 1280
        screen_height = -768
        def midpoint(v1, v2):
            return np.array([(v1[0]+v2[0])/2,(v1[1]+v2[1])/2])

        def get_m(s, n=3):
            def get_coord(index,midxy, y=False):
                if y:
                    rws = screen_height # must flip
                else:
                    rws = screen_width

                return ((250./rws)*index)+midxy-(((250./rws)*n)/2)+((100./rws)/2)
            rwsx = screen_width
            rwsy = screen_height
            lt = s.left_top
            rt = s.right_top
            lb = s.left_bottom
            rb = s.right_bottom
            m = [[[] for _ in xrange(0,n)] for _ in xrange(0,n)]
            for j in xrange(0,n):
                xt = get_coord(j,midpoint(lt,rt)[0])
                # yt = get_coord(j,midpoint(lt,rt)[1])
                # xb = get_coord(j,midpoint(lb,rb)[0])
                # yb = get_coord(j,midpoint(lb,rb)[1])
                for i in xrange(0,n):
                    yt = get_coord(i,midpoint(lt,rb)[1],True) 
                    # yt = get_coord(i,midpoint([xt,yt],[xb,yb])[1],True) 
                    m[i][j] = np.array([xt, yt])
            return m
 
        def create_surface(name):
            self.surfaces.append(Offline_Reference_Surface_Extended(self.g_pool))
            self.surfaces[-1].name = name
            self.surfaces[-1].real_world_size['x'] = 150
            self.surfaces[-1].real_world_size['y'] = 150
            # self.surfaces[-1].markers = markers

        n = 3
        for s in self.surfaces:
            if s.name == 'Screen':
                self.matrix = get_m(s,n)
                # markers = s.markers
        
        for i in xrange(0,n*n):
            create_surface('S'+str(i+1))

        for s in self.surfaces:        
            s.invalidate()
        
        self.update_gui_markers()
                  

    def screen_segmentation(self):
        """
        Note: the uv_coords ordering differs from the surface vertex ordering.

        0 . 1
        .   .
        3 . 2
        uv 

        3 . 2
        .   .
        0 . 1
        sv

        """
        if self.mode != 'Show Markers and Surfaces':
            logger.error('Please select the "Show Markers and Surfaces" option in the Mode selector.')
            return

        correctly_named = [False, False]
        for s in self.surfaces:
            if s.name == 'Left':
                correctly_named[0] = True
            if s.name == 'Right':
                correctly_named[1] = True

        if not all(correctly_named):
            logger.error('Please create two identical surfaces and name them "Left" and "Right".')
            return
            
        for s in self.surfaces:
            s.real_world_size['x'] = s.real_world_size['x']/2.
            lt = s.left_top
            rt = s.right_top
            lb = s.left_bottom
            rb = s.right_bottom

            midtop = np.array([(lt[0]+rt[0])/2,(lt[1]+rt[1])/2])
            midbottom = np.array([(lb[0]+rb[0])/2,(lb[1]+rb[1])/2])

            if s.name == 'Left':
                s.right_top = midtop
                s.right_bottom = midbottom

            if s.name == 'Right':
                s.left_top = midtop
                s.left_bottom = midbottom

        self.update_gui_markers()

    def raise_bug(self):
        raise Exception('bug')  # raising a string is invalid; raise a real exception for the test button

    def update_gui_markers(self):

        def close():
            self.alive = False

        def set_min_marker_perimeter(val):
            self.min_marker_perimeter = val
            self.notify_all_delayed({'subject':'min_marker_perimeter_changed'},delay=1)

        self.menu.elements[:] = []
        self.menu.append(ui.Button('Close',close))
        self.menu.append(ui.Slider('min_marker_perimeter',self,min=20,max=500,step=1,setter=set_min_marker_perimeter))
        self.menu.append(ui.Info_Text('The offline screen tracker will look for a screen for each frame of the video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'))
        self.menu.append(ui.Selector('mode',self,setter=self.set_mode,label='Mode',selection=["Show Markers and Surfaces","Show marker IDs","Show Heatmaps","Show Gaze Cloud", "Show Kmeans Correction","Show Mean Correction","Show Metrics"] ))
        
        if self.mode == 'Show Markers and Surfaces':
            self.menu.append(ui.Info_Text('To split the screen in two (left,right) surfaces 1) add two surfaces; 2) name them as "Left" and "Right"; 3) press Left Right segmentation'))
            self.menu.append(ui.Button("Left Right segmentation",self.screen_segmentation))
            self.menu.append(ui.Button("Matrix segmentation", self.matrix_segmentation))
            self.menu.append(ui.Button("Add M surfaces", self.add_matrix_surfaces))
            self.menu.append(ui.Button("bug", self.raise_bug))
        if self.mode == 'Show Kmeans Correction':
            self.menu.append(ui.Info_Text('Gaze Correction requires a non segmented screen. It requires k equally distributed stimuli on the screen.'))
            self.menu.append(ui.Text_Input('gaze_correction_block_size',self,label='Block Size'))
            self.menu.append(ui.Slider('gaze_correction_min_confidence',self,min=0.0,step=0.01,max=1.0,label='Minimum gaze confidence'))
            self.menu.append(ui.Slider('gaze_correction_k',self,min=1,step=1,max=24,label='K clusters'))

        if self.mode == 'Show Gaze Cloud':
            self.menu.append(ui.Slider('gaze_correction_min_confidence',self,min=0.0,step=0.01,max=1.0,label='Minimum gaze confidence'))
            self.menu.append(ui.Slider('gaze_correction_k',self,min=1,step=1,max=24,label='K clusters'))

        if self.mode == 'Show Heatmaps':
            self.menu.append(ui.Info_Text('Heatmap Settings'))
            self.menu.append(ui.Switch('heatmap_blur',self,label='Blur'))
            self.menu.append(ui.Slider('heatmap_blur_gradation',self,min=0.01,step=0.01,max=1.0,label='Blur Gradation'))
            self.menu.append(ui.Selector('heatmap_colormap',self,label='Color Map',selection=['magma', 'inferno', 'plasma', 'viridis', 'jet']))
            self.menu.append(ui.Switch('heatmap_use_kdata',self,label='Use K Data'))

        self.menu.append(ui.Info_Text('Select a section. To see heatmap, surface metrics, gaze cloud or gaze correction visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'))
        self.menu.append(ui.Button("(Re)-calculate gaze distributions",self.recalculate))
        self.menu.append(ui.Info_Text('To use data from all sections to generate visualizations click the next button instead.'))
        self.menu.append(ui.Button("(Re)-calculate",self.recalculate_all_sections))
        self.menu.append(ui.Button("Add screen surface",lambda:self.add_surface('_')))
        
        self.menu.append(ui.Info_Text('Export gaze metrics. We recalculate metrics for each section when exporting all sections. Press the recalculate button before exporting the currently selected section.'))
        self.menu.append(ui.Info_Text("Press the export button or type 'e' to start the export for the current section."))
        self.menu.append(ui.Button("Export all sections", self.export_all_sections))

        self.menu.append(ui.Info_Text('Requires segmentation plugin.'))
        self.menu.append(ui.Button("Export all distances", self.export_all_distances))
        self.menu.append(ui.Button("Precision Report", self.precision_report))
        self.menu.append(ui.Button("Slice 1.5 - precision", self.export_all_precision))

        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface %s"%idx)
            s_menu.collapsed=True
            s_menu.append(ui.Text_Input('name',s))
            s_menu.append(ui.Text_Input('x',s.real_world_size,label='X size'))
            s_menu.append(ui.Text_Input('y',s.real_world_size,label='Y size'))
            s_menu.append(ui.Button('Open Debug Window',s.open_close_window))
            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)
            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove',remove_s))
            self.menu.append(s_menu)

    def set_mode(self, value):
        self.mode = value
        self.update_gui_markers()

    def add_surface(self,_):
        self.surfaces.append(Offline_Reference_Surface_Extended(self.g_pool))

        self.surfaces[0].name = 'Screen'
        self.surfaces[0].real_world_size['x'] = 1280
        self.surfaces[0].real_world_size['y'] = 768

        # self.surfaces[0].name = 'Left'
        # self.surfaces[0].real_world_size['x'] = 1280
        # self.surfaces[0].real_world_size['y'] = 768

        # self.surfaces[1].name = 'Right'
        # self.surfaces[1].real_world_size['x'] = 1280
        # self.surfaces[1].real_world_size['y'] = 768

        self.update_gui_markers()

    # def update(self,frame,events):
    #     super(Offline_Screen_Tracker, self).update(frame, events)
    #     # locate surfaces
    #     for s in self.surfaces:
    #         if not s.locate_from_cache(frame.index):
    #             s.locate(self.markers)
    #     #     if s.detected:
    #     #         pass
    #     #         # events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf})

    def recalculate(self):
        pass

    # def recalculate(self):
    #     #super(Offline_Screen_Tracker, self).recalculate()
    #     # calc heatmaps
    #     in_mark = self.g_pool.trim_marks.in_mark
    #     out_mark = self.g_pool.trim_marks.out_mark
    #     section = slice(in_mark,out_mark)
        
    #     for s in self.surfaces:
    #         if s.defined:
    #             s.heatmap_blur = self.heatmap_blur
    #             s.heatmap_blur_gradation = self.heatmap_blur_gradation
    #             s.heatmap_colormap = self.heatmap_colormap
    #             s.heatmap_use_kdata = self.heatmap_use_kdata
    #             s.gaze_correction_block_size = self.gaze_correction_block_size
    #             s.gaze_correction_min_confidence = self.gaze_correction_min_confidence
    #             s.gaze_correction_k = self.gaze_correction_k

    #             s.generate_gaze_cloud(section)
    #             s.generate_gaze_correction(section)
    #             s.generate_mean_correction(section)
    #             s.generate_heatmap(section)


    #     # calc distribution across all surfaces.
    #     results = []
    #     for s in self.surfaces:
    #         gaze_on_srf  = s.gaze_on_srf_in_section(section)
    #         results.append(len(gaze_on_srf))
    #         self.metrics_gazecount = len(gaze_on_srf)

    #     if results == []:
    #         logger.warning("No surfaces defined.")
    #         return
    #     max_res = max(results)
    #     results = np.array(results,dtype=np.float32)
    #     if not max_res:
    #         logger.warning("No gaze on any surface for this section!")
    #     else:
    #         results *= 255./max_res
    #     results = np.uint8(results)
    #     results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

    #     for s,c_map in zip(self.surfaces,results_c_maps):
    #         heatmap = np.ones((1,1,4),dtype=np.uint8)*125
    #         heatmap[:,:,:3] = c_map
    #         s.metrics_texture = Named_Texture()
    #         s.metrics_texture.update_from_ndarray(heatmap)

    def recalculate_all_sections(self):
        """
            Treats all sections as one.
            Should not be used with overlapping sections.
        """
        # for now, it requires trim_marks_patch.py
        sections_alive = False
        for p in self.g_pool.plugins:
            if p.class_name == 'Trim_Marks_Extended':
                sections_alive = True

        if sections_alive:
            sections = self.g_pool.trim_marks.sections     
            for s in self.surfaces:
                if s.defined:
                    # assign user defined variables
                    s.heatmap_blur = self.heatmap_blur
                    s.heatmap_blur_gradation = self.heatmap_blur_gradation
                    s.heatmap_use_kdata = self.heatmap_use_kdata
                    s.heatmap_colormap = self.heatmap_colormap
                    s.gaze_correction_block_size = self.gaze_correction_block_size
                    s.gaze_correction_min_confidence = self.gaze_correction_min_confidence
                    s.gaze_correction_k = self.gaze_correction_k
                    
                    # generate visualizations
                    s.generate_gaze_cloud(sections, True)
                    s.generate_gaze_correction(sections, True)
                    s.generate_heatmap(sections, True)
                    s.generate_mean_correction(sections, True)

            logger.info("Recalculate visualizations done.")
                    
        else:
            logger.error("Trim_Marks_Extended not found. Have you opened it?")

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        super(Offline_Screen_Tracker, self).gl_display()

        if self.mode == "Show Gaze Cloud":
            for s in self.surfaces:
                s.gl_display_gaze_cloud()

        if self.mode == "Show Kmeans Correction":
            for s in self.surfaces:
                s.gl_display_gaze_correction()

        if self.mode == "Show Heatmap Correction":
            for s in self.surfaces:
                s.gl_display_mean_correction()

        if self.mode == "Show Mean Correction":
            for s in self.surfaces:
                s.gl_display_mean_correction()

    def precision_report(self, custom_tag=None):
        sections_alive = False
        if self.g_pool.trim_marks.class_name == 'Trim_Marks_Extended':
            sections_alive = True

        segmentation = None
        for p in self.g_pool.plugins:
            if p.class_name == 'Segmentation':
                if p.alive:
                    segmentation = p
                    break

        if (segmentation is not None) and sections_alive:
            export_path = os.path.join(self.g_pool.rec_dir,'exports')
            save_path = os.path.join(export_path,"precision_report")
            if os.path.isdir(save_path):
                logger.info("Overwriting data on precision_report")
            else:
                try:
                    os.mkdir(save_path)
                except:
                    logger.warning("Could not make dir %s"%save_path)
                    return

            angles,x1,y1 = segmentation.scapp_report['Angle'],segmentation.scapp_report['X1'],segmentation.scapp_report['Y1']
            unique_distances = sorted(set(zip(angles,x1,y1)))
            unique_responses = sorted(set(segmentation.scapp_report['ExpcResp']))
            segmentation.filter_by_expresp = True
            segmentation.filter_by_distance = True
            segmentation.filter_by_angle = False
            segmentation.mode = 'in out pairs'

            filtered_gaze = []
            metadata=[]
            for unique_distance in unique_distances:
                segmentation.distance = str(unique_distance)
                for unique_response in unique_responses:
                    (s1, s2, s3) = unique_distance
                    metadata.append("r_%s_distance_%s-%s-%s"%(unique_response, s1, s2, s3))
                    segmentation.expected_response = str(unique_response)
                    segmentation.clean_add_trim()

                    sections = self.g_pool.trim_marks.sections
                    gaze_no_confidence = 0
                    no_surface = 0
                    all_gaze = []
                    for s in self.surfaces: 
                        if s.defined:
                            for sec in sections:
                                in_mark = sec[0]
                                out_mark = sec[1]
                                sec = slice(in_mark,out_mark)
                                for frame_idx,c_e in enumerate(s.cache[sec]):
                                    if c_e:
                                        frame_idx+=sec.start
                                        for i, gp in enumerate(s.gaze_on_srf_by_frame_idx(frame_idx,c_e['m_from_screen'])):
                                            if gp['base']['confidence'] >= self.gaze_correction_min_confidence:
                                                all_gaze.append({'frame':frame_idx,'i':i,'norm_pos':gp['norm_pos'],'metatag':'%s-%s-%s-%s'%(unique_response, s1, s2, s3)})
                                            else:
                                                gaze_no_confidence += 1
                                    else:
                                        no_surface += 1

                    if not all_gaze:
                        logger.error("No Gaze points found.")
                        metadata.append("No gaze points found.")
                        return
                    else:
                        gaze_count = len(all_gaze)
                        metadata.append('Found %s frames with no screen/surface.'%no_surface)
                        metadata.append("Found %s gaze points."%gaze_count)
                        metadata.append("Removed '{0}' with confidence < '{1}'".format(gaze_no_confidence, self.gaze_correction_min_confidence))

                    filtered_gaze.append(all_gaze)

            if custom_tag:
                np.save(os.path.join(save_path,'data_ordered_by_metatag'+custom_tag),filtered_gaze)
            else:
                np.save(os.path.join(save_path,'data_ordered_by_metatag'),filtered_gaze)
            #np.savetxt(os.path.join(save_path,'metadata.txt'),metadata)  

            segmentation.clean_custom_events()
            for unique_distance in unique_distances:
                segmentation.distance = str(unique_distance)
                for unique_response in unique_responses:
                    segmentation.expected_response = str(unique_response)
                    segmentation.add_filtered_events()
            
            segmentation.auto_trim()
            filtered_gaze = []
            mean_at_zero_cluster = []
            norm_gaze = []
            for s in self.surfaces: 
                if s.defined:
                    for sec in self.g_pool.trim_marks.sections:
                        section_gaze = []
                        in_mark = sec[0]
                        out_mark = sec[1]
                        sec = slice(in_mark,out_mark)
                        for frame_idx,c_e in enumerate(s.cache[sec]):
                            if c_e:
                                frame_idx+=sec.start
                                for i, gp in enumerate(s.gaze_on_srf_by_frame_idx(frame_idx,c_e['m_from_screen'])):
                                    trial = segmentation.trial_from_timestamp(gp['base']['timestamp'])
                                    if gp['base']['confidence'] >= self.gaze_correction_min_confidence:
                                        section_gaze.append({'frame':frame_idx,'i':i,'norm_pos':gp['norm_pos'],'trial':trial})

                        filtered_gaze.append(section_gaze)
            if custom_tag:
                np.save(os.path.join(save_path,'data_ordered_by_trial'+custom_tag),filtered_gaze)
            else:         
                np.save(os.path.join(save_path,'data_ordered_by_trial'),filtered_gaze)                       
        else:
            logger.error("Please, open the segmentation plugin.")

    def export_all_precision(self):
        segmentation = None
        for p in self.g_pool.plugins:
            if p.class_name == 'Segmentation':
                if p.alive:
                    segmentation = p
                    break

        segmentation.onset = 0.0
        segmentation.offset = 1.5
        for status in range(16):
            tag = '_%s-%s'%(segmentation.onset,segmentation.offset)
            tag = tag.replace('.','-')
            logger.info(str(status)+tag)
            self.precision_report(tag)
            segmentation.onset += 0.1 
            segmentation.offset -= 0.1
        logger.info('end')

    def export_all_distances(self):
        segmentation = None
        for p in self.g_pool.plugins:
            if p.class_name == 'Segmentation':
                if p.alive:
                    segmentation = p
                    break

        if segmentation is not None:
            angles,x1,y1 = segmentation.scapp_report['Angle'],segmentation.scapp_report['X1'],segmentation.scapp_report['Y1']
            unique_items = sorted(set(zip(angles,x1,y1)))
            for unique_distance in unique_items:
                segmentation.distance = str(unique_distance)
                segmentation.clean_add_trim()
                in_mark = self.g_pool.trim_marks.in_mark
                out_mark = self.g_pool.trim_marks.out_mark

                # generate visualizations and data
                self.recalculate_all_sections()
                export_path = os.path.join(self.g_pool.rec_dir,'exports')
                save_path = os.path.join(export_path,"distance_%s-%s-%s"%unique_distance)

                if os.path.isdir(save_path):
                    logger.info("Overwriting data on distance %s-%s-%s"%unique_distance)
                else:
                    try:
                        os.mkdir(save_path)
                    except:
                        logger.warning("Could not make dir %s"%save_path)
                        return

                for s in self.surfaces:
                    surface_name = '_'+s.name.replace('/','')+'_'+s.uid
                    if s.heatmap is not None:
                        logger.info("Saved Heatmap as .png file.")
                        cv2.imwrite(os.path.join(save_path,'heatmap'+surface_name+'.png'),s.heatmap)

                    if s.gaze_cloud is not None:
                        logger.info("Saved Gaze Cloud as .png file.")
                        cv2.imwrite(os.path.join(save_path,'gaze_cloud'+surface_name+'.png'),s.gaze_cloud)

                    if s.gaze_correction is not None:
                        logger.info("Saved Gaze Correction as .png file.")
                        cv2.imwrite(os.path.join(save_path,'gaze_correction'+surface_name+'.png'),s.gaze_correction)

                    # export a surface image from the center of the first section for visualization purposes only 
                    self.export_section_image(save_path, s, in_mark, out_mark, os.path.join(save_path,'surface'+surface_name+'.png'))

                    # if s.gaze_correction_mean is not None:
                    #     logger.info("Saved Gaze Correction Mean as .png file.")
                    #     cv2.imwrite(os.path.join(save_path,'gaze_correction_mean'+surface_name+'.png'),s.gaze_correction_mean)

                    np.save(os.path.join(save_path,'source_data'),s.output_data)

    def export_section_image(self,save_path,s,in_mark,out_mark,surface_path):
        # lets save out the current surface image found in video
        seek_pos = in_mark + ((out_mark - in_mark)/2)
        self.g_pool.capture.seek_to_frame(seek_pos)
        new_frame = self.g_pool.capture.get_frame()
        frame = new_frame.copy()
        self.update(frame, {})
        if s.detected and frame.img is not None:
            #here we get the verts of the surface quad in norm_coords
            mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
            screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
            
            #now we convert to image pixel coords
            screen_space[:,1] = 1-screen_space[:,1]
            screen_space[:,1] *= frame.img.shape[0]
            screen_space[:,0] *= frame.img.shape[1]
            s_0,s_1 = s.real_world_size['x'], s.real_world_size['y'] 
            
            #now we need to flip vertically again by setting the mapped_space verts accordingly.
            mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
            M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
            
            #here we do the actual perspective transform of the image.
            srf_in_video = cv2.warpPerspective(frame.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
            cv2.imwrite(surface_path,srf_in_video)
            logger.info("Saved: '%s'"%surface_path)
        else:
            logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)


    def export_all_sections(self):
        for section in self.g_pool.trim_marks.sections:
            self.g_pool.trim_marks.focus = self.g_pool.trim_marks.sections.index(section)
            in_mark = self.g_pool.trim_marks.in_mark
            out_mark = self.g_pool.trim_marks.out_mark
            export_path = os.path.join(self.g_pool.rec_dir,'exports')
            if os.path.isdir(export_path):
                logger.info("Will overwrite export_path")
            else:
                try:
                    os.mkdir(export_path)
                except:
                    logger.warning("Could not make export dir %s"%export_path)
                    return

            metrics_dir = os.path.join(export_path,"%s-%s"%(in_mark,out_mark))
            if os.path.isdir(metrics_dir):
                logger.info("Will overwrite metrics_dir")
            else:
                try:
                    os.mkdir(metrics_dir)
                except:
                    logger.warning("Could not make metrics_dir %s"%metrics_dir)
                    return

            self.recalculate()
            self.save_surface_statsics_to_file(slice(in_mark,out_mark), metrics_dir)

            surface_dir = os.path.join(metrics_dir,'surfaces')

            for s in self.surfaces:
                surface_name = '_'+s.name.replace('/','')+'_'+s.uid
                if s.heatmap is not None:
                    logger.info("Saved Heatmap as .png file.")
                    cv2.imwrite(os.path.join(surface_dir,'heatmap'+surface_name+'.png'),s.heatmap)

                if s.gaze_cloud is not None:
                    logger.info("Saved Gaze Cloud as .png file.")
                    cv2.imwrite(os.path.join(surface_dir,'gaze_cloud'+surface_name+'.png'),s.gaze_cloud)

                if s.gaze_correction is not None:
                    logger.info("Saved Gaze Correction as .png file.")
                    cv2.imwrite(os.path.join(surface_dir,'gaze_correction'+surface_name+'.png'),s.gaze_correction)

                surface_path = os.path.join(surface_dir,'surface'+surface_name+'.png')

                # export a surface image from the center of the section for visualization purposes only
                self.export_section_image(surface_dir, s, in_mark, out_mark, surface_path)

                # lets create alternative versions of the surfaces *.pngs
                src1 = cv2.imread(surface_path)
                for g in s.output_data['gaze']:
                    cv2.circle(src1, (int(g[0]),int(g[1])), 3, (0, 0, 0), 0)

                for c in s.output_data['kmeans']:
                    cv2.circle(src1, (int(c[0]),int(c[1])), 5, (0, 0, 255), -1)
                cv2.imwrite(os.path.join(surface_dir,'surface-gaze_cloud'+surface_name+'.png'),src1)

                np.savetxt(os.path.join(surface_dir,'surface-gaze_cloud'+surface_name+'.txt'), s.output_data['gaze'])
                #src2 = cv2.imread(os.path.join(surface_dir,'heatmap'+surface_name+'.png'))
                #dst = cv2.addWeighted(src1, .9, src2, .1, 0.0);                
                #cv2.imwrite(os.path.join(surface_dir,'surface-heatmap'+surface_name+'.png'),dst)
            
            self.g_pool.capture.seek_to_frame(in_mark)
            logger.info("Done exporting reference surface data.")

    def export_raw_data(self):
        """
        .surface_gaze_positions - gaze_timestamp, surface_norm_x, surface_norm_y

        """
        sections_alive = False
        if self.g_pool.trim_marks.class_name == 'Trim_Marks_Extended':
            sections_alive = True

        segmentation = None
        for p in self.g_pool.plugins:
            if p.class_name == 'Segmentation':
                if p.alive:
                    segmentation = p
                    break

    def get_init_dict(self):
        return {'mode':self.mode,
                'matrix':self.matrix}

    def on_notify(self,notification):
        if notification['subject'] == 'gaze_positions_changed':
            logger.info('Gaze positions changed. Please recalculate.')
            #self.recalculate()
        elif notification['subject'] == 'surfaces_changed':
            logger.info('Surfaces changed. Please recalculate.')
            #self.recalculate()
        elif notification['subject'] == 'min_marker_perimeter_changed':
            logger.info('min_marker.. not implemented')
            #logger.info('Min marker perimeter adjusted. Re-detecting surfaces.')
            #self.invalidate_surface_caches()
        elif notification['subject'] == "should_export":
            logger.info('should_export.. not implemented')
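
# --- Illustrative sketch (not part of the original plugin) -------------------
# export_raw_data() above is a stub; its docstring describes a
# ".surface_gaze_positions" table of (gaze_timestamp, surface_norm_x,
# surface_norm_y). The function below sketches one possible, hypothetical way
# to write such a file. write_surface_gaze_positions and its gaze_points
# argument are illustrative names; gaze_points is assumed to be a list of
# dicts with 'timestamp' and 'norm_pos' keys, similar to the gp dicts used
# elsewhere in this document.
import csv
import os

def write_surface_gaze_positions(metrics_dir, surface_name, gaze_points):
    path = os.path.join(metrics_dir, 'surface_gaze_positions' + surface_name + '.csv')
    # under Python 2 (as used by the plugin code above) open with 'wb' instead
    with open(path, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        writer.writerow(('gaze_timestamp', 'surface_norm_x', 'surface_norm_y'))
        for gp in gaze_points:
            x, y = gp['norm_pos']
            writer.writerow((gp['timestamp'], x, y))
    return path
# ------------------------------------------------------------------------------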