Example #1
def main():
    # To assign camera by name: put string(s) in list

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='GUI for gaze tracking and pupillometry')
    parser.add_argument('-eye', dest='eye_file', type=str, help="Work with existing video recording, instead of live feed", default='')
    parser.add_argument('-world', dest='world_file', type=str, help="Work with existing video recording, instead of live feed", default='')

    args = parser.parse_args()

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    if args.eye_file == '':
        eye_src = ["UI154xLE-M", "USB Camera-B4.09.24.1", "FaceTime Camera (Built-in)", "Microsoft", "6000","Integrated Camera"]
        # to assign cameras directly, using integers as demonstrated below
        # eye_src = 1
    else:
#        print "Using provide file: %s" % args.filename
        eye_src = args.eye_file

    if args.world_file == '':
        world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]
        # to assign cameras directly, using integers as demonstrated below
        # world_src = 0
    else:
        world_src = args.world_file

    # Camera video size in pixels (width,height)
    eye_size = (260,216) #(1280,1024)
    world_size = (640,480)


    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double,0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool,world_src,world_size)

    # Exit / clean-up
    p_eye.join()
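
The example above wires its processes together with multiprocessing primitives: a Queue for pupil data, a one-way Pipe, and RawValue flags shared between parent and child. Below is a minimal, self-contained sketch of that IPC pattern, independent of the Pupil code and using placeholder payloads.

from ctypes import c_bool
from multiprocessing import Process, Queue, Pipe
from multiprocessing.sharedctypes import RawValue

def worker(pupil_queue, tx, quit_flag):
    # child process: flip the shared flag, publish data, signal readiness
    quit_flag.value = True
    pupil_queue.put({'norm_pupil': (0.5, 0.5)})   # placeholder payload
    tx.send('eye process ready')

if __name__ == '__main__':
    pupil_queue = Queue()
    rx, tx = Pipe(False)                # duplex=False: rx receives, tx sends
    quit_flag = RawValue(c_bool, 0)
    p = Process(target=worker, args=(pupil_queue, tx, quit_flag))
    p.start()
    print(rx.recv())                    # blocks until the child has sent
    print(pupil_queue.get(), quit_flag.value)
    p.join()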
Example #2
def main():
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000", "Integrated Camera"]
    world_src = [
        "Logitech Camera", "(046d:081d)", "C510", "B525", "C525", "C615",
        "C920", "C930e"
    ]

    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Pupil/datasets/p1-left/frames/test.avi'
    # world_src = "/Users/mkassner/Desktop/2014_01_21/000/world.avi"

    # Camera video size in pixels (width,height)
    eye_size = (640, 360)
    world_size = (1280, 720)

    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool, 0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double, 0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool, eye_src, eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool, world_src, world_size)

    # Exit / clean-up
    p_eye.join()
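
In these examples eye_src and world_src encode the capture source by type: a list of name fragments selects a live camera by name, a plain integer selects a device index, and a string is treated as a path to a pre-recorded video. The following is only a hedged sketch of that convention, not the real autoCreateCapture logic.

def describe_source(src):
    # mirrors the convention used for eye_src / world_src above
    if isinstance(src, list):
        return "live camera matched by name against %s" % src
    if isinstance(src, int):
        return "live camera at device index %i" % src
    return "pre-recorded video at %s" % src

print(describe_source(["Microsoft", "6000", "Integrated Camera"]))
print(describe_source(1))
print(describe_source("/path/to/world.avi"))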
Example #3
def main():
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000","Integrated Camera"]
    world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]

    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Pupil/datasets/p1-left/frames/test.avi'
    # world_src = "/Users/mkassner/Desktop/2014_01_21/000/world.avi"

    # Camera video size in pixels (width,height)
    eye_size = (640,360)
    world_size = (1280,720)


    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double,0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool,world_src,world_size)



    # Exit / clean-up
    p_eye.join()
Example #4
def main():
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000","Integrated Camera"]
    world_src = ["Logitech Camera","B525", "C525","C615","C920","C930e"]

    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = "/Users/mkassner/Pupil/datasets/eye2_fieldtest/eye 10.avi"
    # world_src = "/Users/mkassner/Downloads/2013_10_22_M25/000/world.avi"

    # Camera video size in pixels (width,height)
    eye_size = (640,360)
    world_size = (1280,720)


    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))

    # Spawn subprocess:
    p_eye.start()
    # On Linux, we need to give the camera driver some time before requesting another camera.
    sleep(0.5)
    # On MacOS cameras using MJPG compression (world camera) need to run in the main process.
    world(g_pool,world_src,world_size)

    # Exit / clean-up
    p_eye.join()
Example #5
def world(g_pool,cap_src,cap_size):
    """world
    Creates a window, gl context.
    Grabs images from a capture.
    Receives Pupil coordinates from g_pool.pupil_queue
    Can run various plug-ins.
    """


    # Callback functions
    def on_resize(window,w, h):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(window)
        adjust_gl_view(w,h)
        atb.TwWindowSize(w, h)
        glfwMakeContextCurrent(active_window)

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key,action):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window,char):
        if not atb.TwEventCharGLFW(char,1):
            pass

    def on_button(window,button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button,action):
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(world_window))
            pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
            for p in g.plugins:
                p.on_click(pos,button,action)

    def on_pos(window,x, y):
        if atb.TwMouseMotion(int(x),int(y)):
            pass

    def on_scroll(window,x,y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')



    # load session persistent settings
    session_settings = shelve.open(os.path.join(g_pool.user_dir,'user_settings_world'),protocol=2)
    def load(var_name,default):
        return session_settings.get(var_name,default)
    def save(var_name,var):
        session_settings[var_name] = var


    # Initialize capture, check if it works
    cap = autoCreateCapture(cap_src, cap_size, 24)
    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    frame = cap.get_frame()
    if frame.img is None:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height,width = frame.img.shape[:2]


    # helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)

    def set_window_size(mode,data):
        height,width = frame.img.shape[:2]
        ratio = (1,.75,.5,.25)[mode]
        w,h = int(width*ratio),int(height*ratio)
        glfwSetWindowSize(world_window,w,h)
        data.value=mode # update the bar.value

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value

    def open_calibration(selection,data):
        # prepare destruction of current ref_detector... and remove it
        for p in g.plugins:
            if isinstance(p,calibration_routines.detector_by_index):
                p.alive = False
        g.plugins = [p for p in g.plugins if p.alive]

        new_ref_detector = calibration_routines.detector_by_index[selection](g_pool,atb_pos=bar.next_atb_pos)
        g.plugins.append(new_ref_detector)
        g.plugins.sort(key=lambda p: p.order)

        # save the value for atb bar
        data.value=selection

    def toggle_record_video():
        for p in g.plugins:
            if isinstance(p,recorder.Recorder):
                p.alive = False
                return
        # set up folder within recordings named by user input in atb
        if not bar.rec_name.value:
            bar.rec_name.value = recorder.get_auto_name()

        new_plugin = recorder.Recorder(g_pool,bar.rec_name.value, bar.fps.value, frame.img.shape, bar.record_eye.value, g_pool.eye_tx)
        g.plugins.append(new_plugin)
        g.plugins.sort(key=lambda p: p.order)

    def toggle_show_calib_result():
        for p in g.plugins:
            if isinstance(p,Show_Calibration):
                p.alive = False
                return

        new_plugin = Show_Calibration(g_pool,frame.img.shape)
        g.plugins.append(new_plugin)
        g.plugins.sort(key=lambda p: p.order)

    def toggle_server():
        for p in g.plugins:
            if isinstance(p,Pupil_Server):
                p.alive = False
                return

        new_plugin = Pupil_Server(g_pool,(10,300))
        g.plugins.append(new_plugin)
        g.plugins.sort(key=lambda p: p.order)


    def toggle_ar():
        for p in g.plugins:
            if isinstance(p,Marker_Detector):
                p.alive = False
                return

        new_plugin = Marker_Detector(g_pool,(10,400))
        g.plugins.append(new_plugin)
        g.plugins.sort(key=lambda p: p.order)

    atb.init()
    # add main controls ATB bar
    bar = atb.Bar(name = "World", label="Controls",
            help="Scene controls", color=(50, 50, 50), alpha=100,valueswidth=150,
            text='light', position=(10, 10),refresh=.3, size=(300, 200))
    bar.next_atb_pos = (10,220)
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.calibration_type = c_int(load("calibration_type",0))
    bar.record_eye = c_bool(load("record_eye",0))
    bar.window_size = c_int(load("window_size",0))
    window_size_enum = atb.enum("Display Size",{"Full":0, "Medium":1,"Half":2,"Mini":3})
    calibrate_type_enum = atb.enum("Calibration Method",calibration_routines.index_by_name)
    bar.rec_name = create_string_buffer(512)
    bar.version = create_string_buffer(g_pool.version,512)
    bar.rec_name.value = recorder.get_auto_name()
    bar.add_var("fps", bar.fps, step=1., readonly=True)
    bar.add_var("display size", vtype=window_size_enum,setter=set_window_size,getter=get_from_data,data=bar.window_size)
    bar.add_var("calibration method",setter=open_calibration,getter=get_from_data,data=bar.calibration_type, vtype=calibrate_type_enum,group="Calibration", help="Please choose your desired calibration method.")
    bar.add_button("show calibration result",toggle_show_calib_result, group="Calibration", help="Click to show calibration result.")
    bar.add_var("session name",bar.rec_name, group="Recording", help="creates folder Data_Name_XXX, where xxx is an increasing number")
    bar.add_button("record", toggle_record_video, key="r", group="Recording", help="Start/Stop Recording")
    bar.add_var("record eye", bar.record_eye, group="Recording", help="check to save raw video of eye")
    bar.add_button("start/stop marker tracking",toggle_ar,key="x",help="find markers in scene to map gaze onto referace surfaces")
    bar.add_button("start/stop server",toggle_server,key="s",help="the server broadcasts pupil and gaze positions locally or via network")
    bar.add_separator("Sep1")
    bar.add_var("version",bar.version, readonly=True)
    bar.add_var("exit", g_pool.quit)

    # add uvc camera controls ATB bar
    cap.create_atb_bar(pos=(320,10))

    # Initialize glfw
    glfwInit()
    world_window = glfwCreateWindow(width, height, "World", None, None)
    glfwMakeContextCurrent(world_window)

    # Register callbacks world_window
    glfwSetWindowSizeCallback(world_window,on_resize)
    glfwSetWindowCloseCallback(world_window,on_close)
    glfwSetKeyCallback(world_window,on_key)
    glfwSetCharCallback(world_window,on_char)
    glfwSetMouseButtonCallback(world_window,on_button)
    glfwSetCursorPosCallback(world_window,on_pos)
    glfwSetScrollCallback(world_window,on_scroll)

    #set the last saved window size
    set_window_size(bar.window_size.value,bar.window_size)
    on_resize(world_window, *glfwGetFramebufferSize(world_window))
    glfwSetWindowPos(world_window,0,0)

    # gl_state settings
    basic_gl_setup()

    # refresh speed settings
    glfwSwapInterval(0)

    # load last calibration data
    try:
        pt_cloud = np.load(os.path.join(g_pool.user_dir,'cal_pt_cloud.npy'))
        logger.info("Using calibration found in %s" %g_pool.user_dir)
        map_pupil = calibrate.get_map_from_cloud(pt_cloud,(width,height))
    except:
        logger.info("No calibration found.")
        def map_pupil(vector):
            """ 1 to 1 mapping
            """
            return vector

    # create container for globally scoped vars (within world)
    g = Temp()
    g.plugins = []
    g_pool.map_pupil = map_pupil

    #load calibration plugin
    open_calibration(bar.calibration_type.value,bar.calibration_type)

    #load gaze_display plugin
    g.plugins.append(Display_Recent_Gaze(g_pool))

    # Event loop
    while not g_pool.quit.value:

        # Get an image from the grabber
        frame = cap.get_frame()
        update_fps()


        #a container that allows plugins to post and read events
        events = []

        #receive and map pupil positions
        recent_pupil_positions = []
        while not g_pool.pupil_queue.empty():
            p = g_pool.pupil_queue.get()
            if p['norm_pupil'] is None:
                p['norm_gaze'] = None
            else:
                p['norm_gaze'] = g_pool.map_pupil(p['norm_pupil'])
            recent_pupil_positions.append(p)


        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame,recent_pupil_positions,events)

        #check if a plugin needs to be destroyed
        g.plugins = [p for p in g.plugins if p.alive]

        # render camera image
        glfwMakeContextCurrent(world_window)
        draw_gl_texture(frame.img)

        # render visual feedback from loaded plugins
        for p in g.plugins:
            p.gl_display()

        atb.draw()
        glfwSwapBuffers(world_window)
        glfwPollEvents()


    # de-init all running plugins
    for p in g.plugins:
        p.alive = False
        #reading p.alive actually runs plug-in cleanup
        _ = p.alive

    save('window_size',bar.window_size.value)
    save('calibration_type',bar.calibration_type.value)
    save('record_eye',bar.record_eye.value)
    session_settings.close()

    cap.close()
    glfwDestroyWindow(world_window)
    glfwTerminate()
    logger.debug("Process done")
Example #6
def export(should_terminate,
           frames_to_export,
           current_frame,
           data_dir,
           start_frame=None,
           end_frame=None,
           plugin_initializers=[],
           out_file_path=None):

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    #parse and load data dir info
    video_path = data_dir + "/world.avi"
    timestamps_path = data_dir + "/timestamps.npy"
    gaze_positions_path = data_dir + "/gaze_positions.npy"
    record_path = data_dir + "/world_viz.avi"

    #parse info.csv file
    with open(data_dir + "/info.csv") as info:
        meta_info = dict(
            ((line.strip().split('\t')) for line in info.readlines()))
    rec_version = meta_info["Capture Software Version"]
    rec_version_int = int(
        filter(type(rec_version).isdigit,
               rec_version)[:3]) / 100  #(get major,minor,fix of version)
    logger.debug("Exporting a video from recording with version: %s , %s" %
                 (rec_version, rec_version_int))

    #load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)
    #correlate data
    positions_by_frame = correlate_gaze(gaze_list, timestamps)

    # Initialize capture, check if it works
    cap = autoCreateCapture(video_path, timestamps=timestamps_path)
    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    width, height = cap.get_size()

    #Out file path verification: we do this earlier as well, but if one uses a separate tool, this check will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(data_dir, "world_viz.avi")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = data_dir
        if not file_name:
            file_name = 'world_viz.avi'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    #Trim mark verification
    #make sure the trim marks (start frame, end frame) make sense: we define them like Python list slices, so we can test them the same way.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warn(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    if start_frame is None:
        start_frame = 0

    #these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    #lets get the avg. framerate for our slice of video:
    fps = float(len(trimmed_timestamps)) / (trimmed_timestamps[-1] -
                                            trimmed_timestamps[0])
    logger.debug("Framerate of export video is %s" % fps)

    #setup of writer
    writer = cv2.VideoWriter(out_file_path, cv2.cv.CV_FOURCC(*'DIVX'), fps,
                             (width, height))

    cap.seek_to_frame(start_frame)

    start_time = time()

    plugins = []
    g = Temp()
    g.plugins = plugins
    g.app = 'exporter'

    # load plugins from initializers:
    for initializer in plugin_initializers:
        name, args = initializer
        logger.debug("Loading plugin: %s with settings %s" % (name, args))
        try:
            p = plugin_by_name[name](g, **args)
            plugins.append(p)
        except:
            logger.warning("Plugin '%s' failed to load." % name)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            #explicit release of VideoWriter
            writer.release()
            writer = None
            return False

        new_frame = cap.get_frame()
        #end of video logic: pause at last frame.
        if not new_frame:
            logger.error("Could not read all frames.")
            #explicit release of VideoWriter
            writer.release()
            writer = None
            return False
        else:
            frame = new_frame

        #new positions and events
        current_pupil_positions = positions_by_frame[frame.index]
        events = None

        # allow each Plugin to do its work.
        for p in plugins:
            p.update(frame, current_pupil_positions, events)

        # # render gl visual feedback from loaded plugins
        # for p in plugins:
        #     p.gl_display(frame)

        writer.write(frame.img)
        current_frame.value += 1

    writer.release()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))

    return True
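
export() above expects its progress counters as shared values so a launching process can watch it and abort it. A hedged usage sketch follows; the recording path is a placeholder, and the module name "exporter" is only an assumption for importing the export() function shown above.

from ctypes import c_bool, c_int
from multiprocessing import Process, Value

from exporter import export   # hypothetical module name for the export() above

if __name__ == '__main__':
    should_terminate = Value(c_bool, 0)
    frames_to_export = Value(c_int, 0)
    current_frame = Value(c_int, 0)
    p = Process(target=export,
                args=(should_terminate, frames_to_export, current_frame,
                      '/path/to/recording'),       # placeholder data_dir
                kwargs={'start_frame': 0, 'end_frame': None})
    p.start()
    # the launcher can poll current_frame.value for progress,
    # or set should_terminate.value = True to abort the export
    p.join()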
Example #7
def main():

	# To assign by name: put string(s) in list
	eye_src = ["Microsoft", "6000"]
	world_src = ["Logitech Camera","B525", "C525","C615","C920","C930e"]

	# to assign cameras directly, using integers as demonstrated below
	# eye_src = 1
	# world_src = 0

	# to use a pre-recorded video.
	# Use a string to specify the path to your video file as demonstrated below
	# eye_src = "/Users/mkassner/Pupil/pupil_google_code/wiki/videos/eye_simple_filter.avi"
	# world_src = 0

	# Camera video size in pixels (width,height)
	eye_size = (640,360)
	world_size = (1280,720)


	# Use the player - a separate window for video playback and calibration animation
	use_player = True
	#startup size for the player window: this can be whatever you like
	player_size = (640,360)

	# Create and initialize shared globals
	g_pool = Temp()
	g_pool.gaze = Array('d',(0.0,0.0))
	g_pool.ref = Array('d',(0.0,0.0))
	g_pool.marker = Array('d',(0.0,0.0))
	g_pool.marker_state = Value('d',0.0)
	g_pool.calibrate = Value(c_bool, 0)
	g_pool.pos_record = Value(c_bool, 0)
	g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
	g_pool.player_refresh = Event()
	g_pool.player_input = Value('i',0)
	g_pool.play = RawValue(c_bool,0)
	g_pool.quit = RawValue(c_bool,0)
	# shared constants
	g_pool.eye_src = eye_src
	g_pool.eye_size = eye_size
	g_pool.world_src = world_src
	g_pool.world_size = world_size

	# set up subprocesses
	p_eye = Process(target=eye, args=(g_pool,))
	if use_player: p_player = Process(target=player, args=(g_pool,player_size))

	# spawn subprocesses
	if use_player: p_player.start()
	p_eye.start()

	# On Linux, we need to give the camera driver some time before requesting another camera.
	sleep(1)

	# on MacOS, when using some cameras (like our current logitech worldcamera)
	# you can't run the world camera grabber in its own process
	# it must reside in the main process when you run on MacOS.
	world(g_pool)

	# Exit / clean-up
	p_eye.join()
	if use_player: p_player.join()
	print "main exit"
Example #8
def player(g_pool,size):
    """player
        - Shows 9 point calibration pattern
        - Plays a source video synchronized with world process
        - Get src videos from directory (glob)
        - Iterate through videos on each record event
    """


    grid = make_grid()
    grid *= 2.5  # scale to fit
    # player object
    player = Temp()
    player.play_list = glob('src_video/*')
    path_parent = os.path.dirname( os.path.abspath(sys.argv[0]))
    player.playlist = [os.path.join(path_parent, path) for path in player.play_list]
    player.captures = [autoCreateCapture(src) for src in player.playlist]
    print "Player found %i videos in src_video"%len(player.captures)
    player.captures = [c for c in player.captures if c is not None]
    print "Player successfully loaded %i videos in src_video"%len(player.captures)
    # for c in player.captures: c.auto_rewind = False
    player.current_video = 0

    # Callbacks
    def on_resize(w, h):
        adjust_gl_view(w,h)

    def on_key(key, pressed):
        if key == GLFW_KEY_ESC:
            on_close()
    def on_char(char, pressed):
        if char == ord('9'):
            g_pool.cal9.value = True
            g_pool.calibrate.value = True



    def on_close():
        g_pool.quit.value = True
        print "Player Process closing from window"


    # initialize glfw
    glfwInit()
    glfwOpenWindow(size[0], size[1], 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("Player")
    glfwSetWindowPos(100,0)
    glfwDisable(GLFW_AUTO_POLL_EVENTS)


    #Callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)


    #gl state settings
    gl.glEnable( gl.GL_BLEND )
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glClearColor(1.,1.,1.,0.)


    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:

        glfwPollEvents()

        if g_pool.player_refresh.wait(0.01):
            g_pool.player_refresh.clear()

            clear_gl_screen()
            if g_pool.cal9.value:
                circle_id,step = g_pool.cal9_circle_id.value,g_pool.cal9_step.value
                gl.glColor4f(0.0,0.0,0.0,1.0)
                gl.glPointSize(40)
                gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
                gl.glBegin(gl.GL_POINTS)
                for p in grid:
                    gl.glVertex3f(p[0],p[1],0.0)
                gl.glEnd()

                ###display the animated target dot
                gl.glPointSize((40)*(1.01-(step+1)/80.0))
                gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ZERO)
                if g_pool.ref_x.value or g_pool.ref_y.value: ###if pattern detected
                    gl.glColor4f(0.0,0.5,0.0,1.0)
                else:
                    gl.glColor4f(0.5,0.0,0.0,1.0)
                gl.glBegin(gl.GL_POINTS)
                gl.glVertex3f(grid[circle_id][0],grid[circle_id][1],0.0)
                gl.glEnd()

            elif g_pool.play.value:
                s, img = player.captures[player.current_video].read_RGB()
                if s:
                    draw_gl_texture(img)
                else:
                    player.captures[player.current_video].rewind()
                    player.current_video +=1
                    if player.current_video >= len(player.captures):
                        player.current_video = 0
                    g_pool.play.value = False
            glfwSwapBuffers()

    glfwCloseWindow()
    glfwTerminate()
    print "PLAYER Process closed"
Example #9
def world(g_pool):
    """world
    """

    ### Callback functions
    def on_resize(w, h):
        atb.TwWindowSize(w, h)
        adjust_gl_view(w,h)

    def on_key(key, pressed):
        if not atb.TwEventKeyboardGLFW(key,pressed):
            if pressed:
                if key == GLFW_KEY_ESC:
                    on_close()

    def on_char(char, pressed):
        if not atb.TwEventCharGLFW(char,pressed):
            pass

    def on_button(button, pressed):
        if not atb.TwEventMouseButtonGLFW(button,pressed):
            if pressed:
                pos = glfwGetMousePos()
                pos = normalize(pos,glfwGetWindowSize())
                pos = denormalize(pos,(img.shape[1],img.shape[0]) ) #pos in img pixels
                ref.detector.new_ref(pos)


    def on_pos(x, y):
        if atb.TwMouseMotion(x,y):
            pass

    def on_scroll(pos):
        if not atb.TwMouseWheel(pos):
            pass

    def on_close():
        g_pool.quit.value = True
        print "WORLD Process closing from window"

    ref = Temp()
    ref.detector = no_Detector(g_pool.calibrate,g_pool.ref_x,g_pool.ref_y)
    ###objects as variable containers
    # pattern object
    pattern = Temp()
    pattern.centers = None
    pattern.obj_grid = gen_pattern_grid((4, 11))  # calib grid
    pattern.obj_points = []
    pattern.img_points = []
    pattern.map = (0, 2, 7, 16, 21, 23, 39, 40, 42)
    pattern.board_centers = None

    # gaze object
    gaze = Temp()
    gaze.map_coords = (0., 0.)
    gaze.image_coords = (0., 0.)
    # record object
    record = Temp()
    record.writer = None
    record.path_parent = os.path.dirname(os.path.abspath(sys.argv[0]))
    record.path = None
    record.counter = 0

    # initialize capture, check if it works
    cap = autoCreateCapture(g_pool.world_src, g_pool.world_size)
    if cap is None:
        print "WORLD: Error could not create Capture"
        return
    s, img = cap.read_RGB()
    if not s:
        print "WORLD: Error could not get image"
        return
    height,width = img.shape[:2]


    ###helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1 / dt - bar.fps.value)

    def set_window_size(mode,data):
        height,width = img.shape[:2]
        ratio = (1,.75,.5,.25)[mode]
        w,h = int(width*ratio),int(height*ratio)
        glfwSetWindowSize(w,h)
        data.value=mode #update the bar.value

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value


    def start_calibration():

        c_type = bar.calibration_type.value
        if  c_type == cal_type["Directed 9-Point"]:
            print 'WORLD: Starting Directed 9-Point calibration.'
            ref.detector = Nine_Point_Detector(global_calibrate=g_pool.calibrate,
                                            shared_x=g_pool.ref_x,
                                            shared_y=g_pool.ref_y,
                                            shared_stage=g_pool.cal9_stage,
                                            shared_step=g_pool.cal9_step,
                                            shared_cal9_active=g_pool.cal9,
                                            shared_circle_id=g_pool.cal9_circle_id,
                                            auto_advance=False)
        elif c_type == cal_type["Automated 9-Point"]:
            print 'WORLD: Starting Automated 9-Point calibration.'
            ref.detector = Nine_Point_Detector(global_calibrate=g_pool.calibrate,
                                            shared_x=g_pool.ref_x,
                                            shared_y=g_pool.ref_y,
                                            shared_stage=g_pool.cal9_stage,
                                            shared_step=g_pool.cal9_step,
                                            shared_cal9_active=g_pool.cal9,
                                            shared_circle_id=g_pool.cal9_circle_id,
                                            auto_advance=True)
        elif c_type == cal_type["Natural Features"]:
            print 'WORLD: Starting Natural Features calibration.'
            ref.detector = Natural_Features_Detector(global_calibrate=g_pool.calibrate,
                                                    shared_x=g_pool.ref_x,
                                                    shared_y=g_pool.ref_y)
        elif c_type == cal_type["Black Dot"]:
            print 'WORLD: Starting Black Dot calibration.'
            ref.detector = Black_Dot_Detector(global_calibrate=g_pool.calibrate,
                                            shared_x=g_pool.ref_x,
                                            shared_y=g_pool.ref_y)
    def advance_calibration():
        ref.detector.advance()

    def stop_calibration():
        ref.detector = no_Detector(global_calibrate=g_pool.calibrate,
                                shared_x=g_pool.ref_x,
                                shared_y=g_pool.ref_y)

    ### Initialize AntTweakBar main controls bar (atb.Bar)
    atb.init()
    bar = atb.Bar(name = "World", label="Controls",
            help="Scene controls", color=(50, 50, 50), alpha=100,
            text='light', position=(10, 10),refresh=.3, size=(200, 200))
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.calibration_type = c_int(1)
    bar.show_calib_result = c_bool(0)
    bar.calibration_images = False
    bar.record_video = c_bool(0)
    bar.record_running = c_bool(0)
    bar.play = g_pool.play
    bar.window_size = c_int(0)
    window_size_enum = atb.enum("Display Size",{"Full":0, "Medium":1,"Half":2,"Mini":3})
    cal_type = {"Directed 9-Point":0,"Automated 9-Point":1,"Natural Features":3,"Black Dot":4}#"Manual 9-Point":2
    calibrate_type_enum = atb.enum("Calibration Method",cal_type)
    bar.rec_name = create_string_buffer(512)

    # play and record can be tied together via pointers to the objects
    # bar.play = bar.record_video
    bar.add_var("FPS", bar.fps, step=1., readonly=True)
    bar.add_var("Display_Size", vtype=window_size_enum,setter=set_window_size,getter=get_from_data,data=bar.window_size)
    bar.add_var("Cal/Calibration_Method",bar.calibration_type, vtype=calibrate_type_enum)
    bar.add_button("Cal/Start_Calibration",start_calibration, key='c')
    bar.add_button("Cal/Next_Point",advance_calibration,key="SPACE", help="Hit space to calibrate on next dot")
    bar.add_button("Cal/Stop_Calibration",stop_calibration, key='d')
    bar.add_var("Cal/show_calibration_result",bar.show_calib_result, help="yellow lines indecate fit error, red outline shows the calibrated area.")
    bar.add_var("Rec/rec_name",bar.rec_name)
    bar.add_var("Rec/Record_Video", bar.record_video, key="r", help="Start/Stop Recording")
    bar.add_separator("Sep1")
    bar.add_var("Play Source Video", bar.play)
    bar.add_var("Exit", g_pool.quit)

    #add v4l2 camera controls to a separate ATB bar
    if cap.controls is not None:
        c_bar = atb.Bar(name="Camera_Controls", label=cap.name,
            help="UVC Camera Controls", color=(50,50,50), alpha=100,
            text='light',position=(220, 10),refresh=2., size=(200, 200))

        # c_bar.add_var("auto_refresher",vtype=atb.TW_TYPE_BOOL8,getter=cap.uvc_refresh_all,setter=None,readonly=True)
        # c_bar.define(definition='visible=0', varname="auto_refresher")

        sorted_controls = [c for c in cap.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        for control in sorted_controls:
            name = control.atb_name
            if control.type=="bool":
                c_bar.add_var(name,vtype=atb.TW_TYPE_BOOL8,getter=control.get_val,setter=control.set_val)
            elif control.type=='int':
                c_bar.add_var(name,vtype=atb.TW_TYPE_INT32,getter=control.get_val,setter=control.set_val)
                c_bar.define(definition='min='+str(control.min),   varname=name)
                c_bar.define(definition='max='+str(control.max),   varname=name)
                c_bar.define(definition='step='+str(control.step), varname=name)
            elif control.type=="menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype= atb.enum(name,control.menu)
                c_bar.add_var(name,vtype=vtype,getter=control.get_val,setter=control.set_val)
                if control.menu is None:
                    c_bar.define(definition='min='+str(control.min),   varname=name)
                    c_bar.define(definition='max='+str(control.max),   varname=name)
                    c_bar.define(definition='step='+str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # c_bar.define(definition='readonly=1',varname=control.name)

        c_bar.add_button("refresh",cap.update_from_device)
        c_bar.add_button("load defaults",cap.load_defaults)

    else:
        c_bar = None

    ### Initialize glfw
    glfwInit()
    height,width = img.shape[:2]
    glfwOpenWindow(width, height, 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("World")
    glfwSetWindowPos(0,0)

    #register callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)
    glfwSetMouseButtonCallback(on_button)
    glfwSetMousePosCallback(on_pos)
    glfwSetMouseWheelCallback(on_scroll)

    #gl_state settings
    import OpenGL.GL as gl
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glPointSize(20)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)


    ###event loop
    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        update_fps()
        # get an image from the grabber
        s, img = cap.read()
        ref.detector.detect(img)
        if ref.detector.is_done():
            stop_calibration()

        g_pool.player_refresh.set()


        # #gather pattern centers and find cam intrinsics
        # if bar.screen_shot and pattern.centers is not None:
        #     bar.screen_shot = False
        #     # calibrate the camera intrinsics if the board is found
        #     # append list of circle grid center points to pattern.img_points
        #     # append generic list of circle grid pattern type to  pattern.obj_points
        #     pattern.centers = circle_grid(img)
        #     pattern.img_points.append(pattern.centers)
        #     pattern.obj_points.append(pattern.obj_grid)
        #     print "Number of Patterns Captured:", len(pattern.img_points)
        #     #if pattern.img_points.shape[0] > 10:
        #     if len(pattern.img_points) > 10:
        #         camera_matrix, dist_coefs = calibrate_camera(np.asarray(pattern.img_points),
        #                                             np.asarray(pattern.obj_points),
        #                                             (img.shape[1], img.shape[0]))
        #         np.save("camera_matrix.npy", camera_matrix)
        #         np.save("dist_coefs.npy", dist_coefs)
        #         pattern.img_points = []
        #         bar.find_pattern.value = False

        ### Setup recording process
        if bar.record_video and not bar.record_running:
            record.path = os.path.join(record.path_parent, "data%03d/" % record.counter)
            while True:
                try:
                    os.mkdir(record.path)
                    break
                except:
                    print "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                    record.counter += 1
                    record.path = os.path.join(record.path_parent, "data%03d/" % record.counter)

            #video
            video_path = os.path.join(record.path, "world.avi")
            #FFV1 -- good speed lossless big file
            #DIVX -- good speed good compression medium file
            record.writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'), bar.fps.value, (img.shape[1], img.shape[0]))


            # positions data to eye process
            g_pool.pos_record.value = True
            g_pool.eye_tx.send(record.path)

            bar.record_running = 1
            g_pool.frame_count_record.value = 0

        # While Recording...
        if bar.record_video and bar.record_running:
            # Save image frames to video writer
            # increment the frame_count_record value
            # Eye positions can be associated with frames of recording even if different framerates are used
            record.writer.write(img)
            g_pool.frame_count_record.value += 1


        # Finish all recordings, clean up.
        if not bar.record_video and bar.record_running:
            # for convenience: copy camera intrinsics into each data folder at the end of a recording.
            try:
                camera_matrix = np.load("camera_matrix.npy")
                dist_coefs = np.load("dist_coefs.npy")
                cam_path = os.path.join(record.path, "camera_matrix.npy")
                dist_path = os.path.join(record.path, "dist_coefs.npy")
                np.save(cam_path, camera_matrix)
                np.save(dist_path, dist_coefs)
            except:
                print "no camera intrinsics found, will not copy them into data folder"

            g_pool.pos_record.value = 0
            del record.writer
            bar.record_running = 0



        ###render the screen
        clear_gl_screen()
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB,img)
        draw_gl_texture(img)

        ###render calibration results:
        if bar.show_calib_result.value:
            cal_pt_cloud = np.load("cal_pt_cloud.npy")
            pX,pY,wX,wY = cal_pt_cloud.transpose()
            map_fn = get_map_from_cloud(cal_pt_cloud,(width,height))
            modelled_world_pts = map_fn((pX,pY))
            pts = np.array(modelled_world_pts,dtype=np.float32).transpose()
            calib_bounds =  cv2.convexHull(pts)[:,0]
            for observed,modelled in zip(zip(wX,wY),np.array(modelled_world_pts).transpose()):
                draw_gl_polyline_norm((modelled,observed),(1.,0.5,0.,.5))
            draw_gl_polyline_norm(calib_bounds,(1.0,0,0,.5))

        #render visual feedback from detector
        ref.detector.display(img)
        # render detector point
        if ref.detector.pos[0] or ref.detector.pos[1]:
            draw_gl_point_norm(ref.detector.pos,(0.,1.,0.,0.5))

        # update gaze point from shared variable pool and draw on screen. If both coords are 0: no pupil pos was detected.
        if g_pool.gaze_x.value !=0 or g_pool.gaze_y.value !=0:
            draw_gl_point_norm((g_pool.gaze_x.value, g_pool.gaze_y.value),(1.,0.,0.,0.5))

        atb.draw()
        glfwSwapBuffers()

    ### end of event loop: clean-up
    print "WORLD Process closed"
    glfwCloseWindow()
    glfwTerminate()
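
The recording setup inside the event loop above avoids overwriting data by treating os.mkdir as an atomic existence check: it increments a counter until a fresh dataNNN folder can be created. A minimal standalone sketch of that trick:

import os

def make_new_data_dir(parent, counter=0):
    # os.mkdir doubles as an atomic "does this folder exist yet?" test
    while True:
        path = os.path.join(parent, "data%03d" % counter)
        try:
            os.mkdir(path)
            return path, counter
        except OSError:            # folder exists: keep its data, try the next number
            counter += 1

if __name__ == '__main__':
    path, counter = make_new_data_dir(".")   # creates e.g. ./data000 on first run
    print(path, counter)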
Example #10
        if plugin ==  "Select to load":
            return
        logger.debug('Open Plugin: %s'%plugin)
        new_plugin = plugin(g_pool)
        g_pool.plugins.add(new_plugin)

    def set_scale(new_scale):
        g_pool.gui.scale = new_scale
        g_pool.gui.collect_menus()


    def get_scale():
        return g_pool.gui.scale


    emotion = Temp()
    emotion.emotion = 'Neutral'
    emotion.Heart_Rate = 70

    def print_emo(a,b):

        emotion.emotion = a
        emotion.Heart_Rate = b





    width,height = session_settings.get('window_size',(frame.width, frame.height))
    window_pos = session_settings.get('window_position',(0,0)) # not yet using this one.
Example #11
def main():

    # Callback functions
    def on_resize(window,w, h):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(window)
        adjust_gl_view(w,h,window)
        norm_size = normalize((w,h),glfwGetWindowSize(window))
        fb_size = denormalize(norm_size,glfwGetFramebufferSize(window))
        atb.TwWindowSize(*map(int,fb_size))
        glfwMakeContextCurrent(active_window)
        for p in g.plugins:
            p.on_window_resize(window,w,h)

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key,action):
            if action == GLFW_PRESS:
                pass

    def on_char(window,char):
        if not atb.TwEventCharGLFW(char,1):
            pass

    def on_button(window,button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button,action):
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(main_window))
            pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
            for p in g.plugins:
                p.on_click(pos,button,action)

    def on_pos(window,x, y):
        norm_pos = normalize((x,y),glfwGetWindowSize(window))
        fb_x,fb_y = denormalize(norm_pos,glfwGetFramebufferSize(window))
        if atb.TwMouseMotion(int(fb_x),int(fb_y)):
            pass

    def on_scroll(window,x,y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        glfwSetWindowShouldClose(main_window,True)
        logger.debug('Process closing from window')


    try:
        rec_dir = sys.argv[1]
    except:
        #for dev, supply hardcoded dir:
        rec_dir = '/Users/mkassner/Desktop/Marker_Tracking_Demo_Recording/'
        if os.path.isdir(rec_dir):
            logger.debug("Dev option: Using hadcoded data dir.")
        else:
            if getattr(sys, 'frozen', False):
                logger.warning("You did not supply a data directory when you called this script! \
                   \nPlease drag a Pupil recording directory onto the launch icon.")
            else:
                logger.warning("You did not supply a data directory when you called this script! \
                       \nPlease supply a Pupil recording directory as first arg when calling Pupil Player.")
            return

    if not is_pupil_rec_dir(rec_dir):
        logger.error("You did not supply a dir with the required files inside.")
        return

    #backwards compatibility fn.
    patch_meta_info(rec_dir)

    #parse and load data folder info
    video_path = rec_dir + "/world.avi"
    timestamps_path = rec_dir + "/timestamps.npy"
    gaze_positions_path = rec_dir + "/gaze_positions.npy"
    meta_info_path = rec_dir + "/info.csv"


    #parse info.csv file
    with open(meta_info_path) as info:
        meta_info = dict( ((line.strip().split('\t')) for line in info.readlines() ) )
    rec_version = meta_info["Capture Software Version"]
    rec_version_float = int(filter(type(rec_version).isdigit, rec_version)[:3])/100. #(get major,minor,fix of version)
    logger.debug("Recording version: %s , %s"%(rec_version,rec_version_float))


    #load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)

    #correlate data
    positions_by_frame = correlate_gaze(gaze_list,timestamps)


    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(user_dir,"user_settings"))
    def load(var_name,default):
        return session_settings.get(var_name,default)
    def save(var_name,var):
        session_settings[var_name] = var


    # Initialize capture
    cap = autoCreateCapture(video_path,timestamps=timestamps_path)

    if isinstance(cap,FakeCapture):
        logger.error("could not start capture.")
        return

    width,height = cap.get_size()


    # Initialize glfw
    glfwInit()
    main_window = glfwCreateWindow(width, height, "Pupil Player: "+meta_info["Recording Name"]+" - "+ rec_dir.split(os.path.sep)[-1], None, None)
    glfwMakeContextCurrent(main_window)

    # Register callbacks main_window
    glfwSetWindowSizeCallback(main_window,on_resize)
    glfwSetWindowCloseCallback(main_window,on_close)
    glfwSetKeyCallback(main_window,on_key)
    glfwSetCharCallback(main_window,on_char)
    glfwSetMouseButtonCallback(main_window,on_button)
    glfwSetCursorPosCallback(main_window,on_pos)
    glfwSetScrollCallback(main_window,on_scroll)


    # create container for globally scoped vars (within world)
    g = Temp()
    g.plugins = []
    g.play = False
    g.new_seek = True
    g.user_dir = user_dir
    g.rec_dir = rec_dir
    g.app = 'player'
    g.timestamps = timestamps
    g.positions_by_frame = positions_by_frame



    # helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .1 * (1. / dt - bar.fps.value)

    def set_window_size(mode,data):
        width,height = cap.get_size()
        ratio = (1,.75,.5,.25)[mode]
        w,h = int(width*ratio),int(height*ratio)
        glfwSetWindowSize(main_window,w,h)
        data.value=mode # update the bar.value

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value

    def get_play():
        return g.play

    def set_play(value):
        g.play = value

    def next_frame():
        try:
            cap.seek_to_frame(cap.get_frame_index())
        except FileSeekError:
            pass
        g.new_seek = True

    def prev_frame():
        try:
            cap.seek_to_frame(cap.get_frame_index()-2)
        except FileSeekError:
            pass
        g.new_seek = True



    def open_plugin(selection,data):
        if plugin_by_index[selection] not in additive_plugins:
            for p in g.plugins:
                if isinstance(p,plugin_by_index[selection]):
                    return

        g.plugins = [p for p in g.plugins if p.alive]
        logger.debug('Open Plugin: %s'%name_by_index[selection])
        new_plugin = plugin_by_index[selection](g)
        g.plugins.append(new_plugin)
        g.plugins.sort(key=lambda p: p.order)

        if hasattr(new_plugin,'init_gui'):
            new_plugin.init_gui()
        # save the value for atb bar
        data.value=selection

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value

    atb.init()
    # add main controls ATB bar
    bar = atb.Bar(name = "Controls", label="Controls",
            help="Scene controls", color=(50, 50, 50), alpha=100,valueswidth=150,
            text='light', position=(10, 10),refresh=.1, size=(300, 160))
    bar.next_atb_pos = (10,220)
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.window_size = c_int(load("window_size",0))
    window_size_enum = atb.enum("Display Size",{"Full":0, "Medium":1,"Half":2,"Mini":3})
    bar.version = create_string_buffer(version,512)
    bar.recording_version = create_string_buffer(rec_version,512)
    bar.add_var("fps", bar.fps, step=1., readonly=True)
    bar._fps = c_float(cap.get_fps())
    bar.add_var("recoding fps",bar._fps,readonly=True)
    bar.add_var("display size", vtype=window_size_enum,setter=set_window_size,getter=get_from_data,data=bar.window_size)
    bar.add_var("play",vtype=c_bool,getter=get_play,setter=set_play,key="space")
    bar.add_button('step next',next_frame,key='right')
    bar.add_button('step prev',prev_frame,key='left')
    bar.add_var("frame index",vtype=c_int,getter=lambda:cap.get_frame_index()-1 )

    bar.plugin_to_load = c_int(0)
    plugin_type_enum = atb.enum("Plug In",index_by_name)
    bar.add_var("plugin",setter=open_plugin,getter=get_from_data,data=bar.plugin_to_load, vtype=plugin_type_enum)
    bar.add_var("version of recording",bar.recording_version, readonly=True, help="version of the capture software used to make this recording")
    bar.add_var("version of player",bar.version, readonly=True, help="version of the Pupil Player")
    bar.add_button("exit", on_close,data=main_window,key="esc")

    #set the last saved window size
    set_window_size(bar.window_size.value,bar.window_size)
    on_resize(main_window, *glfwGetWindowSize(main_window))
    glfwSetWindowPos(main_window,0,0)


    #we always load these plugins
    g.plugins.append(Export_Launcher(g,data_dir=rec_dir,frame_count=len(timestamps)))
    g.plugins.append(Seek_Bar(g,capture=cap))
    g.trim_marks = Trim_Marks(g,capture=cap)
    g.plugins.append(g.trim_marks)

    #these are loaded based on user settings
    for initializer in load('plugins',[]):
        name, args = initializer
        logger.debug("Loading plugin: %s with settings %s"%(name, args))
        try:
            p = plugin_by_name[name](g,**args)
            g.plugins.append(p)
        except:
            logger.warning("Plugin '%s' failed to load from settings file." %name)

    if load('plugins',"_") == "_":
        #let's load some defaults if we don't have presets
        g.plugins.append(Scan_Path(g))
        g.plugins.append(Vis_Polyline(g))
        g.plugins.append(Vis_Circle(g))
        # g.plugins.append(Vis_Light_Points(g))

    #sort by exec order
    g.plugins.sort(key=lambda p: p.order)

    #init gui
    for p in g.plugins:
        if hasattr(p,'init_gui'):
            p.init_gui()

    # gl_state settings
    basic_gl_setup()
    g.image_tex = create_named_texture((height,width,3))


    while not glfwWindowShouldClose(main_window):

        update_fps()

        #grab new frame
        if g.play or g.new_seek:
            try:
                new_frame = cap.get_frame()
            except EndofVideoFileError:
                #end of video logic: pause at last frame.
                g.play=False

            if g.new_seek:
                display_time = new_frame.timestamp
                g.new_seek = False

        frame = new_frame.copy()
        #new positions and events; we make a deepcopy, just like the image is a copy.
        current_pupil_positions = deepcopy(positions_by_frame[frame.index])
        events = []

        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame,current_pupil_positions,events)

        #check if a plugin needs to be destroyed
        g.plugins = [p for p in g.plugins if p.alive]

        # render camera image
        glfwMakeContextCurrent(main_window)
        make_coord_system_norm_based()
        draw_named_texture(g.image_tex,frame.img)
        make_coord_system_pixel_based(frame.img.shape)
        # render visual feedback from loaded plugins
        for p in g.plugins:
            p.gl_display()

        #present frames at appropriate speed
        wait_time = frame.timestamp - display_time
        display_time = frame.timestamp
        try:
            spent_time = time()-timestamp
            sleep(wait_time-spent_time)
        except:
            pass
        timestamp = time()


        atb.draw()
        glfwSwapBuffers(main_window)
        glfwPollEvents()

    plugin_save = []
    for p in g.plugins:
        try:
            p_initializer = p.get_class_name(),p.get_init_dict()
            plugin_save.append(p_initializer)
        except AttributeError:
            #not all plugins need to be savable, they will not have the init dict.
            # any object without a get_init_dict method will throw this exception.
            pass

    # de-init all running plugins
    for p in g.plugins:
        p.alive = False
        #reading p.alive actually runs plug-in cleanup
        _ = p.alive

    save('plugins',plugin_save)
    save('window_size',bar.window_size.value)
    session_settings.close()

    cap.close()
    bar.destroy()
    glfwDestroyWindow(main_window)
    glfwTerminate()
    logger.debug("Process done")
Example #12
def main():

    #to assign by name: string(s) in list
    eye_src = ["Microsoft", "6000"]
    world_src = ["Logitech Camera","(046d:081d)","C525","C615","C920"] # "(046d:081d)" is the (automated replacement) name of C510

    #uncomment to assign cameras directly: use ints
    # eye_src = 0
    # world_src = 1

    #to use a video: string (not inside a list)
    # eye_src = "/Users/mkassner/Pupil/pupil_google_code/wiki/videos/green_eye_VISandIR_2.mov"
    # world_src = 0

    #video size
    eye_size = (320,240)
    """
        HD-6000
        v4l2-ctl -d /dev/videoN --list-formats-ext
        640x480 1280x720 960x544 800x448 640x360 800x600
        416x240 352x288 176x144 320x240 160x120
    """
    world_size = (1280,720)
    """
        c-525
        v4l2-ctl -d /dev/videoN --list-formats-ext
        640x480 160x120 176x144 320x176 320x240 432x240
        352x288 544x288 640x360 752x416 800x448 864x480
        960x544 1024x576 800x600 1184x656 960x720
        1280x720 1392x768 1504x832 1600x896 1280x960
        1712x960 1792x1008 1920x1080
    """


    # use the player: a separate window for video playback and 9 point calibration animation
    use_player = 1
    player_size = (640,480) #startup size: this can be whatever you like


    # world_uvc_camera, eye_uvc_camera = None,None
    audio = False
    # create shared globals
    g_pool = Temp()
    g_pool.gaze_x = Value('d', 0.0)
    g_pool.gaze_y = Value('d', 0.0)
    g_pool.ref_x = Value('d', 0.0)
    g_pool.ref_y = Value('d', 0.0)
    g_pool.frame_count_record = Value('i', 0)
    g_pool.calibrate = RawValue(c_bool, 0)
    g_pool.cal9 = RawValue(c_bool, 0)
    g_pool.cal9_stage = RawValue('i', 0)
    g_pool.cal9_step = Value('i', 0)
    g_pool.cal9_circle_id = RawValue('i' ,0)
    g_pool.pos_record = Value(c_bool, 0)
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.player_refresh = Event()
    g_pool.play = RawValue(c_bool,0)
    g_pool.quit = RawValue(c_bool,0)
    g_pool.eye_src = eye_src
    g_pool.eye_size = eye_size
    g_pool.world_src = world_src
    g_pool.world_size = world_size
    # end shared globals

    # set up sub processes
    p_eye = Process(target=eye, args=(g_pool,))
    if use_player: p_player = Process(target=player, args=(g_pool,player_size))

    # spawn sub processes
    if use_player: p_player.start()
    p_eye.start()

    # on Linux, we need to give the camera driver some time before requesting another camera
    sleep(1)

    # on MacOS, with some cameras (like our current Logitech world camera)
    # you can't run the world camera grabber in its own process;
    # it must reside in the main process.
    world_profiled(g_pool)

    # exit / clean-up
    p_eye.join()
    if use_player: p_player.join()
    print "main exit"
Example #13
0
def player(g_pool,size):
    """player
        - Shows 9 point calibration pattern
        - Plays a source video synchronized with world process
        - Get src videos from directory (glob)
        - Iterate through videos on each record event
    """


    grid = make_grid()
    grid *= 2.5  # scale to fit
    # player object
    player = Temp()
    player.play_list = glob('src_video/*')
    path_parent = os.path.dirname( os.path.abspath(sys.argv[0]))
    player.playlist = [os.path.join(path_parent, path) for path in player.play_list]
    player.captures = [autoCreateCapture(src) for src in player.playlist]
    print "Player found %i videos in src_video"%len(player.captures)
    player.captures =  [c for c in player.captures if c is not None]
    print "Player sucessfully loaded %i videos in src_video"%len(player.captures)
    # for c in player.captures: c.auto_rewind = False
    player.current_video = 0

    # Callbacks
    def on_resize(w, h):
        adjust_gl_view(w,h)

    def on_key(key, pressed):
        if key == GLFW_KEY_ESC:
            on_close()
    def on_char(char, pressed):
        if pressed:
            g_pool.player_input.value = char


    def on_close():
        g_pool.quit.value = True
        print "Player Process closing from window"

    def draw_circle(pos,r,c):
        pts = cv2.ellipse2Poly(tuple(pos),(r,r),0,0,360,10)
        draw_gl_polyline(pts,c,'Polygon')

    def draw_marker(pos):
        pos = int(pos[0]),int(pos[1])
        black = (0.,0.,0.,1.)
        white = (1.,1.,1.,1.)
        for r,c in zip((50,40,30,20,10),(black,white,black,white,black)):
            draw_circle(pos,r,c)

    # Initialize glfw
    glfwInit()
    glfwOpenWindow(size[0], size[1], 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("Player")
    glfwSetWindowPos(100,0)
    glfwDisable(GLFW_AUTO_POLL_EVENTS)


    # Callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)


    # gl state settings
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)
    gl.glDisable (gl.GL_DEPTH_TEST)
    gl.glClearColor(1.,1.,1.,0.)



    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        glfwPollEvents()
        if g_pool.player_refresh.wait(0.01):
            g_pool.player_refresh.clear()

            clear_gl_screen()
            if g_pool.marker_state.value !=0:

                # Set Matrix using gluOrtho2D to include padding for the marker of radius r
                #
                ############################
                #            r             #
                # 0,0##################w,0 #
                # #                      # #
                # #                      # #
                #r#                      #r#
                # #                      # #
                # #                      # #
                # 0,h##################w,h #
                #            r             #
                ############################
                r = 60
                gl.glMatrixMode(gl.GL_PROJECTION)
                gl.glLoadIdentity()
                # compensate for radius of marker
                gluOrtho2D(-r,glfwGetWindowSize()[0]+r,glfwGetWindowSize()[1]+r, -r) # origin in the top left corner just like the img np-array
                # Switch back to Model View Matrix
                gl.glMatrixMode(gl.GL_MODELVIEW)
                gl.glLoadIdentity()


                screen_pos = denormalize(g_pool.marker[:],glfwGetWindowSize(),flip_y=True)

                #some feedback on the detection state
                draw_marker(screen_pos)
                if g_pool.ref[:] == [0.,0.]:
                    # world ref not detected yet: draw red
                    draw_gl_point(screen_pos, 5.0, (1.,0.,0.,1.))
                else:
                    # world ref detected: draw green
                    draw_gl_point(screen_pos, 5.0, (0.,1.,0.,1.))

            elif g_pool.play.value:
                if len(player.captures):
                    frame = player.captures[player.current_video].get_frame()
                    img = frame.img
                    if img is not None:
                        draw_gl_texture(img)
                    else:
                        player.captures[player.current_video].rewind()
                        player.current_video +=1
                        if player.current_video >= len(player.captures):
                            player.current_video = 0
                        g_pool.play.value = False
                else:
                    print 'PLAYER: Warning: No videos available to play. Please put your videos into a folder called "src_video" in the Capture folder.'
                    g_pool.play.value = False
            glfwSwapBuffers()

    glfwCloseWindow()
    glfwTerminate()
    print "PLAYER Process closed"
Example #14
0
def main():
    """Batch process recordings to produce visualizations
    Using simple_circle as the default visualizations
    Steps:
        - User Supplies: Directory that contains many recording(s) dirs or just one recordings dir
        - We walk the user supplied directory to get all data folders
        - Data is the list we feed to our multiprocessed
        - Error check -- do we have required files in each dir?: world.avi, gaze_positions.npy, timestamps.npy
        - Result: world_viz.avi within each original data folder
    """

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=dedent('''\
            ***************************************************
            Batch process recordings to produce visualizations
            The default visualization will use simple_circle

            Usage Example:
                python batch_export.py -d /path/to/folder-with-many-recordings -s ~/Pupil_Player/settings/user_settings -e ~/my_export_dir
            Arguments:
                -d : Specify a recording directory.
                     This could have one or many recordings contained within it.
                     We will recurse into the dir.
                -s : Specify path to the Pupil Player user_settings file to reuse the last used visualization settings.
                -e : Specify an export directory if you don't want the export saved within each recording dir.
                -c : If you don't use the player settings file, we use a default circle.
                     You can specify color to be used
                     Available options:
                        white, red, green
                -p : Export a 120 frame preview only.
            ***************************************************\
        '''))
    parser.add_argument('-d', '--rec-dir', required=True)
    parser.add_argument('-s', '--settings-file', default=False)
    parser.add_argument('-e', '--export-to-dir', default=False)
    parser.add_argument('-c', '--basic-color', default='red')
    parser.add_argument('-p', '--preview', action='store_true')

    if len(sys.argv) == 1:
        print parser.description
        return

    args = parser.parse_args()
    # get the top level data folder from terminal argument

    data_dir = args.rec_dir

    if args.settings_file and os.path.isfile(args.settings_file):
        session_settings = shelve.open(os.path.splitext(args.settings_file)[0],
                                       protocol=2)
        #these are loaded based on user settings
        plugin_initializers = session_settings.get('plugins', [])
        session_settings.close()

        for initializer in plugin_initializers:
            name, var = initializer
            logger.debug("Loading plugin: %s with settings %s" % (name, var))
    else:
        session_settings = {}
        if args.basic_color:
            try:
                color = color_lookup(args.basic_color)
            except KeyError:
                logger.warning("Not a real color. Choosing red")
                color = color_lookup('red')
        else:
            logger.warning("No color selected. Choosing red")
            color = color_lookup('red')

        #load a simple circle plugin as default
        logger.debug("Loading default plugin: Vis_Circle with color: %s" %
                     (color, ))

        plugin_initializers = [("Vis_Circle", {
            'thickness': 2,
            'color': color
        })]

    if args.export_to_dir:
        export_dir = args.export_to_dir
        if os.path.isdir(export_dir):
            logger.info("Exporting all vids to %s" % export_dir)
        else:
            logger.info("Exporting dir is not valid %s" % export_dir)
            return
    else:
        export_dir = None

    preview = args.preview

    g = Temp()
    g.app = "batch_export"

    recording_dirs = get_recording_dirs(data_dir)
    for r in recording_dirs:
        patch_meta_info(r)

    # start multiprocessing engine
    n_cpu = cpu_count()
    logger.info(
        "Using a maximum of %s CPUs to process visualizations in parallel..." %
        n_cpu)

    jobs = []
    outfiles = set()
    for d in recording_dirs:
        j = Temp()
        logger.info("Adding new export: %s" % d)
        j.should_terminate = Value(c_bool, 0)
        j.frames_to_export = Value('i', 0)
        j.current_frame = Value('i', 0)
        j.data_dir = d
        j.start_frame = None
        if preview:
            j.end_frame = 30
        else:
            j.end_frame = None
        j.plugin_initializers = plugin_initializers[:]

        if export_dir:
            #make a unique name created from rec_session and dir name
            rec_session, rec_dir = d.rsplit(os.path.sep, 2)[1:]
            out_name = rec_session + "_" + rec_dir + ".avi"
            j.out_file_path = os.path.join(os.path.expanduser(export_dir),
                                           out_name)
            if j.out_file_path in outfiles:
                logger.error(
                    "This export setting would try to save %s at least twice pleace rename dirs to prevent this."
                    % j.out_file_path)
                return
            outfiles.add(j.out_file_path)
            logger.info("Exporting to: %s" % j.out_file_path)

        else:
            j.out_file_path = None

        j.args = (j.should_terminate, j.frames_to_export, j.current_frame,
                  j.data_dir, j.start_frame, j.end_frame,
                  j.plugin_initializers, j.out_file_path)
        jobs.append(j)

    todo = jobs[:]
    workers = [
        Process(target=export, args=todo.pop(0).args)
        for i in range(min(len(todo), n_cpu))
    ]
    for w in workers:
        w.start()

    working = True

    t = time.time()
    while working:  # cannot use a Pool here as it does not allow shared memory
        working = False
        for i in range(len(workers)):
            if workers[i].is_alive():
                working = True
            else:
                if todo:
                    workers[i] = Process(target=export, args=todo.pop(0).args)
                    workers[i].start()
                    working = True
        show_progess(jobs)
        time.sleep(.25)
    print "\n"

    #let's give some CPU performance feedback.
    total_frames = sum((j.frames_to_export.value for j in jobs))
    total_secs = time.time() - t
    logger.info("Export done. Exported %s frames in %s seconds. %s fps" %
                (total_frames, total_secs, total_frames / total_secs))
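The scheduling loop above hand-rolls a worker pool because, as the inline comment notes, multiprocessing.Pool cannot be used with the shared ctypes values each job carries for progress reporting: it starts up to n_cpu workers and, whenever one slot goes idle, starts the next pending job in it. A stripped-down sketch of that pattern with a dummy job function standing in for export:

from multiprocessing import Process, cpu_count
import time

def dummy_export(job_id):
    time.sleep(0.1)     # stand-in for the real export work

if __name__ == '__main__':
    todo = [(i,) for i in range(10)]
    workers = [Process(target=dummy_export, args=todo.pop(0))
               for _ in range(min(len(todo), cpu_count()))]
    for w in workers:
        w.start()

    working = True
    while working:
        working = False
        for i in range(len(workers)):
            if workers[i].is_alive():
                working = True
            elif todo:
                # this slot is free: reuse it for the next pending job
                workers[i] = Process(target=dummy_export, args=todo.pop(0))
                workers[i].start()
                working = True
        time.sleep(0.25)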
Example #15
0
def export(should_terminate,frames_to_export,current_frame, data_dir,start_frame=None,end_frame=None,plugin_initializers=[],out_file_path=None):


    logger = logging.getLogger(__name__+' with pid: '+str(os.getpid()) )

    #parse and load data dir info
    video_path = data_dir + "/world.avi"
    timestamps_path = data_dir + "/timestamps.npy"
    gaze_positions_path = data_dir + "/gaze_positions.npy"
    record_path = data_dir + "/world_viz.avi"


    #parse info.csv file
    with open(data_dir + "/info.csv") as info:
        meta_info = dict( ((line.strip().split('\t')) for line in info.readlines() ) )
    rec_version = meta_info["Capture Software Version"]
    rec_version_float = int(filter(type(rec_version).isdigit, rec_version)[:3])/100. #(get major,minor,fix of version)
    logger.debug("Exporting a video from recording with version: %s , %s"%(rec_version,rec_version_float))


    #load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)
    #correlate data
    positions_by_frame = correlate_gaze(gaze_list,timestamps)


    # Initialize capture, check if it works
    cap = autoCreateCapture(video_path,timestamps=timestamps_path)
    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    width,height = cap.get_size()

    #Out file path verification. We do this earlier as well, but if one uses this as a separate tool, this check will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(data_dir, "world_viz.avi")
    else:
        file_name =  os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = data_dir
        if not file_name:
            file_name = 'world_viz.avi'
        out_file_path = os.path.expanduser(os.path.join(dir_name,file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s"%out_file_path)


    #Trim mark verification
    #make sure the trim marks (start frame, end frame) make sense: we define them like Python list slices, thus we can test them as such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps)==0:
        logger.warn("Start and end frames are set such that no video will be exported.")
        return False

    if start_frame is None:
        start_frame = 0

    #these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug("Will export from frame %s to frame %s. This means I will export %s frames."%(start_frame,start_frame+frames_to_export.value,frames_to_export.value))


    #let's get the avg. framerate for our slice of video:
    fps = float(len(trimmed_timestamps))/(trimmed_timestamps[-1] - trimmed_timestamps[0])
    logger.debug("Framerate of export video is %s"%fps)


    #setup of writer
    writer = cv2.VideoWriter(out_file_path, cv2.cv.CV_FOURCC(*'DIVX'), fps, (width,height))

    cap.seek_to_frame(start_frame)

    start_time = time()


    plugins = []
    g = Temp()
    g.plugins = plugins
    g.app = 'exporter'

    # load plugins from initializers:
    for initializer in plugin_initializers:
        name, args = initializer
        logger.debug("Loading plugin: %s with settings %s"%(name, args))
        try:
            p = plugin_by_name[name](g,**args)
            plugins.append(p)
        except:
            logger.warning("Plugin '%s' failed to load." %name)


    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s."%(current_frame.value,out_file_path))

            #explicit release of VideoWriter
            writer.release()
            writer = None
            return False

        new_frame = cap.get_frame()
        #end of video or read error: stop the export here.
        if not new_frame:
            logger.error("Could not read all frames.")
            #explicit release of VideoWriter
            writer.release()
            writer = None
            return False
        else:
            frame = new_frame

        #new positions and events
        current_pupil_positions = positions_by_frame[frame.index]
        events = None

        # allow each Plugin to do its work.
        for p in plugins:
            p.update(frame,current_pupil_positions,events)

        # # render gl visual feedback from loaded plugins
        # for p in plugins:
        #     p.gl_display(frame)

        writer.write(frame.img)
        current_frame.value +=1

    writer.release()
    writer = None

    duration = time()-start_time
    effective_fps = float(current_frame.value)/duration

    logger.info("Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"%(current_frame.value,out_file_path,duration,effective_fps))


    return True
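correlate_gaze itself is not shown in these examples; it groups the recorded gaze samples by the world frame they fall into so that positions_by_frame[i] holds the gaze data for frame i. A hedged sketch of one way to do that binning, assuming both inputs are sorted 1-D timestamp arrays (the names and data layout here are illustrative, not the library's actual format):

import numpy as np

def correlate_by_timestamp(gaze_timestamps, frame_timestamps):
    """Return one list per frame, holding the indices of gaze samples whose
    timestamps fall between that frame's timestamp and the next one."""
    positions_by_frame = [[] for _ in frame_timestamps]
    # np.searchsorted finds, for each gaze timestamp, the frame slot it lands in
    frame_idx = np.searchsorted(frame_timestamps, gaze_timestamps, side='right') - 1
    for gaze_i, f_i in enumerate(frame_idx):
        if 0 <= f_i < len(frame_timestamps):
            positions_by_frame[f_i].append(gaze_i)
    return positions_by_frame

# illustrative use: 3 frames at ~30 fps, 6 gaze samples at ~60 Hz
frames = np.array([0.0, 0.033, 0.066])
gaze = np.array([0.0, 0.016, 0.034, 0.05, 0.067, 0.08])
print(correlate_by_timestamp(gaze, frames))     # -> [[0, 1], [2, 3], [4, 5]]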
Example #16
0
def main():

    # Callback functions
    def on_resize(window, w, h):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(window)
        adjust_gl_view(w, h, window)
        norm_size = normalize((w, h), glfwGetWindowSize(window))
        fb_size = denormalize(norm_size, glfwGetFramebufferSize(window))
        atb.TwWindowSize(*map(int, fb_size))
        glfwMakeContextCurrent(active_window)
        for p in g.plugins:
            p.on_window_resize(window, w, h)

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key, action):
            if action == GLFW_PRESS:
                pass

    def on_char(window, char):
        if not atb.TwEventCharGLFW(char, 1):
            pass

    def on_button(window, button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button, action):
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(main_window))
            pos = denormalize(pos,
                              (frame.img.shape[1],
                               frame.img.shape[0]))  # Position in img pixels
            for p in g.plugins:
                p.on_click(pos, button, action)

    def on_pos(window, x, y):
        norm_pos = normalize((x, y), glfwGetWindowSize(window))
        fb_x, fb_y = denormalize(norm_pos, glfwGetFramebufferSize(window))
        if atb.TwMouseMotion(int(fb_x), int(fb_y)):
            pass

    def on_scroll(window, x, y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        glfwSetWindowShouldClose(main_window, True)
        logger.debug('Process closing from window')

    try:
        rec_dir = sys.argv[1]
    except:
        #for dev, supply hardcoded dir:
        rec_dir = '/Users/mkassner/Desktop/Marker_Tracking_Demo_Recording/'
        if os.path.isdir(rec_dir):
            logger.debug("Dev option: Using hadcoded data dir.")
        else:
            if getattr(sys, 'frozen', False):
                logger.warning(
                    "You did not supply a data directory when you called this script!"
                    "\nPlease drag a Pupil recording directory onto the launch icon."
                )
            else:
                logger.warning(
                    "You did not supply a data directory when you called this script!"
                    "\nPlease supply a Pupil recording directory as the first arg when calling Pupil Player."
                )
            return

    if not is_pupil_rec_dir(rec_dir):
        logger.error(
            "You did not supply a dir with the required files inside.")
        return

    #backwards compatibility fn.
    patch_meta_info(rec_dir)

    #parse and load data folder info
    video_path = rec_dir + "/world.avi"
    timestamps_path = rec_dir + "/timestamps.npy"
    gaze_positions_path = rec_dir + "/gaze_positions.npy"
    meta_info_path = rec_dir + "/info.csv"

    #parse info.csv file
    with open(meta_info_path) as info:
        meta_info = dict(
            ((line.strip().split('\t')) for line in info.readlines()))
    rec_version = meta_info["Capture Software Version"]
    rec_version_float = int(
        filter(type(rec_version).isdigit,
               rec_version)[:3]) / 100.  #(get major,minor,fix of version)
    logger.debug("Recording version: %s , %s" %
                 (rec_version, rec_version_float))

    #load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)

    #correlate data
    positions_by_frame = correlate_gaze(gaze_list, timestamps)

    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(user_dir, "user_settings"))

    def load(var_name, default):
        return session_settings.get(var_name, default)

    def save(var_name, var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(video_path, timestamps=timestamps_path)

    if isinstance(cap, FakeCapture):
        logger.error("could not start capture.")
        return

    width, height = cap.get_size()

    # Initialize glfw
    glfwInit()
    main_window = glfwCreateWindow(
        width, height, "Pupil Player: " + meta_info["Recording Name"] + " - " +
        rec_dir.split(os.path.sep)[-1], None, None)
    glfwMakeContextCurrent(main_window)

    # Register callbacks main_window
    glfwSetWindowSizeCallback(main_window, on_resize)
    glfwSetWindowCloseCallback(main_window, on_close)
    glfwSetKeyCallback(main_window, on_key)
    glfwSetCharCallback(main_window, on_char)
    glfwSetMouseButtonCallback(main_window, on_button)
    glfwSetCursorPosCallback(main_window, on_pos)
    glfwSetScrollCallback(main_window, on_scroll)

    # create container for globally scoped vars (within player)
    g = Temp()
    g.plugins = []
    g.play = False
    g.new_seek = True
    g.user_dir = user_dir
    g.rec_dir = rec_dir
    g.app = 'player'
    g.timestamps = timestamps
    g.positions_by_frame = positions_by_frame

    # helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .1 * (1. / dt - bar.fps.value)

    def set_window_size(mode, data):
        width, height = cap.get_size()
        ratio = (1, .75, .5, .25)[mode]
        w, h = int(width * ratio), int(height * ratio)
        glfwSetWindowSize(main_window, w, h)
        data.value = mode  # update the bar.value

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value

    def get_play():
        return g.play

    def set_play(value):
        g.play = value

    def next_frame():
        try:
            cap.seek_to_frame(cap.get_frame_index())
        except FileSeekError:
            pass
        g.new_seek = True

    def prev_frame():
        try:
            cap.seek_to_frame(cap.get_frame_index() - 2)
        except FileSeekError:
            pass
        g.new_seek = True

    def open_plugin(selection, data):
        if plugin_by_index[selection] not in additive_plugins:
            for p in g.plugins:
                if isinstance(p, plugin_by_index[selection]):
                    return

        g.plugins = [p for p in g.plugins if p.alive]
        logger.debug('Open Plugin: %s' % name_by_index[selection])
        new_plugin = plugin_by_index[selection](g)
        g.plugins.append(new_plugin)
        g.plugins.sort(key=lambda p: p.order)

        if hasattr(new_plugin, 'init_gui'):
            new_plugin.init_gui()
        # save the value for atb bar
        data.value = selection

    atb.init()
    # add main controls ATB bar
    bar = atb.Bar(name="Controls",
                  label="Controls",
                  help="Scene controls",
                  color=(50, 50, 50),
                  alpha=100,
                  valueswidth=150,
                  text='light',
                  position=(10, 10),
                  refresh=.1,
                  size=(300, 160))
    bar.next_atb_pos = (10, 220)
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.window_size = c_int(load("window_size", 0))
    window_size_enum = atb.enum("Display Size", {
        "Full": 0,
        "Medium": 1,
        "Half": 2,
        "Mini": 3
    })
    bar.version = create_string_buffer(version, 512)
    bar.recording_version = create_string_buffer(rec_version, 512)
    bar.add_var("fps", bar.fps, step=1., readonly=True)
    bar._fps = c_float(cap.get_fps())
    bar.add_var("recoding fps", bar._fps, readonly=True)
    bar.add_var("display size",
                vtype=window_size_enum,
                setter=set_window_size,
                getter=get_from_data,
                data=bar.window_size)
    bar.add_var("play",
                vtype=c_bool,
                getter=get_play,
                setter=set_play,
                key="space")
    bar.add_button('step next', next_frame, key='right')
    bar.add_button('step prev', prev_frame, key='left')
    bar.add_var("frame index",
                vtype=c_int,
                getter=lambda: cap.get_frame_index() - 1)

    bar.plugin_to_load = c_int(0)
    plugin_type_enum = atb.enum("Plug In", index_by_name)
    bar.add_var("plugin",
                setter=open_plugin,
                getter=get_from_data,
                data=bar.plugin_to_load,
                vtype=plugin_type_enum)
    bar.add_var(
        "version of recording",
        bar.recording_version,
        readonly=True,
        help="version of the capture software used to make this recording")
    bar.add_var("version of player",
                bar.version,
                readonly=True,
                help="version of the Pupil Player")
    bar.add_button("exit", on_close, data=main_window, key="esc")

    #set the last saved window size
    set_window_size(bar.window_size.value, bar.window_size)
    on_resize(main_window, *glfwGetWindowSize(main_window))
    glfwSetWindowPos(main_window, 0, 0)

    #we always load these plugins
    g.plugins.append(
        Export_Launcher(g, data_dir=rec_dir, frame_count=len(timestamps)))
    g.plugins.append(Seek_Bar(g, capture=cap))
    g.trim_marks = Trim_Marks(g, capture=cap)
    g.plugins.append(g.trim_marks)

    #these are loaded based on user settings
    for initializer in load('plugins', []):
        name, args = initializer
        logger.debug("Loading plugin: %s with settings %s" % (name, args))
        try:
            p = plugin_by_name[name](g, **args)
            g.plugins.append(p)
        except:
            logger.warning("Plugin '%s' failed to load from settings file." %
                           name)

    if load('plugins', "_") == "_":
        #let's load some defaults if we don't have presets
        g.plugins.append(Scan_Path(g))
        g.plugins.append(Vis_Polyline(g))
        g.plugins.append(Vis_Circle(g))
        # g.plugins.append(Vis_Light_Points(g))

    #sort by exec order
    g.plugins.sort(key=lambda p: p.order)

    #init gui
    for p in g.plugins:
        if hasattr(p, 'init_gui'):
            p.init_gui()

    # gl_state settings
    basic_gl_setup()
    g.image_tex = create_named_texture((height, width, 3))

    while not glfwWindowShouldClose(main_window):

        update_fps()

        #grab new frame
        if g.play or g.new_seek:
            try:
                new_frame = cap.get_frame()
            except EndofVideoFileError:
                #end of video logic: pause at last frame.
                g.play = False

            if g.new_seek:
                display_time = new_frame.timestamp
                g.new_seek = False

        frame = new_frame.copy()
        #new positions and events: we make a deepcopy, just like the image is a copy.
        current_pupil_positions = deepcopy(positions_by_frame[frame.index])
        events = []

        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame, current_pupil_positions, events)

        #check if a plugin needs to be destroyed
        g.plugins = [p for p in g.plugins if p.alive]

        # render camera image
        glfwMakeContextCurrent(main_window)
        make_coord_system_norm_based()
        draw_named_texture(g.image_tex, frame.img)
        make_coord_system_pixel_based(frame.img.shape)
        # render visual feedback from loaded plugins
        for p in g.plugins:
            p.gl_display()

        #present frames at appropriate speed
        wait_time = frame.timestamp - display_time
        display_time = frame.timestamp
        try:
            spent_time = time() - timestamp
            sleep(wait_time - spent_time)
        except:
            pass
        timestamp = time()

        atb.draw()
        glfwSwapBuffers(main_window)
        glfwPollEvents()

    plugin_save = []
    for p in g.plugins:
        try:
            p_initializer = p.get_class_name(), p.get_init_dict()
            plugin_save.append(p_initializer)
        except AttributeError:
            #not all plugins need to be savable; those will not have an init dict.
            # any object without a get_init_dict method will throw this AttributeError.
            pass

    # de-init all running plugins
    for p in g.plugins:
        p.alive = False
        #reading p.alive actually runs plug-in cleanup
        _ = p.alive

    save('plugins', plugin_save)
    save('window_size', bar.window_size.value)
    session_settings.close()

    cap.close()
    bar.destroy()
    glfwDestroyWindow(main_window)
    glfwTerminate()
    logger.debug("Process done")
Example #17
0
def world(g_pool):
    """world
    """

    # Callback functions
    def on_resize(w, h):
        atb.TwWindowSize(w, h)
        adjust_gl_view(w,h)

    def on_key(key, pressed):
        if not atb.TwEventKeyboardGLFW(key,pressed):
            if pressed:
                if key == GLFW_KEY_ESC:
                    on_close()

    def on_char(char, pressed):
        if not atb.TwEventCharGLFW(char,pressed):
            pass

    def on_button(button, pressed):
        if not atb.TwEventMouseButtonGLFW(button,pressed):
            if pressed:
                pos = glfwGetMousePos()
                pos = normalize(pos,glfwGetWindowSize())
                pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
                for p in g.plugins:
                    p.on_click(pos)

    def on_pos(x, y):
        if atb.TwMouseMotion(x,y):
            pass

    def on_scroll(pos):
        if not atb.TwMouseWheel(pos):
            pass

    def on_close():
        g_pool.quit.value = True
        print "WORLD Process closing from window"





    # load session persistent settings
    session_settings = shelve.open('user_settings_world',protocol=2)
    def load(var_name,default):
        try:
            return session_settings[var_name]
        except:
            return default
    def save(var_name,var):
        session_settings[var_name] = var


    # gaze object
    gaze = Temp()
    gaze.map_coords = (0., 0.)
    gaze.image_coords = (0., 0.)

    # Initialize capture, check if it works
    cap = autoCreateCapture(g_pool.world_src, g_pool.world_size,24)
    if cap is None:
        print "WORLD: Error could not create Capture"
        return
    frame = cap.get_frame()
    if frame.img is None:
        print "WORLD: Error could not get image"
        return
    height,width = frame.img.shape[:2]


    # helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1 / dt - bar.fps.value)

    def set_window_size(mode,data):
        height,width = frame.img.shape[:2]
        ratio = (1,.75,.5,.25)[mode]
        w,h = int(width*ratio),int(height*ratio)
        glfwSetWindowSize(w,h)
        data.value=mode # update the bar.value

    def get_from_data(data):
        """
        helper for atb getter and setter use
        """
        return data.value

    def open_calibration(selection,data):
        # prepare destruction of old ref_detector.
        if g.current_ref_detector:
            g.current_ref_detector.alive = False

        # remove old ref detector from list of plugins
        g.plugins = [p for p in g.plugins if p.alive]

        print "selected: ",reference_detectors.name_by_index[selection]
        g.current_ref_detector = reference_detectors.detector_by_index[selection](global_calibrate=g_pool.calibrate,
                                                                    shared_pos=g_pool.ref,
                                                                    screen_marker_pos = g_pool.marker,
                                                                    screen_marker_state = g_pool.marker_state,
                                                                    atb_pos=bar.next_atb_pos)

        g.plugins.append(g.current_ref_detector)
        # save the value for atb bar
        data.value=selection

    def toggle_record_video():
        if any([True for p in g.plugins if isinstance(p,recorder.Recorder)]):
            for p in g.plugins:
                if isinstance(p,recorder.Recorder):
                    p.alive = False
        else:
            # set up folder within recordings named by user input in atb
            if not bar.rec_name.value:
                bar.rec_name.value = recorder.get_auto_name()
            recorder_instance = recorder.Recorder(bar.rec_name.value, bar.fps.value, frame.img.shape, g_pool.pos_record, g_pool.eye_tx)
            g.plugins.append(recorder_instance)

    def toggle_show_calib_result():
        if any([True for p in g.plugins if isinstance(p,Show_Calibration)]):
            for p in g.plugins:
                if isinstance(p,Show_Calibration):
                    p.alive = False
        else:
            calib = Show_Calibration(frame.img.shape)
            g.plugins.append(calib)

    def show_calib_result():
        # kill old if any
        if any([True for p in g.plugins if isinstance(p,Show_Calibration)]):
            for p in g.plugins:
                if isinstance(p,Show_Calibration):
                    p.alive = False
            g.plugins = [p for p in g.plugins if p.alive]
        # make new
        calib = Show_Calibration(frame.img.shape)
        g.plugins.append(calib)

    def hide_calib_result():
        if any([True for p in g.plugins if isinstance(p,Show_Calibration)]):
            for p in g.plugins:
                if isinstance(p,Show_Calibration):
                    p.alive = False

    # Initialize AntTweakBar and create the main controls bar
    atb.init()
    bar = atb.Bar(name = "World", label="Controls",
            help="Scene controls", color=(50, 50, 50), alpha=100,valueswidth=150,
            text='light', position=(10, 10),refresh=.3, size=(300, 200))
    bar.next_atb_pos = (10,220)
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.calibration_type = c_int(load("calibration_type",0))
    bar.show_calib_result = c_bool(0)
    bar.record_video = c_bool(0)
    bar.record_running = c_bool(0)
    bar.play = g_pool.play
    bar.window_size = c_int(load("window_size",0))
    window_size_enum = atb.enum("Display Size",{"Full":0, "Medium":1,"Half":2,"Mini":3})

    bar.calibrate_type_enum = atb.enum("Calibration Method",reference_detectors.index_by_name)
    bar.rec_name = create_string_buffer(512)
    bar.rec_name.value = recorder.get_auto_name()
    # play and record can be tied together via pointers to the objects
    # bar.play = bar.record_video
    bar.add_var("fps", bar.fps, step=1., readonly=True)
    bar.add_var("display size", vtype=window_size_enum,setter=set_window_size,getter=get_from_data,data=bar.window_size)
    bar.add_var("calibration method",setter=open_calibration,getter=get_from_data,data=bar.calibration_type, vtype=bar.calibrate_type_enum,group="Calibration", help="Please choose your desired calibration method.")
    bar.add_button("show calibration result",toggle_show_calib_result, group="Calibration", help="Click to show calibration result.")
    bar.add_var("session name",bar.rec_name, group="Recording", help="creates folder Data_Name_XXX, where xxx is an increasing number")
    bar.add_button("record", toggle_record_video, key="r", group="Recording", help="Start/Stop Recording")
    bar.add_separator("Sep1")
    bar.add_var("play video", bar.play, help="play a video in the Player window")
    bar.add_var("exit", g_pool.quit)

    # add uvc camera controls to a separate ATB bar
    cap.create_atb_bar(pos=(320,10))


    # create container for globally scoped vars (within world)
    g = Temp()
    g.plugins = []
    g.current_ref_detector = None
    open_calibration(bar.calibration_type.value,bar.calibration_type)

    # Initialize glfw
    glfwInit()
    height,width = frame.img.shape[:2]
    glfwOpenWindow(width, height, 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("World")
    glfwSetWindowPos(0,0)

    #set the last saved window size
    set_window_size(bar.window_size.value,bar.window_size)

    # Register callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)
    glfwSetMouseButtonCallback(on_button)
    glfwSetMousePosCallback(on_pos)
    glfwSetMouseWheelCallback(on_scroll)

    # gl_state settings
    import OpenGL.GL as gl
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)
    del gl

    # Event loop
    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        # Get input characters entered in player
        if g_pool.player_input.value:
            player_input = g_pool.player_input.value
            g_pool.player_input.value = 0
            on_char(player_input,True)

        # Get an image from the grabber
        frame = cap.get_frame()
        update_fps()

        for p in g.plugins:
            p.update(frame)

        g.plugins = [p for p in g.plugins if p.alive]

        g_pool.player_refresh.set()

        # render the screen
        clear_gl_screen()
        draw_gl_texture(frame.img)

        # render visual feedback from loaded plugins
        for p in g.plugins:
            p.gl_display()


        # update gaze point from shared variable pool and draw on screen. If both coords are 0: no pupil pos was detected.
        if not g_pool.gaze[:] == [0.,0.]:
            draw_gl_point_norm(g_pool.gaze[:],color=(1.,0.,0.,0.5))

        atb.draw()
        glfwSwapBuffers()


    # end while running and clean-up

    # de-init all running plugins
    for p in g.plugins:
        p.alive = False
    g.plugins = [p for p in g.plugins if p.alive]

    save('window_size',bar.window_size.value)
    save('calibration_type',bar.calibration_type.value)
    session_settings.close()

    cap.close()
    glfwCloseWindow()
    glfwTerminate()
    print "WORLD Process closed"