def world(g_pool, cap_src, cap_size):
    """world
    Creates a window, gl context.
    Grabs images from a capture.
    Receives Pupil coordinates from g_pool.pupil_queue
    Can run various plug-ins.
    """

    # Callback functions
    def on_resize(window, w, h):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(window)
        norm_size = normalize((w, h), glfwGetWindowSize(window))
        fb_size = denormalize(norm_size, glfwGetFramebufferSize(window))
        atb.TwWindowSize(*map(int, fb_size))
        adjust_gl_view(w, h, window)
        glfwMakeContextCurrent(active_window)
        for p in g_pool.plugins:
            p.on_window_resize(window, w, h)

    def on_iconify(window, iconified):
        if not isinstance(cap, FakeCapture):
            g_pool.update_textures.value = not iconified

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key, action):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window, char):
        if not atb.TwEventCharGLFW(char, 1):
            pass

    def on_button(window, button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button, action):
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(world_window))
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # position in img pixels
            for p in g_pool.plugins:
                p.on_click(pos, button, action)

    def on_pos(window, x, y):
        norm_pos = normalize((x, y), glfwGetWindowSize(window))
        fb_x, fb_y = denormalize(norm_pos, glfwGetFramebufferSize(window))
        if atb.TwMouseMotion(int(fb_x), int(fb_y)):
            pass

    def on_scroll(window, x, y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')

    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(g_pool.user_dir, 'user_settings_world'))

    def load(var_name, default):
        return session_settings.get(var_name, default)

    def save(var_name, var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(cap_src, cap_size, 24, timebase=g_pool.timebase)

    # Get an image from the grabber
    try:
        frame = cap.get_frame()
    except CameraCaptureError:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height, width = frame.img.shape[:2]

    # load last calibration data
    try:
        pt_cloud = np.load(os.path.join(g_pool.user_dir, 'cal_pt_cloud.npy'))
        logger.debug("Using calibration found in %s" % g_pool.user_dir)
        map_pupil = calibrate.get_map_from_cloud(pt_cloud, (width, height))
    except:
        logger.debug("No calibration found.")

        def map_pupil(vector):
            """ 1 to 1 mapping """
            return vector

    # any object we attach to the g_pool object *from now on* will only be visible to this process!
    # vars should be declared here to make them visible to the code reader.
    g_pool.plugins = []
    g_pool.map_pupil = map_pupil
    g_pool.update_textures = c_bool(1)
    if isinstance(cap, FakeCapture):
        g_pool.update_textures.value = False
    g_pool.capture = cap
    g_pool.rec_name = recorder.get_auto_name()

    # helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)

    def set_window_size(mode, data):
        height, width = frame.img.shape[:2]
        ratio = (1, .75, .5, .25)[mode]
        w, h = int(width * ratio), int(height * ratio)
        glfwSetWindowSize(world_window, w, h)
        data.value = mode  # update the bar.value

    def get_from_data(data):
        """ helper for atb getter and setter use """
        return data.value

    def set_rec_name(val):
        if not val.value:
            g_pool.rec_name = recorder.get_auto_name()
        else:
            g_pool.rec_name = val.value

    def get_rec_name():
        return create_string_buffer(g_pool.rec_name, 512)

    def open_calibration(selection, data):
        # prepare destruction of current ref_detector... and remove it
        for p in g_pool.plugins:
            if isinstance(p, calibration_routines.detector_by_index):
                p.alive = False
        g_pool.plugins = [p for p in g_pool.plugins if p.alive]

        new_ref_detector = calibration_routines.detector_by_index[selection](g_pool, atb_pos=bar.next_atb_pos)
        g_pool.plugins.append(new_ref_detector)
        g_pool.plugins.sort(key=lambda p: p.order)

        # save the value for atb bar
        data.value = selection

    def toggle_record_video():
        for p in g_pool.plugins:
            if isinstance(p, recorder.Recorder):
                p.alive = False
                return
        new_plugin = recorder.Recorder(g_pool, g_pool.rec_name, bar.fps.value, frame.img.shape,
                                       bar.record_eye.value, g_pool.eye_tx, bar.audio.value)
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def toggle_show_calib_result():
        for p in g_pool.plugins:
            if isinstance(p, Show_Calibration):
                p.alive = False
                return
        new_plugin = Show_Calibration(g_pool, frame.img.shape)
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def toggle_server():
        for p in g_pool.plugins:
            if isinstance(p, Pupil_Server):
                p.alive = False
                return
        new_plugin = Pupil_Server(g_pool, (10, 300))
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def toggle_remote():
        for p in g_pool.plugins:
            if isinstance(p, Pupil_Remote):
                p.alive = False
                return
        new_plugin = Pupil_Remote(g_pool, (10, 360), on_char)
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def toggle_ar():
        for p in g_pool.plugins:
            if isinstance(p, Marker_Detector):
                p.alive = False
                return
        new_plugin = Marker_Detector(g_pool, (10, 400))
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def reset_timebase():
        # the last frame from the world cam will be t0
        g_pool.timebase.value = g_pool.capture.get_now()
        logger.info("New timebase set to %s, all timestamps will count from here now." % g_pool.timebase.value)

    atb.init()
    # add main controls ATB bar
    bar = atb.Bar(name="World", label="Controls", help="Scene controls",
                  color=(50, 50, 50), alpha=100, valueswidth=150, text='light',
                  position=(10, 10), refresh=.3, size=(300, 200))
    bar.next_atb_pos = (10, 220)
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.calibration_type = c_int(load("calibration_type", 0))
    bar.record_eye = c_bool(load("record_eye", 0))
    bar.audio = c_int(load("audio", -1))
    bar.window_size = c_int(load("window_size", 0))
    window_size_enum = atb.enum("Display Size", {"Full": 0, "Medium": 1, "Half": 2, "Mini": 3})
    calibrate_type_enum = atb.enum("Calibration Method", calibration_routines.index_by_name)
    audio_enum = atb.enum("Audio Input", dict(Audio_Input_List()))
    bar.version = create_string_buffer(g_pool.version, 512)

    bar.add_var("fps", bar.fps, step=1., readonly=True,
                help="Refresh speed of this process. Especially during recording it should not drop below the camera's set frame rate.")
    bar.add_var("display size", vtype=window_size_enum, setter=set_window_size, getter=get_from_data, data=bar.window_size,
                help="Resize the world window. This has no effect on the actual image.")
    bar.add_var("calibration method", setter=open_calibration, getter=get_from_data, data=bar.calibration_type,
                vtype=calibrate_type_enum, group="Calibration",
                help="Please choose your desired calibration method.")
    bar.add_button("show calibration result", toggle_show_calib_result, group="Calibration",
                   help="Click to show calibration result.")
    bar.add_var("session name", create_string_buffer(512), getter=get_rec_name, setter=set_rec_name, group="Recording",
                help="Give your recording session a custom name.")
    bar.add_button("record", toggle_record_video, key="r", group="Recording", help="Start/Stop Recording")
    bar.add_var("record eye", bar.record_eye, group="Recording", help="check to save raw video of eye")
    bar.add_var("record audio", bar.audio, vtype=audio_enum, group="Recording", help="Select from audio recording options.")
    bar.add_button("start/stop marker tracking", toggle_ar, key="x",
                   help="find markers in scene to map gaze onto reference surfaces")
    bar.add_button("start/stop server", toggle_server, key="s",
                   help="the server broadcasts pupil and gaze positions locally or via network")
    bar.add_button("start/stop remote", toggle_remote, key="w",
                   help="remote allows sending commands to pupil via network")
    bar.add_button("set timebase to now", reset_timebase,
                   help="this button allows the timestamps to count from now on.", key="t")
    bar.add_var("update screen", g_pool.update_textures,
                help="if you don't need to see the camera image updated, you can turn this off to reduce CPU load.")
    bar.add_separator("Sep1")
    bar.add_var("version", bar.version, readonly=True)
    bar.add_var("exit", g_pool.quit)

    # add uvc camera controls ATB bar
    cap.create_atb_bar(pos=(320, 10))

    # Initialize glfw
    glfwInit()
    world_window = glfwCreateWindow(width, height, "World", None, None)
    glfwMakeContextCurrent(world_window)

    # Register callbacks for world_window
    glfwSetWindowSizeCallback(world_window, on_resize)
    glfwSetWindowCloseCallback(world_window, on_close)
    glfwSetWindowIconifyCallback(world_window, on_iconify)
    glfwSetKeyCallback(world_window, on_key)
    glfwSetCharCallback(world_window, on_char)
    glfwSetMouseButtonCallback(world_window, on_button)
    glfwSetCursorPosCallback(world_window, on_pos)
    glfwSetScrollCallback(world_window, on_scroll)

    # set the last saved window size
    set_window_size(bar.window_size.value, bar.window_size)
    on_resize(world_window, *glfwGetWindowSize(world_window))
    glfwSetWindowPos(world_window, 0, 0)

    # gl_state settings
    basic_gl_setup()
    g_pool.image_tex = create_named_texture(frame.img)

    # refresh speed settings
    glfwSwapInterval(0)

    # load calibration plugin
    open_calibration(bar.calibration_type.value, bar.calibration_type)

    # load gaze_display plugin
    g_pool.plugins.append(Display_Recent_Gaze(g_pool))

    # Event loop
    while not g_pool.quit.value:

        # Get an image from the grabber
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Capture from Camera Failed. Stopping.")
            break
        except EndofVideoFileError:
            logger.warning("Video File is done. Stopping")
            break

        update_fps()

        # a container that allows plugins to post and read events
        events = []

        # receive and map pupil positions
        recent_pupil_positions = []
        while not g_pool.pupil_queue.empty():
            p = g_pool.pupil_queue.get()
            if p['norm_pupil'] is None:
                p['norm_gaze'] = None
            else:
                p['norm_gaze'] = g_pool.map_pupil(p['norm_pupil'])
            recent_pupil_positions.append(p)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.update(frame, recent_pupil_positions, events)

        # check if a plugin needs to be destroyed
        g_pool.plugins = [p for p in g_pool.plugins if p.alive]

        # render camera image
        glfwMakeContextCurrent(world_window)

        make_coord_system_norm_based()
        if g_pool.update_textures.value:
            draw_named_texture(g_pool.image_tex, frame.img)
        else:
            draw_named_texture(g_pool.image_tex)
        make_coord_system_pixel_based(frame.img.shape)

        # render visual feedback from loaded plugins
        for p in g_pool.plugins:
            p.gl_display()

        atb.draw()
        glfwSwapBuffers(world_window)
        glfwPollEvents()

    # de-init all running plugins
    for p in g_pool.plugins:
        p.alive = False
        # reading p.alive actually runs plug-in cleanup
        _ = p.alive

    save('window_size', bar.window_size.value)
    save('calibration_type', bar.calibration_type.value)
    save('record_eye', bar.record_eye.value)
    save('audio', bar.audio.value)
    session_settings.close()

    cap.close()
    atb.terminate()
    glfwDestroyWindow(world_window)
    glfwTerminate()
    logger.debug("Process done")
def world(g_pool, cap_src, cap_size):
    """world
    Creates a window, gl context.
    Grabs images from a capture.
    Receives Pupil coordinates from g_pool.pupil_queue
    Can run various plug-ins.
    """

    # Callback functions
    def on_resize(window, w, h):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(window)
        norm_size = normalize((w, h), glfwGetWindowSize(window))
        fb_size = denormalize(norm_size, glfwGetFramebufferSize(window))
        atb.TwWindowSize(*map(int, fb_size))
        adjust_gl_view(w, h, window)
        glfwMakeContextCurrent(active_window)

    def on_iconify(window, iconified):
        if not isinstance(cap, FakeCapture):
            g_pool.update_textures.value = not iconified

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key, action):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window, char):
        if not atb.TwEventCharGLFW(char, 1):
            pass

    def on_button(window, button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button, action):
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(world_window))
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # position in img pixels
            for p in g_pool.plugins:
                p.on_click(pos, button, action)

    def on_pos(window, x, y):
        norm_pos = normalize((x, y), glfwGetWindowSize(window))
        fb_x, fb_y = denormalize(norm_pos, glfwGetFramebufferSize(window))
        if atb.TwMouseMotion(int(fb_x), int(fb_y)):
            pass

    def on_scroll(window, x, y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')

    # load session persistent settings
    session_settings = shelve.open(os.path.join(g_pool.user_dir, 'user_settings_world'), protocol=2)

    def load(var_name, default):
        return session_settings.get(var_name, default)

    def save(var_name, var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(cap_src, cap_size, 24, timebase=g_pool.timebase)

    # Get an image from the grabber
    try:
        frame = cap.get_frame()
    except CameraCaptureError:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height, width = frame.img.shape[:2]

    # load last calibration data
    try:
        pt_cloud = np.load(os.path.join(g_pool.user_dir, 'cal_pt_cloud.npy'))
        logger.info("Using calibration found in %s" % g_pool.user_dir)
        map_pupil = calibrate.get_map_from_cloud(pt_cloud, (width, height))
    except:
        logger.info("No calibration found.")

        def map_pupil(vector):
            """ 1 to 1 mapping """
            return vector

    # any object we attach to the g_pool object now will only be visible to this process!
    # vars should be declared here to make them visible to the reader.
    g_pool.plugins = []
    g_pool.map_pupil = map_pupil
    g_pool.update_textures = c_bool(1)
    if isinstance(cap, FakeCapture):
        g_pool.update_textures.value = False
    g_pool.capture = cap

    # helpers called by the main atb bar
    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)

    def set_window_size(mode, data):
        height, width = frame.img.shape[:2]
        ratio = (1, .75, .5, .25)[mode]
        w, h = int(width * ratio), int(height * ratio)
        glfwSetWindowSize(world_window, w, h)
        data.value = mode  # update the bar.value

    def get_from_data(data):
        """ helper for atb getter and setter use """
        return data.value

    def open_calibration(selection, data):
        # prepare destruction of current ref_detector... and remove it
        for p in g_pool.plugins:
            if isinstance(p, calibration_routines.detector_by_index):
                p.alive = False
        g_pool.plugins = [p for p in g_pool.plugins if p.alive]

        new_ref_detector = calibration_routines.detector_by_index[selection](g_pool, atb_pos=bar.next_atb_pos)
        g_pool.plugins.append(new_ref_detector)
        g_pool.plugins.sort(key=lambda p: p.order)

        # save the value for atb bar
        data.value = selection

    def toggle_record_video():
        for p in g_pool.plugins:
            if isinstance(p, recorder.Recorder):
                p.alive = False
                return

        # set up folder within recordings named by user input in atb
        if not bar.rec_name.value:
            bar.rec_name.value = recorder.get_auto_name()

        new_plugin = recorder.Recorder(g_pool, bar.rec_name.value, bar.fps.value, frame.img.shape,
                                       bar.record_eye.value, g_pool.eye_tx)
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def toggle_show_calib_result():
        for p in g_pool.plugins:
            if isinstance(p, Show_Calibration):
                p.alive = False
                return
        new_plugin = Show_Calibration(g_pool, frame.img.shape)
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def toggle_server():
        for p in g_pool.plugins:
            if isinstance(p, Pupil_Server):
                p.alive = False
                return
        new_plugin = Pupil_Server(g_pool, (10, 300))
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def toggle_ar():
        for p in g_pool.plugins:
            if isinstance(p, Marker_Detector):
                p.alive = False
                return
        new_plugin = Marker_Detector(g_pool, (10, 400))
        g_pool.plugins.append(new_plugin)
        g_pool.plugins.sort(key=lambda p: p.order)

    def reset_timebase():
        # the last frame from the world cam will be t0
        g_pool.timebase.value = cap.get_now()
        logger.info("New timebase set to %s, all timestamps will count from here now." % g_pool.timebase.value)

    atb.init()
    # add main controls ATB bar
    bar = atb.Bar(name="World", label="Controls", help="Scene controls",
                  color=(50, 50, 50), alpha=100, valueswidth=150, text='light',
                  position=(10, 10), refresh=.3, size=(300, 200))
    bar.next_atb_pos = (10, 220)
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.calibration_type = c_int(load("calibration_type", 0))
    bar.record_eye = c_bool(load("record_eye", 0))
    bar.window_size = c_int(load("window_size", 0))
    window_size_enum = atb.enum("Display Size", {"Full": 0, "Medium": 1, "Half": 2, "Mini": 3})
    calibrate_type_enum = atb.enum("Calibration Method", calibration_routines.index_by_name)
    bar.rec_name = create_string_buffer(512)
    bar.version = create_string_buffer(g_pool.version, 512)
    bar.rec_name.value = recorder.get_auto_name()

    bar.add_var("fps", bar.fps, step=1., readonly=True,
                help="Refresh speed of this process. Especially during recording it should not drop below the camera's set frame rate.")
    bar.add_var("display size", vtype=window_size_enum, setter=set_window_size, getter=get_from_data, data=bar.window_size,
                help="Resize the world window. This has no effect on the actual image.")
    bar.add_var("calibration method", setter=open_calibration, getter=get_from_data, data=bar.calibration_type,
                vtype=calibrate_type_enum, group="Calibration",
                help="Please choose your desired calibration method.")
    bar.add_button("show calibration result", toggle_show_calib_result, group="Calibration",
                   help="Click to show calibration result.")
    bar.add_var("session name", bar.rec_name, group="Recording",
                help="Give your recording session a custom name.")
    bar.add_button("record", toggle_record_video, key="r", group="Recording", help="Start/Stop Recording")
    bar.add_var("record eye", bar.record_eye, group="Recording", help="check to save raw video of eye")
    bar.add_button("start/stop marker tracking", toggle_ar, key="x",
                   help="find markers in scene to map gaze onto reference surfaces")
    bar.add_button("start/stop server", toggle_server, key="s",
                   help="the server broadcasts pupil and gaze positions locally or via network")
    bar.add_button("set timebase to now", reset_timebase,
                   help="this button allows the timestamps to count from now on.", key="t")
    bar.add_var("update screen", g_pool.update_textures,
                help="if you don't need to see the camera image updated, you can turn this off to reduce CPU load.")
    bar.add_separator("Sep1")
    bar.add_var("version", bar.version, readonly=True)
    bar.add_var("exit", g_pool.quit)

    # add uvc camera controls ATB bar
    cap.create_atb_bar(pos=(320, 10))

    # Initialize glfw
    glfwInit()
    world_window = glfwCreateWindow(width, height, "World", None, None)
    glfwMakeContextCurrent(world_window)

    # Register callbacks for world_window
    glfwSetWindowSizeCallback(world_window, on_resize)
    glfwSetWindowCloseCallback(world_window, on_close)
    glfwSetWindowIconifyCallback(world_window, on_iconify)
    glfwSetKeyCallback(world_window, on_key)
    glfwSetCharCallback(world_window, on_char)
    glfwSetMouseButtonCallback(world_window, on_button)
    glfwSetCursorPosCallback(world_window, on_pos)
    glfwSetScrollCallback(world_window, on_scroll)

    # set the last saved window size
    set_window_size(bar.window_size.value, bar.window_size)
    on_resize(world_window, *glfwGetWindowSize(world_window))
    glfwSetWindowPos(world_window, 0, 0)

    # gl_state settings
    basic_gl_setup()
    g_pool.image_tex = create_named_texture(frame.img)

    # refresh speed settings
    glfwSwapInterval(0)

    # load calibration plugin
    open_calibration(bar.calibration_type.value, bar.calibration_type)

    # load gaze_display plugin
    g_pool.plugins.append(Display_Recent_Gaze(g_pool))

    # Event loop
    while not g_pool.quit.value:

        # Get an image from the grabber
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Capture from Camera Failed. Stopping.")
            break
        except EndofVideoFileError:
            logger.warning("Video File is done. Stopping")
            break

        update_fps()

        # a container that allows plugins to post and read events
        events = []

        # receive and map pupil positions
        recent_pupil_positions = []
        while not g_pool.pupil_queue.empty():
            p = g_pool.pupil_queue.get()
            if p['norm_pupil'] is None:
                p['norm_gaze'] = None
            else:
                p['norm_gaze'] = g_pool.map_pupil(p['norm_pupil'])
            recent_pupil_positions.append(p)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.update(frame, recent_pupil_positions, events)

        # check if a plugin needs to be destroyed
        g_pool.plugins = [p for p in g_pool.plugins if p.alive]

        # render camera image
        glfwMakeContextCurrent(world_window)

        make_coord_system_norm_based()
        if g_pool.update_textures.value:
            draw_named_texture(g_pool.image_tex, frame.img)
        else:
            draw_named_texture(g_pool.image_tex)
        make_coord_system_pixel_based(frame.img.shape)

        # render visual feedback from loaded plugins
        for p in g_pool.plugins:
            p.gl_display()

        atb.draw()
        glfwSwapBuffers(world_window)
        glfwPollEvents()

    # de-init all running plugins
    for p in g_pool.plugins:
        p.alive = False
        # reading p.alive actually runs plug-in cleanup
        _ = p.alive

    save('window_size', bar.window_size.value)
    save('calibration_type', bar.calibration_type.value)
    save('record_eye', bar.record_eye.value)
    session_settings.close()

    cap.close()
    atb.terminate()
    glfwDestroyWindow(world_window)
    glfwTerminate()
    logger.debug("Process done")
def eye(g_pool, cap_src, cap_size):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir, 'eye.log'), mode='w')
    fh.setLevel(logging.INFO)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('EYE Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter('E Y E Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    # Callback functions
    def on_resize(window, w, h):
        adjust_gl_view(w, h)
        atb.TwWindowSize(w, h)

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key, int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window, char):
        if not atb.TwEventCharGLFW(char, 1):
            pass

    def on_button(window, button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button, int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos, glfwGetWindowSize(window))
                pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # pos in frame.img pixels
                u_r.setStart(pos)
                bar.draw_roi.value = 1
            else:
                bar.draw_roi.value = 0

    def on_pos(window, x, y):
        if atb.TwMouseMotion(int(x), int(y)):
            pass
        if bar.draw_roi.value == 1:
            pos = x, y
            pos = normalize(pos, glfwGetWindowSize(window))
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # pos in frame.img pixels
            u_r.setEnd(pos)

    def on_scroll(window, x, y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')

    # Helper functions called by the main atb bar
    def start_roi():
        bar.display.value = 1
        bar.draw_roi.value = 2

    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)
        bar.dt.value = dt

    def get_from_data(data):
        """ helper for atb getter and setter use """
        return data.value

    # load session persistent settings
    session_settings = shelve.open(os.path.join(g_pool.user_dir, 'user_settings_eye'), protocol=2)

    def load(var_name, default):
        return session_settings.get(var_name, default)

    def save(var_name, var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(cap_src, cap_size)
    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    # check if it works
    frame = cap.get_frame()
    if frame.img is None:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height, width = frame.img.shape[:2]
    cap.auto_rewind = False

    u_r = Roi(frame.img.shape)
    u_r.set(load('roi', default=None))

    writer = None

    pupil_detector = Canny_Detector(g_pool)

    atb.init()
    # Create main ATB Controls
    bar = atb.Bar(name="Eye", label="Display", help="Scene controls",
                  color=(50, 50, 50), alpha=100, text='light',
                  position=(10, 10), refresh=.3, size=(200, 100))
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.dt = c_float(0.0)
    bar.sleep = c_float(0.0)
    bar.display = c_int(load('bar.display', 0))
    bar.draw_pupil = c_bool(load('bar.draw_pupil', True))
    bar.draw_roi = c_int(0)
    display_mode_enum = atb.enum("Mode", {"Camera Image": 0, "Region of Interest": 1, "Algorithm": 2})

    bar.add_var("FPS", bar.fps, step=1., readonly=True)
    bar.add_var("Mode", bar.display, vtype=display_mode_enum, help="select the view-mode")
    bar.add_var("Show_Pupil_Point", bar.draw_pupil)
    bar.add_button("Draw_ROI", start_roi, help="drag on screen to select a region of interest")
    bar.add_var("SlowDown", bar.sleep, step=0.01, min=0.0)
    bar.add_var("SaveSettings&Exit", g_pool.quit)

    cap.create_atb_bar(pos=(220, 10))

    # create a bar for the detector
    pupil_detector.create_atb_bar(pos=(10, 120))

    glfwInit()
    window = glfwCreateWindow(width, height, "Eye", None, None)
    glfwMakeContextCurrent(window)

    # Register callbacks for window
    glfwSetWindowSizeCallback(window, on_resize)
    glfwSetWindowCloseCallback(window, on_close)
    glfwSetKeyCallback(window, on_key)
    glfwSetCharCallback(window, on_char)
    glfwSetMouseButtonCallback(window, on_button)
    glfwSetCursorPosCallback(window, on_pos)
    glfwSetScrollCallback(window, on_scroll)

    glfwSetWindowPos(window, 800, 0)
    on_resize(window, width, height)

    # gl_state settings
    basic_gl_setup()

    # refresh speed settings
    glfwSwapInterval(0)

    # event loop
    while not g_pool.quit.value:
        frame = cap.get_frame()
        if frame.img is None:
            break
        update_fps()
        sleep(bar.sleep.value)  # for debugging only

        if pupil_detector.should_sleep:
            sleep(16)
            pupil_detector.should_sleep = False

        ### RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if g_pool.eye_rx.poll():
            command = g_pool.eye_rx.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %s" % record_path)
                video_path = os.path.join(record_path, "eye.avi")
                timestamps_path = os.path.join(record_path, "eye_timestamps.npy")
                writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'),
                                         bar.fps.value, (frame.img.shape[1], frame.img.shape[0]))
                timestamps = []
            else:
                logger.info("Done recording eye.")
                writer = None
                np.save(timestamps_path, np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write(frame.img)
            timestamps.append(frame.timestamp)

        # pupil ellipse detection
        result = pupil_detector.detect(frame, user_roi=u_r, visualize=bar.display.value == 2)

        # stream the result
        g_pool.pupil_queue.put(result)

        # VISUALIZATION
        # direct visualizations on the frame.img data
        if bar.display.value == 1:
            # draw a solid (white) frame around the user defined ROI
            r_img = frame.img[u_r.lY:u_r.uY, u_r.lX:u_r.uX]
            r_img[:, 0] = 255, 255, 255
            r_img[:, -1] = 255, 255, 255
            r_img[0, :] = 255, 255, 255
            r_img[-1, :] = 255, 255, 255

        # GL-drawing
        clear_gl_screen()
        draw_gl_texture(frame.img)

        if result['norm_pupil'] is not None and bar.draw_pupil.value:
            if result.has_key('axes'):
                pts = cv2.ellipse2Poly((int(result['center'][0]), int(result['center'][1])),
                                       (int(result["axes"][0] / 2), int(result["axes"][1] / 2)),
                                       int(result["angle"]), 0, 360, 15)
                draw_gl_polyline(pts, (1., 0, 0, .5))
            draw_gl_point_norm(result['norm_pupil'], color=(1., 0., 0., 0.5))

        atb.draw()
        glfwSwapBuffers(window)
        glfwPollEvents()

    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path, np.asarray(timestamps))

    # save session persistent settings
    save('roi', u_r.get())
    save('bar.display', bar.display.value)
    save('bar.draw_pupil', bar.draw_pupil.value)
    session_settings.close()

    pupil_detector.cleanup()
    cap.close()
    atb.terminate()
    glfwDestroyWindow(window)
    glfwTerminate()

    # flush the queue in case the world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")
def eye(g_pool, cap_src, cap_size):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir, 'eye.log'), mode='w')
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('EYE Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter('E Y E Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    # Callback functions
    def on_resize(window, w, h):
        adjust_gl_view(w, h, window)
        norm_size = normalize((w, h), glfwGetWindowSize(window))
        fb_size = denormalize(norm_size, glfwGetFramebufferSize(window))
        atb.TwWindowSize(*map(int, fb_size))

    def on_key(window, key, scancode, action, mods):
        if not atb.TwEventKeyboardGLFW(key, int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                if key == GLFW_KEY_ESCAPE:
                    on_close(window)

    def on_char(window, char):
        if not atb.TwEventCharGLFW(char, 1):
            pass

    def on_button(window, button, action, mods):
        if not atb.TwEventMouseButtonGLFW(button, int(action == GLFW_PRESS)):
            if action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos, glfwGetWindowSize(window))
                pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # pos in frame.img pixels
                u_r.setStart(pos)
                bar.draw_roi.value = 1
            else:
                bar.draw_roi.value = 0

    def on_pos(window, x, y):
        norm_pos = normalize((x, y), glfwGetWindowSize(window))
        fb_x, fb_y = denormalize(norm_pos, glfwGetFramebufferSize(window))
        if atb.TwMouseMotion(int(fb_x), int(fb_y)):
            pass
        if bar.draw_roi.value == 1:
            pos = denormalize(norm_pos, (frame.img.shape[1], frame.img.shape[0]))  # pos in frame.img pixels
            u_r.setEnd(pos)

    def on_scroll(window, x, y):
        if not atb.TwMouseWheel(int(x)):
            pass

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')

    # Helper functions called by the main atb bar
    def start_roi():
        bar.display.value = 1
        bar.draw_roi.value = 2

    def update_fps():
        old_time, bar.timestamp = bar.timestamp, time()
        dt = bar.timestamp - old_time
        if dt:
            bar.fps.value += .05 * (1. / dt - bar.fps.value)
        bar.dt.value = dt

    def get_from_data(data):
        """ helper for atb getter and setter use """
        return data.value

    # load session persistent settings
    session_settings = shelve.open(os.path.join(g_pool.user_dir, 'user_settings_eye'), protocol=2)

    def load(var_name, default):
        return session_settings.get(var_name, default)

    def save(var_name, var):
        session_settings[var_name] = var

    # Initialize capture
    cap = autoCreateCapture(cap_src, cap_size, timebase=g_pool.timebase)
    if cap is None:
        logger.error("Did not receive valid Capture")
        return
    # check if it works
    frame = cap.get_frame()
    if frame.img is None:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return
    height, width = frame.img.shape[:2]

    u_r = Roi(frame.img.shape)
    u_r.set(load('roi', default=None))

    writer = None

    pupil_detector = Canny_Detector(g_pool)

    atb.init()
    # Create main ATB Controls
    bar = atb.Bar(name="Eye", label="Display", help="Scene controls",
                  color=(50, 50, 50), alpha=100, text='light',
                  position=(10, 10), refresh=.3, size=(200, 100))
    bar.fps = c_float(0.0)
    bar.timestamp = time()
    bar.dt = c_float(0.0)
    bar.sleep = c_float(0.0)
    bar.display = c_int(load('bar.display', 0))
    bar.draw_pupil = c_bool(load('bar.draw_pupil', True))
    bar.draw_roi = c_int(0)
    display_mode_enum = atb.enum("Mode", {"Camera Image": 0, "Region of Interest": 1, "Algorithm": 2, "CPU Save": 3})

    bar.add_var("FPS", bar.fps, step=1., readonly=True)
    bar.add_var("Mode", bar.display, vtype=display_mode_enum, help="select the view-mode")
    bar.add_var("Show_Pupil_Point", bar.draw_pupil)
    bar.add_button("Draw_ROI", start_roi, help="drag on screen to select a region of interest")
    bar.add_var("SlowDown", bar.sleep, step=0.01, min=0.0)
    bar.add_var("SaveSettings&Exit", g_pool.quit)

    cap.create_atb_bar(pos=(220, 10))

    # create a bar for the detector
    pupil_detector.create_atb_bar(pos=(10, 120))

    glfwInit()
    window = glfwCreateWindow(width, height, "Eye", None, None)
    glfwMakeContextCurrent(window)

    # Register callbacks for window
    glfwSetWindowSizeCallback(window, on_resize)
    glfwSetWindowCloseCallback(window, on_close)
    glfwSetKeyCallback(window, on_key)
    glfwSetCharCallback(window, on_char)
    glfwSetMouseButtonCallback(window, on_button)
    glfwSetCursorPosCallback(window, on_pos)
    glfwSetScrollCallback(window, on_scroll)

    glfwSetWindowPos(window, 800, 0)
    on_resize(window, width, height)

    # gl_state settings
    basic_gl_setup()

    # refresh speed settings
    glfwSwapInterval(0)

    # event loop
    while not g_pool.quit.value:

        # Get an image from the grabber
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Capture from Camera Failed. Stopping.")
            break
        except EndofVideoFileError:
            logger.warning("Video File is done. Stopping")
            break

        update_fps()
        sleep(bar.sleep.value)  # for debugging only

        ### RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if g_pool.eye_rx.poll():
            command = g_pool.eye_rx.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %s" % record_path)
                video_path = os.path.join(record_path, "eye.avi")
                timestamps_path = os.path.join(record_path, "eye_timestamps.npy")
                writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'),
                                         bar.fps.value, (frame.img.shape[1], frame.img.shape[0]))
                timestamps = []
            else:
                logger.info("Done recording eye.")
                writer = None
                np.save(timestamps_path, np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write(frame.img)
            timestamps.append(frame.timestamp)

        # pupil ellipse detection
        result = pupil_detector.detect(frame, user_roi=u_r, visualize=bar.display.value == 2)

        # stream the result
        g_pool.pupil_queue.put(result)

        # VISUALIZATION
        # direct visualizations on the frame.img data
        if bar.display.value == 1:
            # draw a solid (white) frame around the user defined ROI
            r_img = frame.img[u_r.lY:u_r.uY, u_r.lX:u_r.uX]
            r_img[:, 0] = 255, 255, 255
            r_img[:, -1] = 255, 255, 255
            r_img[0, :] = 255, 255, 255
            r_img[-1, :] = 255, 255, 255

        # GL-drawing
        clear_gl_screen()
        draw_gl_texture(frame.img, update=bar.display.value != 3)

        if result['norm_pupil'] is not None and bar.draw_pupil.value:
            if result.has_key('axes'):
                pts = cv2.ellipse2Poly((int(result['center'][0]), int(result['center'][1])),
                                       (int(result["axes"][0] / 2), int(result["axes"][1] / 2)),
                                       int(result["angle"]), 0, 360, 15)
                draw_gl_polyline(pts, (1., 0, 0, .5))
            draw_gl_point_norm(result['norm_pupil'], color=(1., 0., 0., 0.5))

        atb.draw()
        glfwSwapBuffers(window)
        glfwPollEvents()

    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path, np.asarray(timestamps))

    # save session persistent settings
    save('roi', u_r.get())
    save('bar.display', bar.display.value)
    save('bar.draw_pupil', bar.draw_pupil.value)
    session_settings.close()

    pupil_detector.cleanup()
    cap.close()
    atb.terminate()
    glfwDestroyWindow(window)
    glfwTerminate()

    # flush the queue in case the world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")
def eye(g_pool):
    """ Eye process:
        grabs eye camera images, detects the pupil, maps it to gaze
        coordinates and optionally records eye video and gaze positions.
    """

    # callback functions
    def on_resize(w, h):
        atb.TwWindowSize(w, h)
        adjust_gl_view(w, h)

    def on_key(key, pressed):
        if not atb.TwEventKeyboardGLFW(key, pressed):
            if pressed:
                if key == GLFW_KEY_ESC:
                    on_close()

    def on_char(char, pressed):
        if not atb.TwEventCharGLFW(char, pressed):
            pass

    def on_button(button, pressed):
        if not atb.TwEventMouseButtonGLFW(button, pressed):
            if bar.draw_roi.value:
                if pressed:
                    pos = glfwGetMousePos()
                    pos = normalize(pos, glfwGetWindowSize())
                    pos = denormalize(pos, (img.shape[1], img.shape[0]))  # pos in img pixels
                    r.setStart(pos)
                    bar.draw_roi.value = 1
                else:
                    bar.draw_roi.value = 0

    def on_pos(x, y):
        if atb.TwMouseMotion(x, y):
            pass
        if bar.draw_roi.value == 1:
            pos = glfwGetMousePos()
            pos = normalize(pos, glfwGetWindowSize())
            pos = denormalize(pos, (img.shape[1], img.shape[0]))  # pos in img pixels
            r.setEnd(pos)

    def on_scroll(pos):
        if not atb.TwMouseWheel(pos):
            pass

    def on_close():
        g_pool.quit.value = True
        print "EYE Process closing from window"

    # initialize capture, check if it works
    cap = autoCreateCapture(g_pool.eye_src, g_pool.eye_size)
    if cap is None:
        print "EYE: Error could not create Capture"
        return
    s, img = cap.read_RGB()
    if not s:
        print "EYE: Error could not get image"
        return
    height, width = img.shape[:2]

    # pupil object
    pupil = Temp()
    pupil.norm_coords = (0.0, 0.0)
    pupil.image_coords = (0.0, 0.0)
    pupil.ellipse = None
    pupil.gaze_coords = (0.0, 0.0)

    try:
        pupil.pt_cloud = np.load("cal_pt_cloud.npy")
        map_pupil = get_map_from_cloud(pupil.pt_cloud, g_pool.world_size)  # world video size here
    except:
        pupil.pt_cloud = None

        def map_pupil(vector):
            return vector

    r = Roi(img.shape)
    p_r = Roi(img.shape)

    # local object
    l_pool = Temp()
    l_pool.calib_running = False
    l_pool.record_running = False
    l_pool.record_positions = []
    l_pool.record_path = None
    l_pool.writer = None
    l_pool.region_r = 20

    atb.init()
    bar = Bar("Eye", g_pool, dict(label="Controls",
                                  help="eye detection controls",
                                  color=(50, 50, 50),
                                  alpha=100,
                                  text="light",
                                  position=(10, 10),
                                  refresh=0.1,
                                  size=(200, 300)))

    # add v4l2 camera controls to a separate ATB bar
    if cap.controls is not None:
        c_bar = atb.Bar(name="Camera_Controls",
                        label=cap.name,
                        help="UVC Camera Controls",
                        color=(50, 50, 50),
                        alpha=100,
                        text="light",
                        position=(220, 10),
                        refresh=2.0,
                        size=(200, 200))

        # c_bar.add_var("auto_refresher",vtype=atb.TW_TYPE_BOOL8,getter=cap.uvc_refresh_all,setter=None,readonly=True)
        # c_bar.define(definition='visible=0', varname="auto_refresher")

        sorted_controls = [c for c in cap.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        for control in sorted_controls:
            name = control.atb_name
            if control.type == "bool":
                c_bar.add_var(name, vtype=atb.TW_TYPE_BOOL8, getter=control.get_val, setter=control.set_val)
            elif control.type == "int":
                c_bar.add_var(name, vtype=atb.TW_TYPE_INT32, getter=control.get_val, setter=control.set_val)
                c_bar.define(definition="min=" + str(control.min), varname=name)
                c_bar.define(definition="max=" + str(control.max), varname=name)
                c_bar.define(definition="step=" + str(control.step), varname=name)
            elif control.type == "menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype = atb.enum(name, control.menu)
                c_bar.add_var(name, vtype=vtype, getter=control.get_val, setter=control.set_val)
                if control.menu is None:
                    c_bar.define(definition="min=" + str(control.min), varname=name)
                    c_bar.define(definition="max=" + str(control.max), varname=name)
                    c_bar.define(definition="step=" + str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # c_bar.define(definition='readonly=1',varname=control.name)

        c_bar.add_button("refresh", cap.update_from_device)
        c_bar.add_button("load defaults", cap.load_defaults)
    else:
        c_bar = None

    # Initialize glfw
    glfwInit()
    glfwOpenWindow(width, height, 0, 0, 0, 8, 0, 0, GLFW_WINDOW)
    glfwSetWindowTitle("Eye")
    glfwSetWindowPos(800, 0)
    if isinstance(g_pool.eye_src, str):
        glfwSwapInterval(0)  # turn off v-sync when using video as src for benchmarking

    # register callbacks
    glfwSetWindowSizeCallback(on_resize)
    glfwSetWindowCloseCallback(on_close)
    glfwSetKeyCallback(on_key)
    glfwSetCharCallback(on_char)
    glfwSetMouseButtonCallback(on_button)
    glfwSetMousePosCallback(on_pos)
    glfwSetMouseWheelCallback(on_scroll)

    # gl_state settings
    import OpenGL.GL as gl
    gl.glEnable(gl.GL_POINT_SMOOTH)
    gl.glPointSize(20)
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glEnable(gl.GL_BLEND)
    del gl

    # event loop
    while glfwGetWindowParam(GLFW_OPENED) and not g_pool.quit.value:
        bar.update_fps()
        s, img = cap.read_RGB()
        sleep(bar.sleep.value)  # for debugging only

        ### IMAGE PROCESSING
        gray_img = grayscale(img[r.lY:r.uY, r.lX:r.uX])

        integral = cv2.integral(gray_img)
        integral = np.array(integral, dtype=c_float)
        x, y, w = eye_filter(integral)
        if w > 0:
            p_r.set((y, x, y + w, x + w))
        else:
            p_r.set((0, 0, -1, -1))

        # create view into the gray_img with the bounds of the rough pupil estimation
        pupil_img = gray_img[p_r.lY:p_r.uY, p_r.lX:p_r.uX]

        # pupil_img = cv2.morphologyEx(pupil_img, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT,(5,5)),iterations=2)

        if True:
            hist = cv2.calcHist([pupil_img], [0], None, [256], [0, 256])  # (images, channels, mask, histSize, ranges[, hist[, accumulate]])
            bins = np.arange(hist.shape[0])
            spikes = bins[hist[:, 0] > 40]  # every color seen in more than 40 pixels
            if spikes.shape[0] > 0:
                lowest_spike = spikes.min()
            offset = 40

            # display the histogram
            sx, sy = 100, 1
            colors = ((255, 0, 0), (0, 0, 255), (0, 255, 255))
            h, w, chan = img.shape
            # normalize
            hist *= 1.0 / hist.max()
            for i, h in zip(bins, hist[:, 0]):
                c = colors[1]
                cv2.line(img, (w, int(i * sy)), (w - int(h * sx), int(i * sy)), c)
            cv2.line(img, (w, int(lowest_spike * sy)), (int(w - 0.5 * sx), int(lowest_spike * sy)), colors[0])
            cv2.line(img, (w, int((lowest_spike + offset) * sy)), (int(w - 0.5 * sx), int((lowest_spike + offset) * sy)), colors[2])

            # # k-means on the histogram finds peaks but that's no good for us...
            # term_crit = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
            # compactness, bestLabels, centers = cv2.kmeans(data=hist, K=2, criteria=term_crit, attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
            # cv2.line(img,(0,1),(int(compactness),1),(0,0,0))
            # good_cluster = np.argmax(centers)
            # # A = hist[bestLabels.ravel() == good_cluster]
            # # B = hist[bestLabels.ravel() != good_cluster]
            # bins = np.arange(hist.shape[0])
            # good_bins = bins[bestLabels.ravel() == good_cluster]
            # good_bins_mean = good_bins.sum()/good_bins.shape[0]
            # good_bins_min = good_bins.min()
            # h,w,chan = img.shape
            # for h, i, label in zip(hist[:,0],range(hist.shape[0]), bestLabels.ravel()):
            #     c = colors[label]
            #     cv2.line(img,(w,int(i*sy)),(w-int(h*sx),int(i*sy)),c)
        else:
            # direct k-means on the image is best but expensive
            Z = pupil_img[::w / 30 + 1, ::w / 30 + 1].reshape((-1, 1))
            Z = np.float32(Z)
            # define criteria, number of clusters (K) and apply kmeans()
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 2.0)
            K = 5
            ret, label, center = cv2.kmeans(Z, K, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
            offset = 0
            center.sort(axis=0)
            lowest_spike = int(center[1])
            # # Now convert back into uint8, and make original image
            # center = np.uint8(center)
            # res = center[label.flatten()]
            # binary_img = res.reshape((pupil_img.shape))
            # binary_img = bin_thresholding(binary_img,image_upper=res.min()+1)
            # bar.bin_thresh.value = res.min()+1

        bar.bin_thresh.value = lowest_spike
        binary_img = bin_thresholding(pupil_img, image_upper=lowest_spike + offset)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        cv2.dilate(binary_img, kernel, binary_img, iterations=2)
        spec_mask = bin_thresholding(pupil_img, image_upper=250)
        cv2.erode(spec_mask, kernel, spec_mask, iterations=1)

        if bar.blur.value > 1:
            pupil_img = cv2.medianBlur(pupil_img, bar.blur.value)

        # create contours using Canny edge detection
        contours = cv2.Canny(pupil_img,
                             bar.canny_thresh.value,
                             bar.canny_thresh.value * bar.canny_ratio.value,
                             apertureSize=bar.canny_aperture.value)

        # remove contours in areas not dark enough and where the glint is (specular reflection from the IR LEDs)
        contours = cv2.min(contours, spec_mask)
        contours = cv2.min(contours, binary_img)

        # Ellipse fitting from contours
        result = fit_ellipse(img[r.lY:r.uY, r.lX:r.uX][p_r.lY:p_r.uY, p_r.lX:p_r.uX],
                             contours,
                             binary_img,
                             target_size=bar.pupil_size.value,
                             size_tolerance=bar.pupil_size_tolerance.value)

        # Visualizations
        overlay = cv2.cvtColor(pupil_img, cv2.COLOR_GRAY2RGB)  # create an RGB view onto the gray pupil ROI
        overlay[:, :, 0] = cv2.max(pupil_img, contours)  # red channel
        overlay[:, :, 2] = cv2.max(pupil_img, binary_img)  # blue channel
        overlay[:, :, 1] = cv2.min(pupil_img, spec_mask)  # green channel

        # draw a blue dotted frame around the automatic pupil ROI in overlay...
        overlay[::2, 0] = 0, 0, 255
        overlay[::2, -1] = 0, 0, 255
        overlay[0, ::2] = 0, 0, 255
        overlay[-1, ::2] = 0, 0, 255
        # ... and a solid (white) frame around the user defined ROI
        gray_img[:, 0] = 255
        gray_img[:, -1] = 255
        gray_img[0, :] = 255
        gray_img[-1, :] = 255

        if bar.display.value == 0:
            img = img
        elif bar.display.value == 1:
            img[r.lY:r.uY, r.lX:r.uX] = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2RGB)
        elif bar.display.value == 2:
            img[r.lY:r.uY, r.lX:r.uX] = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2RGB)
            img[r.lY:r.uY, r.lX:r.uX][p_r.lY:p_r.uY, p_r.lX:p_r.uX] = overlay
        elif bar.display.value == 3:
            img = cv2.cvtColor(pupil_img, cv2.COLOR_GRAY2RGB)
        else:
            pass

        if result is not None:
            pupil.ellipse, others = result
            pupil.image_coords = r.add_vector(p_r.add_vector(pupil.ellipse["center"]))
            # update pupil size, angle and ratio for the ellipse filter algorithm
            bar.pupil_size.value = bar.pupil_size.value + 0.5 * (pupil.ellipse["major"] - bar.pupil_size.value)
            bar.pupil_ratio.value = bar.pupil_ratio.value + 0.7 * (pupil.ellipse["ratio"] - bar.pupil_ratio.value)
            bar.pupil_angle.value = bar.pupil_angle.value + 1.0 * (pupil.ellipse["angle"] - bar.pupil_angle.value)

            # if pupil found, tighten the size tolerance
            bar.pupil_size_tolerance.value -= 1
            bar.pupil_size_tolerance.value = max(10, min(50, bar.pupil_size_tolerance.value))

            # clamp pupil size
            bar.pupil_size.value = max(20, min(300, bar.pupil_size.value))

            # normalize
            pupil.norm_coords = normalize(pupil.image_coords, (img.shape[1], img.shape[0]), flip_y=True)

            # from pupil to gaze
            pupil.gaze_coords = map_pupil(pupil.norm_coords)
            g_pool.gaze_x.value, g_pool.gaze_y.value = pupil.gaze_coords
        else:
            pupil.ellipse = None
            g_pool.gaze_x.value, g_pool.gaze_y.value = 0.0, 0.0
            pupil.gaze_coords = None  # without this line the last known pupil position is recorded if none is found
            bar.pupil_size_tolerance.value += 1

        ### CALIBRATION ###
        # Initialize Calibration (setup variables and lists)
        if g_pool.calibrate.value and not l_pool.calib_running:
            l_pool.calib_running = True
            pupil.pt_cloud = []

        # While Calibrating...
        if l_pool.calib_running and ((g_pool.ref_x.value != 0) or (g_pool.ref_y.value != 0)) and pupil.ellipse:
            pupil.pt_cloud.append([pupil.norm_coords[0], pupil.norm_coords[1],
                                   g_pool.ref_x.value, g_pool.ref_y.value])

        # Calculate mapping coefs
        if not g_pool.calibrate.value and l_pool.calib_running:
            l_pool.calib_running = 0
            if pupil.pt_cloud:  # some data was actually collected
                print "Calibrating with", len(pupil.pt_cloud), "collected data points."
                map_pupil = get_map_from_cloud(np.array(pupil.pt_cloud), g_pool.world_size, verbose=True)
                np.save("cal_pt_cloud.npy", np.array(pupil.pt_cloud))

        ### RECORDING ###
        # Setup variables and lists for recording
        if g_pool.pos_record.value and not l_pool.record_running:
            l_pool.record_path = g_pool.eye_rx.recv()
            print "l_pool.record_path: ", l_pool.record_path

            video_path = path.join(l_pool.record_path, "eye.avi")
            # FFV1 -- good speed, lossless, big file
            # DIVX -- good speed, good compression, medium file
            if bar.record_eye.value:
                l_pool.writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*"DIVX"),
                                                bar.fps.value, (img.shape[1], img.shape[0]))
            l_pool.record_positions = []
            l_pool.record_running = True

        # While recording...
        if l_pool.record_running:
            if pupil.gaze_coords is not None:
                l_pool.record_positions.append([pupil.gaze_coords[0], pupil.gaze_coords[1],
                                                pupil.norm_coords[0], pupil.norm_coords[1],
                                                bar.dt, g_pool.frame_count_record.value])
            if l_pool.writer is not None:
                l_pool.writer.write(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

        # Done Recording: Save values and flip switch to off for recording
        if not g_pool.pos_record.value and l_pool.record_running:
            positions_path = path.join(l_pool.record_path, "gaze_positions.npy")
            cal_pt_cloud_path = path.join(l_pool.record_path, "cal_pt_cloud.npy")
            np.save(positions_path, np.asarray(l_pool.record_positions))
            try:
                np.save(cal_pt_cloud_path, np.asarray(pupil.pt_cloud))
            except:
                print "Warning: No calibration data associated with this recording."
            l_pool.writer = None
            l_pool.record_running = False

        ### GL-drawing
        clear_gl_screen()
        draw_gl_texture(img)

        if bar.draw_pupil and pupil.ellipse:
            pts = cv2.ellipse2Poly((int(pupil.image_coords[0]), int(pupil.image_coords[1])),
                                   (int(pupil.ellipse["axes"][0] / 2), int(pupil.ellipse["axes"][1] / 2)),
                                   int(pupil.ellipse["angle"]), 0, 360, 15)
            draw_gl_polyline(pts, (1.0, 0, 0, 0.5))
            draw_gl_point_norm(pupil.norm_coords, (1.0, 0.0, 0.0, 0.5))

        atb.draw()
        glfwSwapBuffers()

    # end while running
    print "EYE Process closed"
    r.save()
    bar.save()
    atb.terminate()
    glfwCloseWindow()
    glfwTerminate()
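
# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the original source):
# world() and eye() are written as process entry points that communicate
# through a shared g_pool container.  The wiring below is an assumption of how
# such a container could be set up with Python's multiprocessing module; the
# attribute names mirror the ones used above (pupil_queue, quit, timebase,
# eye_rx/eye_tx, user_dir, version), everything else (paths, capture sources,
# sizes, the Temp class) is hypothetical.  The oldest eye(g_pool) variant
# above expects additional shared values (e.g. gaze_x/gaze_y, calibrate,
# ref_x/ref_y, pos_record) not shown here.
#
#   from multiprocessing import Process, Queue, Pipe, Value
#   from ctypes import c_bool, c_double
#
#   class Temp(object):
#       pass  # simple attribute container shared with the child process
#
#   g_pool = Temp()
#   g_pool.pupil_queue = Queue()                 # eye -> world pupil positions
#   g_pool.quit = Value(c_bool, 0)               # shared shutdown flag
#   g_pool.timebase = Value(c_double, 0)         # shared t0 for timestamps
#   g_pool.eye_rx, g_pool.eye_tx = Pipe(False)   # world -> eye record commands
#   g_pool.user_dir = '/path/to/settings'        # hypothetical settings dir
#   g_pool.version = '0.0'                       # hypothetical version string
#
#   p_eye = Process(target=eye, args=(g_pool, 1, (640, 360)))
#   p_eye.start()
#   world(g_pool, 0, (1280, 720))                # run world in this process
#   p_eye.join()
# ---------------------------------------------------------------------------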