def handle_frame(next):
    """Detect markers in the frame with index ``next`` and report via ``q``.

    Seeks the capture if needed, runs the robust square-marker detector,
    flags the frame as visited and puts ``(frame_index, markers)`` on the
    queue. On seek or read failure an empty detection list is reported so
    the frame still counts as processed.
    """
    # The parameter shadows the builtin ``next``; the name is kept for
    # backward compatibility with existing callers, but we alias it locally.
    frame_index = next
    if frame_index != cap.get_frame_index():
        # We need to seek. Use lazy %-args so formatting only happens when
        # the log level is enabled.
        logger.debug("Seeking to Frame %s", frame_index)
        try:
            cap.seek_to_frame(frame_index)
        except FileSeekError:
            # Could not seek to requested position.
            logger.warning("Could not evaluate frame: %s.", frame_index)
            visited_list[frame_index] = True  # this frame is now visited
            # We cannot look at the frame, report no detection.
            q.put((frame_index, []))
            return
        # Seeking invalidates prev markers for the detector.
        markers[:] = []
    try:
        frame = cap.get_frame()
    except EndofVideoFileError:
        logger.debug("Video File's last frame(s) not accesible")
        # Could not read frame.
        logger.warning("Could not evaluate frame: %s.", frame_index)
        visited_list[frame_index] = True  # this frame is now visited
        # We cannot look at the frame, report no detection.
        q.put((frame_index, []))
        return
    markers[:] = detect_markers_robust(
        frame.gray,
        grid_size=5,
        prev_markers=markers,
        min_marker_perimeter=min_marker_perimeter,
        aperture=aperture,
        visualize=0,
        true_detect_every_frame=1,
    )
    visited_list[frame.index] = True
    # The object is only pickled when collected from the other process, so
    # put a copy to avoid it being overwritten in the meantime.
    q.put((frame.index, markers[:]))
def _detect_markers(self, frame):
    """Run square-marker detection on *frame* and refresh marker state."""
    gray_img = frame.gray
    if self.robust_detection:
        raw_markers = marker_det.detect_markers_robust(
            gray_img,
            grid_size=5,
            aperture=11,
            prev_markers=self.previous_markers,
            true_detect_every_frame=3,
            min_marker_perimeter=self.marker_min_perimeter,
            invert_image=self.inverted_markers,
        )
    else:
        raw_markers = marker_det.detect_markers(
            gray_img,
            grid_size=5,
            aperture=11,
            min_marker_perimeter=self.marker_min_perimeter,
        )
    # Robust marker detection requires previous markers to be in a different
    # format than the surface tracker.
    self.previous_markers = raw_markers
    wrapped = [
        Square_Marker_Detection(
            raw["id"], raw["id_confidence"], raw["verts"], raw["perimeter"]
        )
        for raw in raw_markers
    ]
    deduplicated = self._remove_duplicate_markers(wrapped)
    self.markers_unfiltered = deduplicated
    self.markers = self._filter_markers(deduplicated)
def handle_frame(next):
    """Detect markers in the frame with index ``next`` and report via ``q``.

    Non-blocking variant: reads frames with ``get_frame_nowait``. Seeks the
    capture if needed, runs the robust square-marker detector, flags the
    frame as visited and puts ``(frame_index, markers)`` on the queue. On
    seek or read failure an empty detection list is reported so the frame
    still counts as processed.
    """
    # The parameter shadows the builtin ``next``; the name is kept for
    # backward compatibility with existing callers, but we alias it locally.
    frame_index = next
    if frame_index != cap.get_frame_index():
        # We need to seek. Use lazy %-args so formatting only happens when
        # the log level is enabled.
        logger.debug("Seeking to Frame %s", frame_index)
        try:
            cap.seek_to_frame(frame_index)
        except FileSeekError:
            # Could not seek to requested position.
            logger.warning("Could not evaluate frame: %s.", frame_index)
            visited_list[frame_index] = True  # this frame is now visited
            # We cannot look at the frame, report no detection.
            q.put((frame_index, []))
            return
        # Seeking invalidates prev markers for the detector.
        markers[:] = []
    try:
        frame = cap.get_frame_nowait()
    except EndofVideoFileError:
        logger.debug("Video File's last frame(s) not accesible")
        # Could not read frame.
        logger.warning("Could not evaluate frame: %s.", frame_index)
        visited_list[frame_index] = True  # this frame is now visited
        # We cannot look at the frame, report no detection.
        q.put((frame_index, []))
        return
    markers[:] = detect_markers_robust(
        frame.gray,
        grid_size=5,
        prev_markers=markers,
        min_marker_perimeter=min_marker_perimeter,
        aperture=aperture,
        visualize=0,
        true_detect_every_frame=1,
    )
    visited_list[frame.index] = True
    # The object is only pickled when collected from the other process, so
    # put a copy to avoid it being overwritten in the meantime.
    q.put((frame.index, markers[:]))
def __call__(self, frame):
    """Detect square markers in *frame*, reusing state for consecutive frames."""
    # A non-consecutive frame (e.g. after a seek) invalidates the cached
    # markers from the previous frame.
    if frame.index != self.prev_frame_idx + 1:
        self.prev_markers = []
    raw_markers = square_marker_detect.detect_markers_robust(
        frame.gray,
        grid_size=5,
        prev_markers=self.prev_markers,
        min_marker_perimeter=self.min_marker_perimeter,
        aperture=9,
        visualize=0,
        true_detect_every_frame=1,
        invert_image=self.inverted_markers,
    )
    self.prev_markers = raw_markers
    self.prev_frame_idx = frame.index
    return [
        Square_Marker_Detection(
            raw["id"], raw["id_confidence"], raw["verts"], raw["perimeter"]
        )
        for raw in raw_markers
    ]
def update(self, frame, events):
    """Detect markers, locate surfaces and map gaze for one world frame.

    Populates ``events['surface']`` with a datum per detected surface and
    attaches realtime gaze-on-surface coordinates to gaze positions.
    """
    self.img_shape = frame.height, frame.width, 3
    if self.running:
        gray = frame.gray
        if self.robust_detection:
            self.markers = detect_markers_robust(
                gray,
                grid_size=5,
                prev_markers=self.markers,
                min_marker_perimeter=self.min_marker_perimeter,
                aperture=self.aperture,
                visualize=0,
                true_detect_every_frame=3,
                invert_image=self.invert_image,
            )
        else:
            self.markers = detect_markers(
                gray,
                grid_size=5,
                min_marker_perimeter=self.min_marker_perimeter,
                aperture=self.aperture,
                visualize=0,
                invert_image=self.invert_image,
            )
        if self.mode == "Show marker IDs":
            draw_markers(frame.gray, self.markers)
    events['surface'] = []
    # Locate surfaces.
    for s in self.surfaces:
        s.locate(self.markers, self.camera_calibration, self.min_marker_perimeter, self.locate_3d)
        if s.detected:
            events['surface'].append({
                'name': s.name,
                'uid': s.uid,
                'm_to_screen': s.m_to_screen.tolist(),
                'm_from_screen': s.m_from_screen.tolist(),
                'timestamp': frame.timestamp,
                # camera_pose_3d is None when 3d localization is off or
                # failed; guard to avoid AttributeError (consistent with the
                # other tracker versions).
                'camera_pose_3d': s.camera_pose_3d.tolist() if s.camera_pose_3d is not None else None,
            })
    if self.running:
        self.button.status_text = '%s/%s' % (len([s for s in self.surfaces if s.detected]), len(self.surfaces))
    else:
        self.button.status_text = 'tracking paused'
    if self.mode == 'Show Markers and Surfaces':
        # Edit surfaces by user.
        if self.edit_surf_verts:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window), flip_y=True)
            for s, v_idx in self.edit_surf_verts:
                if s.detected:
                    new_pos = s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx, new_pos)
    # Map recent gaze onto detected surfaces; used for pupil server.
    for s in self.surfaces:
        if s.detected:
            s.gaze_on_srf = []
            for p in events.get('gaze_positions', []):
                gp_on_s = tuple(s.img_to_ref_surface(np.array(p['norm_pos'])))
                p['realtime gaze on ' + s.name] = gp_on_s
                s.gaze_on_srf.append(gp_on_s)
def update(self, frame, recent_pupil_positions, events):
    """Detect markers, locate surfaces, map gaze and manage surface windows.

    Old-style plugin API: *events* is a list that surface data is appended to.
    """
    img = frame.img
    self.img_shape = frame.img.shape
    if self.robust_detection.value:
        self.markers = detect_markers_robust(
            img,
            grid_size=5,
            prev_markers=self.markers,
            min_marker_perimeter=self.min_marker_perimeter,
            aperture=self.aperture.value,
            visualize=0,
            true_detect_every_frame=3,
        )
    else:
        self.markers = detect_markers_simple(
            img,
            grid_size=5,
            min_marker_perimeter=self.min_marker_perimeter,
            aperture=self.aperture.value,
            visualize=0,
        )
    # Locate surfaces and publish one event per detected surface.
    for srf in self.surfaces:
        srf.locate(self.markers)
        if srf.detected:
            events.append({
                'type': 'marker_ref_surface',
                'name': srf.name,
                'uid': srf.uid,
                'm_to_screen': srf.m_to_screen,
                'm_from_screen': srf.m_from_screen,
                'timestamp': frame.timestamp,
            })
    if self.draw_markers.value:
        draw_markers(img, self.markers)
    # Let the user drag surface vertices with the cursor.
    if self.surface_edit_mode:
        window = glfwGetCurrentContext()
        pos = glfwGetCursorPos(window)
        pos = normalize(pos, glfwGetWindowSize(window))
        pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # Position in img pixels
        for srf, v_idx in self.edit_surfaces:
            if srf.detected:
                pos = normalize(pos, (self.img_shape[1], self.img_shape[0]), flip_y=True)
                new_pos = srf.img_to_ref_surface(np.array(pos))
                srf.move_vertex(v_idx, new_pos)
    # Map recent gaze onto detected surfaces; used for pupil server.
    for srf in self.surfaces:
        if srf.detected:
            srf.gaze_on_srf = []
            for p in recent_pupil_positions:
                if p['norm_pupil'] is not None:
                    gp_on_s = tuple(srf.img_to_ref_surface(np.array(p['norm_gaze'])))
                    p['realtime gaze on ' + srf.name] = gp_on_s
                    srf.gaze_on_srf.append(gp_on_s)
    # Allow surfaces to open/close their debug windows.
    for srf in self.surfaces:
        if srf.window_should_close:
            srf.close_window()
        if srf.window_should_open:
            srf.open_window()
def update(self, frame, events):
    """Detect markers, locate surfaces and map recent gaze for one frame."""
    self.img_shape = frame.height, frame.width, 3
    if self.running:
        gray = frame.gray
        if self.robust_detection:
            self.markers = detect_markers_robust(
                gray,
                grid_size=5,
                prev_markers=self.markers,
                min_marker_perimeter=self.min_marker_perimeter,
                aperture=self.aperture,
                visualize=0,
                true_detect_every_frame=3,
            )
        else:
            self.markers = detect_markers(
                gray,
                grid_size=5,
                min_marker_perimeter=self.min_marker_perimeter,
                aperture=self.aperture,
                visualize=0,
            )
        if self.mode == "Show marker IDs":
            draw_markers(frame.img, self.markers)
    # Locate surfaces (surface events are not published in this version).
    for srf in self.surfaces:
        srf.locate(self.markers, self.locate_3d, self.camera_intrinsics)
    if self.running:
        detected_count = len([srf for srf in self.surfaces if srf.detected])
        self.button.status_text = "%s/%s" % (detected_count, len(self.surfaces))
    else:
        self.button.status_text = "tracking paused"
    # Let the user drag surface vertices with the cursor.
    if self.mode == "Surface edit mode":
        window = glfwGetCurrentContext()
        pos = glfwGetCursorPos(window)
        pos = normalize(pos, glfwGetWindowSize(window), flip_y=True)
        for srf, v_idx in self.edit_surfaces:
            if srf.detected:
                new_pos = srf.img_to_ref_surface(np.array(pos))
                srf.move_vertex(v_idx, new_pos)
    # Map recent gaze onto detected surfaces; used for pupil server.
    for srf in self.surfaces:
        if srf.detected:
            srf.gaze_on_srf = []
            for p in events.get("gaze_positions", []):
                gp_on_s = tuple(srf.img_to_ref_surface(np.array(p["norm_pos"])))
                p["realtime gaze on " + srf.name] = gp_on_s
                srf.gaze_on_srf.append(gp_on_s)
def recent_events(self, events):
    """Per-frame plugin callback: detect markers, locate surfaces, map gaze."""
    frame = events.get('frame')
    if not frame:
        return
    self.img_shape = frame.height, frame.width, 3
    if self.running:
        gray = frame.gray
        if self.invert_image:
            gray = 255 - gray
        if self.robust_detection:
            self.markers = detect_markers_robust(
                gray,
                grid_size=5,
                aperture=self.aperture,
                prev_markers=self.markers,
                true_detect_every_frame=3,
                min_marker_perimeter=self.min_marker_perimeter,
            )
        else:
            self.markers = detect_markers(
                gray,
                grid_size=5,
                aperture=self.aperture,
                min_marker_perimeter=self.min_marker_perimeter,
            )
        if self.mode == "Show marker IDs":
            draw_markers(frame.gray, self.markers)
    # Locate surfaces and map gaze onto the detected ones.
    for srf in self.surfaces:
        srf.locate(self.markers, self.min_marker_perimeter, self.min_id_confidence, self.locate_3d)
        if srf.detected:
            srf.gaze_on_srf = srf.map_data_to_surface(events.get('gaze_positions', []), srf.m_from_screen)
            srf.update_gaze_history()
        else:
            srf.gaze_on_srf = []
    # Publish one surface datum per detected surface.
    events['surfaces'] = []
    for srf in self.surfaces:
        if srf.detected:
            pose = srf.camera_pose_3d.tolist() if srf.camera_pose_3d is not None else None
            events['surfaces'].append({
                'name': srf.name,
                'uid': srf.uid,
                'm_to_screen': srf.m_to_screen.tolist(),
                'm_from_screen': srf.m_from_screen.tolist(),
                'gaze_on_srf': srf.gaze_on_srf,
                'timestamp': frame.timestamp,
                'camera_pose_3d': pose,
            })
    if self.running:
        detected_count = len([srf for srf in self.surfaces if srf.detected])
        self.button.status_text = '{}/{}'.format(detected_count, len(self.surfaces))
    else:
        self.button.status_text = 'tracking paused'
    if self.mode == 'Show Markers and Surfaces':
        # Let the user drag surface vertices with the cursor.
        if self.edit_surf_verts:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window), flip_y=True)
            for srf, v_idx in self.edit_surf_verts:
                if srf.detected:
                    new_pos = srf.img_to_ref_surface(np.array(pos))
                    srf.move_vertex(v_idx, new_pos)
def recent_events(self, events):
    """Per-frame plugin callback: detect markers, locate surfaces, map gaze.

    Variant that passes the camera calibration to ``Surface.locate``.
    """
    frame = events.get('frame')
    if not frame:
        return
    self.img_shape = frame.height, frame.width, 3
    if self.running:
        gray = frame.gray
        if self.invert_image:
            gray = 255 - gray
        if self.robust_detection:
            self.markers = detect_markers_robust(
                gray,
                grid_size=5,
                aperture=self.aperture,
                prev_markers=self.markers,
                true_detect_every_frame=3,
                min_marker_perimeter=self.min_marker_perimeter,
            )
        else:
            self.markers = detect_markers(
                gray,
                grid_size=5,
                aperture=self.aperture,
                min_marker_perimeter=self.min_marker_perimeter,
            )
        if self.mode == "Show marker IDs":
            draw_markers(frame.gray, self.markers)
    # Locate surfaces and map gaze onto the detected ones.
    for srf in self.surfaces:
        srf.locate(self.markers, self.camera_calibration, self.min_marker_perimeter, self.min_id_confidence, self.locate_3d)
        if srf.detected:
            srf.gaze_on_srf = srf.map_data_to_surface(events.get('gaze_positions', []), srf.m_from_screen)
        else:
            srf.gaze_on_srf = []
    # Publish one surface datum per detected surface.
    events['surfaces'] = []
    for srf in self.surfaces:
        if srf.detected:
            pose = srf.camera_pose_3d.tolist() if srf.camera_pose_3d is not None else None
            events['surfaces'].append({
                'name': srf.name,
                'uid': srf.uid,
                'm_to_screen': srf.m_to_screen.tolist(),
                'm_from_screen': srf.m_from_screen.tolist(),
                'gaze_on_srf': srf.gaze_on_srf,
                'timestamp': frame.timestamp,
                'camera_pose_3d': pose,
            })
    if self.running:
        detected_count = len([srf for srf in self.surfaces if srf.detected])
        self.button.status_text = '{}/{}'.format(detected_count, len(self.surfaces))
    else:
        self.button.status_text = 'tracking paused'
    if self.mode == 'Show Markers and Surfaces':
        # Let the user drag surface vertices with the cursor.
        if self.edit_surf_verts:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window), flip_y=True)
            for srf, v_idx in self.edit_surf_verts:
                if srf.detected:
                    new_pos = srf.img_to_ref_surface(np.array(pos))
                    srf.move_vertex(v_idx, new_pos)
def update(self, frame, recent_pupil_positions, events):
    """Detect markers, locate surfaces, map gaze and manage the debug window.

    Old-style plugin API: *events* is a list that surface data is appended to.
    """
    img = frame.img
    self.img_shape = frame.img.shape
    if self.robust_detection.value:
        self.markers = detect_markers_robust(
            img,
            grid_size=5,
            prev_markers=self.markers,
            min_marker_perimeter=self.min_marker_perimeter,
            aperture=self.aperture.value,
            visualize=0,
            true_detect_every_frame=3,
        )
    else:
        self.markers = detect_markers_simple(
            img,
            grid_size=5,
            min_marker_perimeter=self.min_marker_perimeter,
            aperture=self.aperture.value,
            visualize=0,
        )
    if self.draw_markers.value:
        draw_markers(img, self.markers)
    # Locate surfaces and publish one event per detected surface.
    for srf in self.surfaces:
        srf.locate(self.markers)
        if srf.detected:
            events.append({
                'type': 'marker_ref_surface',
                'name': srf.name,
                'm_to_screen': srf.m_to_screen,
                'm_from_screen': srf.m_from_screen,
                'timestamp': frame.timestamp,
            })
    # Let the user drag surface vertices with the cursor.
    if self.surface_edit_mode:
        window = glfwGetCurrentContext()
        pos = glfwGetCursorPos(window)
        pos = normalize(pos, glfwGetWindowSize(window))
        pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # Position in img pixels
        for srf, v_idx in self.edit_surfaces:
            if srf.detected:
                pos = normalize(pos, (self.img_shape[1], self.img_shape[0]), flip_y=True)
                new_pos = srf.img_to_ref_surface(np.array(pos))
                srf.move_vertex(v_idx, new_pos)
    # Map recent gaze onto detected surfaces; used for pupil server.
    for p in recent_pupil_positions:
        if p['norm_pupil'] is not None:
            for srf in self.surfaces:
                if srf.detected:
                    p['realtime gaze on ' + srf.name] = tuple(srf.img_to_ref_surface(np.array(p['norm_gaze'])))
    if self._window:
        # Save a local copy for when we display gaze for debugging on the
        # reference surface.
        self.recent_pupil_positions = recent_pupil_positions
    if self.window_should_close:
        self.close_window()
    if self.window_should_open:
        self.open_window()
def __call__(self, frame):
    """Detect square markers in *frame* without using inter-frame state."""
    raw_markers = square_marker_detect.detect_markers_robust(
        frame.gray,
        grid_size=5,
        prev_markers=[],
        min_marker_perimeter=self.min_marker_perimeter,
        aperture=9,
        visualize=0,
        true_detect_every_frame=1,
        invert_image=self.inverted_markers,
    )
    return [
        Square_Marker_Detection(
            raw["id"], raw["id_confidence"], raw["verts"], raw["perimeter"]
        )
        for raw in raw_markers
    ]
def detect_markers_iter(
    self, gray_img, frame_index: int
) -> typing.Iterable[Surface_Marker]:
    """Yield square-tag surface markers detected in *gray_img*."""
    if Surface_Marker_Detector_Mode.SQUARE_MARKER not in self.marker_detector_modes:
        return []

    # Online mode sees consecutive frames and can skip true detection;
    # offline mode must truly detect on every frame.
    true_detect_every_frame = 3 if self.use_online_mode else 1

    # In offline mode we can get non-monotonic data, in which case the
    # previously detected raw markers are invalid.
    if frame_index != self.__previous_frame_index + 1:
        self.__previous_raw_markers = []
        # TODO: Does this mean that seeking in the recording while the
        # surface is being detected will essentially compromise the data? As
        # in these cases we cannot use the previous frame data for inferring
        # better marker positions. But if we would not have seeked we could
        # have used this information! This looks like an inconsistency!

    raw_markers = square_marker_detect.detect_markers_robust(
        gray_img=gray_img,
        grid_size=5,
        min_marker_perimeter=self.marker_min_perimeter,
        aperture=9,
        prev_markers=self.__previous_raw_markers,
        true_detect_every_frame=true_detect_every_frame,
        invert_image=self.__inverted_markers,
    )
    # Robust marker detection requires previous markers to be in a different
    # format than the surface tracker.
    self.__previous_raw_markers = raw_markers
    self.__previous_frame_index = frame_index

    surface_markers = map(Surface_Marker.from_square_tag_detection, raw_markers)
    return filter(self._surface_marker_filter, surface_markers)
def detect_markers_iter(
    self, gray_img, frame_index: int
) -> typing.Iterable[Surface_Marker]:
    """Yield square-tag surface markers detected in *gray_img*.

    Variant with a configurable robust/simple detection mode.
    """
    if Surface_Marker_Detector_Mode.SQUARE_MARKER not in self.marker_detector_modes:
        return []

    # If the current frame does not follow the previous one, forget the
    # previously detected raw markers — they are no longer valid priors.
    if frame_index != self.__previous_frame_index + 1:
        self.__previous_raw_markers = []

    if self.__robust_detection:
        raw_markers = square_marker_detect.detect_markers_robust(
            gray_img=gray_img,
            grid_size=5,
            min_marker_perimeter=self.marker_min_perimeter,
            aperture=9,
            prev_markers=self.__previous_raw_markers,
            true_detect_every_frame=3,
            invert_image=self.__inverted_markers,
        )
    else:
        raw_markers = square_marker_detect.detect_markers(
            gray_img=gray_img,
            grid_size=5,
            min_marker_perimeter=self.marker_min_perimeter,
            aperture=9,
        )
    # Robust marker detection requires previous markers to be in a different
    # format than the surface tracker.
    self.__previous_raw_markers = raw_markers
    self.__previous_frame_index = frame_index

    surface_markers = map(Surface_Marker.from_square_tag_detection, raw_markers)
    return filter(self._surface_marker_filter, surface_markers)
def update(self, frame, events):
    """Detect markers, locate surfaces, map gaze and publish surface events."""
    self.img_shape = frame.height, frame.width, 3
    if self.running:
        gray = frame.gray
        if self.invert_image:
            gray = 255 - gray
        if self.robust_detection:
            self.markers = detect_markers_robust(
                gray,
                grid_size=5,
                aperture=self.aperture,
                prev_markers=self.markers,
                true_detect_every_frame=3,
                min_marker_perimeter=self.min_marker_perimeter,
            )
        else:
            self.markers = detect_markers(
                gray,
                grid_size=5,
                aperture=self.aperture,
                min_marker_perimeter=self.min_marker_perimeter,
            )
        if self.mode == "Show marker IDs":
            draw_markers(frame.gray, self.markers)
    # Locate surfaces and map gaze onto the detected ones.
    for srf in self.surfaces:
        srf.locate(
            self.markers,
            self.camera_calibration,
            self.min_marker_perimeter,
            self.min_id_confidence,
            self.locate_3d,
        )
        if srf.detected:
            srf.gaze_on_srf = srf.map_data_to_surface(events.get("gaze_positions", []), srf.m_from_screen)
        else:
            srf.gaze_on_srf = []
    # Publish one surface datum per detected surface.
    events["surface"] = []
    for srf in self.surfaces:
        if srf.detected:
            pose = srf.camera_pose_3d.tolist() if srf.camera_pose_3d is not None else None
            events["surface"].append({
                "name": srf.name,
                "uid": srf.uid,
                "m_to_screen": srf.m_to_screen.tolist(),
                "m_from_screen": srf.m_from_screen.tolist(),
                "gaze_on_srf": srf.gaze_on_srf,
                "timestamp": frame.timestamp,
                "camera_pose_3d": pose,
            })
    if self.running:
        detected_count = len([srf for srf in self.surfaces if srf.detected])
        self.button.status_text = "%s/%s" % (detected_count, len(self.surfaces))
    else:
        self.button.status_text = "tracking paused"
    if self.mode == "Show Markers and Surfaces":
        # Let the user drag surface vertices with the cursor.
        if self.edit_surf_verts:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window), flip_y=True)
            for srf, v_idx in self.edit_surf_verts:
                if srf.detected:
                    new_pos = srf.img_to_ref_surface(np.array(pos))
                    srf.move_vertex(v_idx, new_pos)
def update(self, frame, recent_pupil_positions, events):
    """Detect markers, locate surfaces, map gaze and manage surface windows.

    Old-style plugin API: *events* is a list that surface data is appended
    to. NOTE(review): in this variant ``gaze_on_srf`` is reset but never
    appended to (the append was disabled in the original, together with a
    commented-out serial-port experiment); that behavior is preserved.
    """
    img = frame.img
    self.img_shape = frame.img.shape
    if self.robust_detection.value:
        self.markers = detect_markers_robust(
            img,
            grid_size=5,
            prev_markers=self.markers,
            min_marker_perimeter=self.min_marker_perimeter,
            aperture=self.aperture.value,
            visualize=0,
            true_detect_every_frame=3,
        )
    else:
        self.markers = detect_markers_simple(
            img,
            grid_size=5,
            min_marker_perimeter=self.min_marker_perimeter,
            aperture=self.aperture.value,
            visualize=0,
        )
    # Locate surfaces and publish one event per detected surface.
    for s in self.surfaces:
        s.locate(self.markers)
        if s.detected:
            events.append({
                'type': 'marker_ref_surface',
                'name': s.name,
                'uid': s.uid,
                'm_to_screen': s.m_to_screen,
                'm_from_screen': s.m_from_screen,
                'timestamp': frame.timestamp,
            })
    if self.draw_markers.value:
        draw_markers(img, self.markers)
    # Let the user drag surface vertices with the cursor.
    if self.surface_edit_mode:
        window = glfwGetCurrentContext()
        pos = glfwGetCursorPos(window)
        pos = normalize(pos, glfwGetWindowSize(window))
        pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # Position in img pixels
        for s, v_idx in self.edit_surfaces:
            if s.detected:
                pos = normalize(pos, (self.img_shape[1], self.img_shape[0]), flip_y=True)
                new_pos = s.img_to_ref_surface(np.array(pos))
                s.move_vertex(v_idx, new_pos)
    # Map recent gaze onto detected surfaces; used for pupil server.
    for s in self.surfaces:
        if s.detected:
            s.gaze_on_srf = []
            for p in recent_pupil_positions:
                if p['norm_pupil'] is not None:
                    gp_on_s = tuple(s.img_to_ref_surface(np.array(p['norm_gaze'])))
                    p['realtime gaze on ' + s.name] = gp_on_s
                    # Fixed: was a Python-2 ``print`` statement, which is a
                    # syntax error under Python 3; the call form works on both.
                    print(gp_on_s)
    # Allow surfaces to open/close their debug windows.
    for s in self.surfaces:
        if s.window_should_close:
            s.close_window()
        if s.window_should_open:
            s.open_window()
def update(self, frame, events):
    """Detect markers, track registered markers and pose, and drive AR drawing.

    Detects visible square markers, merges them into the persistent
    ``self.markers`` registry, estimates each visible marker's pose with
    ``cv2.solvePnP`` and, when the marker is fixated by gaze in
    "Draw obj" mode, prepares its 3D object for rendering.
    """
    self.img_shape = frame.height, frame.width, 3
    if self.running:
        self.current_gaze_pos = self.get_gaze_pos(events)
        # Drawing only in the undistorted image.
        if self.show_undistord:
            self.frame_img = undistord_with_roi(
                img=frame.img,
                cm=self.camera_intrinsics[0],
                dist_coef=self.camera_intrinsics[1],
                roi=self.roi,
                new_cm=self.camera_intrinsics[4],
            )
            gray = cv2.cvtColor(self.frame_img, cv2.COLOR_BGR2GRAY)
            cv2.imshow("test", self.frame_img)
        else:
            self.frame_img = frame.img
            gray = frame.gray
        if self.robust_detection:
            visible_markers = detect_markers_robust(
                gray,
                grid_size=5,
                prev_markers=self.markers,
                min_marker_perimeter=self.min_marker_perimeter,
                aperture=11,
                visualize=0,
                true_detect_every_frame=3,
            )
        else:
            visible_markers = detect_markers(
                gray,
                grid_size=5,
                min_marker_perimeter=self.min_marker_perimeter,
                aperture=11,
                visualize=0,
            )
        # Reset visibility; it is re-established below for detected markers.
        for m in self.markers:
            m['visible'] = False
        self.find_main_marker(visible_markers)
        for vm in visible_markers:
            # Find the index of the visible marker in self.markers.
            index = -1
            for indexList, m in enumerate(self.markers):
                if m['id'] == vm['id']:
                    index = indexList
                    break
            if index == -1:
                # Marker is not registered yet: append a fresh record.
                index = len(self.markers)
                new_marker = {
                    'id': vm['id'],
                    'verts': vm['verts'],
                    'verts_norm': vm['verts_norm'],
                    'centroid': vm['centroid'],
                    'frames_since_true_detection': 0,
                    'height': 76,
                    'frames_looked_up_count': 0,
                    'obj_name': "None",
                    'obj': None,
                    'mult': 1,
                }
                self.markers.append(new_marker)
            marker = self.markers[index]
            marker['verts'] = vm['verts']
            marker['verts_norm'] = vm['verts_norm']
            marker['centroid'] = vm['centroid']
            marker['frames_since_true_detection'] = vm['frames_since_true_detection']
            marker['visible'] = True
            objp = gen_square_pattern_grid(marker['height'])
            # Find the rotation and translation vectors.
            # Already undistorted, so no need to pass distortion coefficients.
            _, rvecs, tvecs = cv2.solvePnP(objp, marker['verts'], self.camera_intrinsics[0], None)
            # If the marker is fixated by the gaze:
            if self.is_looked_up(marker):
                # Get the obj to draw.
                if self.mode == "Draw obj":
                    # Fixed: identity comparison with None (was ``== None``).
                    if marker['obj'] is None and marker['obj_name'] != "None":
                        marker['obj'] = OBJ("../ressources/" + marker['obj_name'], marker['mult'], swapyz=True)
                    marker['rot'] = rvecs
                    marker['trans'] = tvecs
                    marker['to_draw'] = True
            else:
                # Not fixated: stop drawing its object.
                if self.mode == "Draw obj":
                    marker['to_draw'] = False
    if not self.running:
        self.button.status_text = 'tracking paused'
def recent_events(self, events):
    """Per-frame plugin callback: detect markers, locate surfaces, map data.

    Variant that also maps fixations onto surfaces and uses a cached mouse
    position instead of querying GLFW directly.
    """
    frame = events.get("frame")
    if not frame:
        return
    self.img_shape = frame.height, frame.width, 3
    if self.running:
        gray = frame.gray
        if self.invert_image:
            gray = 255 - gray
        if self.robust_detection:
            self.markers = detect_markers_robust(
                gray,
                grid_size=5,
                aperture=self.aperture,
                prev_markers=self.markers,
                true_detect_every_frame=3,
                min_marker_perimeter=self.min_marker_perimeter,
            )
        else:
            self.markers = detect_markers(
                gray,
                grid_size=5,
                aperture=self.aperture,
                min_marker_perimeter=self.min_marker_perimeter,
            )
        if self.mode == "Show marker IDs":
            draw_markers(frame.gray, self.markers)
    # Locate surfaces; map gaze and fixations onto the detected ones.
    for srf in self.surfaces:
        srf.locate(
            self.markers,
            self.min_marker_perimeter,
            self.min_id_confidence,
            self.locate_3d,
        )
        if srf.detected:
            srf.gaze_on_srf = srf.map_data_to_surface(events.get("gaze", []), srf.m_from_screen)
            srf.fixations_on_srf = srf.map_data_to_surface(events.get("fixations", []), srf.m_from_screen)
            srf.update_gaze_history()
        else:
            srf.gaze_on_srf = []
            srf.fixations_on_srf = []
    # Publish one surface datum per detected surface.
    events["surfaces"] = []
    for srf in self.surfaces:
        if srf.detected:
            pose = srf.camera_pose_3d.tolist() if srf.camera_pose_3d is not None else None
            datum = {
                "topic": "surfaces.{}".format(srf.name),
                "name": srf.name,
                "uid": srf.uid,
                "m_to_screen": srf.m_to_screen.tolist(),
                "m_from_screen": srf.m_from_screen.tolist(),
                "gaze_on_srf": srf.gaze_on_srf,
                "fixations_on_srf": srf.fixations_on_srf,
                "timestamp": frame.timestamp,
                "camera_pose_3d": pose,
            }
            events["surfaces"].append(datum)
    if self.running:
        detected_count = len([srf for srf in self.surfaces if srf.detected])
        self.button.status_text = "{}/{}".format(detected_count, len(self.surfaces))
    else:
        self.button.status_text = "tracking paused"
    if self.mode == "Show Markers and Surfaces":
        # Let the user drag surface vertices with the cursor.
        if self.edit_surf_verts:
            pos = self._last_mouse_pos
            for srf, v_idx in self.edit_surf_verts:
                if srf.detected:
                    new_pos = srf.img_to_ref_surface(np.array(pos))
                    srf.move_vertex(v_idx, new_pos)
if __name__ == '__main__':
    # Benchmark the Cython robust square-marker detector on the first frame
    # of a sample video. (The commented-out cProfile/gprof2dot scaffolding
    # and dead bench loop that previously lived here have been removed.)
    cap = cv2.VideoCapture('/home/manasi/Downloads/fiducial_markers.mkv')
    status, img = cap.read()
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Release the capture handle once the frame is read (was leaked before).
    cap.release()
    t = Timer(lambda: square_marker_detect.detect_markers_robust(gray_img, 5, [], true_detect_every_frame=1))
    # Fixed: Python-2 ``print`` statements are syntax errors under Python 3;
    # the call form works on both interpreters.
    print("Time required for cython")
    print(t.timeit(number=100))