def detect_marker(self, frame):
    """Detect a concentric-circle calibration marker in *frame*.

    Runs the concentric-circle detector on the frame's gray image and
    reports the first marker found.

    Returns:
        (detected, pos): ``detected`` is True when at least one marker
        with 3+ rings is found; ``pos`` is the pixel position of the
        first marker's innermost ellipse center, or None when no marker
        was detected.
    """
    gray_img = frame.gray
    markers = find_concetric_circles(gray_img, min_ring_count=3)
    if len(markers) > 0:
        # first marker, innermost ellipse, center point (raw pixel coords,
        # intentionally NOT normalized to the frame size)
        return True, markers[0][0][0]
    # no reference marker detected this frame
    return False, None
def recent_events(self, events):
    """Per-frame update of the screen-marker calibration routine.

    Detects the on-screen concentric-circle marker, collects reference
    points while the marker animation is inside its sampling window,
    stores confident pupil data, and advances the marker animation /
    site sequence.  Does nothing unless the routine is active and a
    frame is present in *events*.
    """
    frame = events.get('frame')
    if self.active and frame:
        recent_pupil_positions = events['pupil_positions']
        gray_img = frame.gray
        # user clicked enough times to abort the calibration
        if self.clicks_to_close <= 0:
            self.stop()
            return

        # detect the marker (4+ rings required for the screen marker)
        self.markers = find_concetric_circles(gray_img, min_ring_count=4)
        if len(self.markers) > 0:
            self.detected = True
            marker_pos = self.markers[0][0][0]  # first marker, innermost ellipse, center
            self.pos = normalize(marker_pos, (frame.width, frame.height), flip_y=True)
        else:
            self.detected = False
            self.pos = None  # indicate that no reference is detected

        # only save a valid ref position if within the sample window of the
        # calibration routine: past the lead-in, before the lead-out
        on_position = self.lead_in < self.screen_marker_state < (self.lead_in + self.sample_duration)
        if on_position and self.detected:
            ref = {}
            ref["norm_pos"] = self.pos
            ref["screen_pos"] = marker_pos
            ref["timestamp"] = frame.timestamp
            self.ref_list.append(ref)

        # always save pupil positions (above the confidence threshold)
        for p_pt in recent_pupil_positions:
            if p_pt['confidence'] > self.pupil_confidence_threshold:
                self.pupil_list.append(p_pt)

        # a detected fixation while sampling lets us skip ahead in the
        # sample window (capped at the end of the window)
        if on_position and self.detected and events.get('fixations', []):
            self.screen_marker_state = min(
                self.sample_duration + self.lead_in,
                self.screen_marker_state + self.fixation_boost)

        # Animate the screen marker: advance while the marker is seen (or we
        # are still leading in/out); when the full cycle is done, move on to
        # the next calibration site or stop when none are left.
        if self.screen_marker_state < self.sample_duration + self.lead_in + self.lead_out:
            if self.detected or not on_position:
                self.screen_marker_state += 1
        else:
            self.screen_marker_state = 0
            if not self.sites:
                self.stop()
                return
            self.active_site = self.sites.pop(0)
            logger.debug("Moving screen marker to site at {} {}".format(*self.active_site))

        # use np.arrays for per element wise math
        self.display_pos = np.array(self.active_site)
        self.on_position = on_position
        # NOTE(review): '9' is a hard-coded total site count — presumably the
        # 9-point grid; verify it matches the configured site list.
        self.button.status_text = '{} / {}'.format(self.active_site, 9)
def recent_events(self, events):
    """Per-frame update of a single-marker calibration routine.

    Detects the concentric-circle marker, records reference points once
    the lead-in period has elapsed, stores confident pupil data, advances
    the marker animation state and redraws the marker window if open.
    Does nothing unless active and a frame is present in *events*.
    """
    frame = events.get('frame')
    if self.active and frame:
        recent_pupil_positions = events['pupil_positions']
        gray_img = frame.gray
        # user clicked enough times to abort the calibration
        if self.clicks_to_close <= 0:
            self.stop()
            return

        # detect the marker (4+ rings required)
        self.markers = find_concetric_circles(gray_img, min_ring_count=4)
        if len(self.markers) > 0:
            self.detected = True
            marker_pos = self.markers[0][0][0]  # first marker, innermost ellipse, center
            self.pos = normalize(marker_pos, (frame.width, frame.height), flip_y=True)
        else:
            self.detected = False
            self.pos = None  # indicate that no reference is detected

        # only save a valid ref position once past the lead-in period
        on_position = self.lead_in < self.screen_marker_state
        if on_position and self.detected:
            ref = {}
            ref["norm_pos"] = self.pos
            ref["screen_pos"] = marker_pos
            ref["timestamp"] = frame.timestamp
            self.ref_list.append(ref)

        # always save pupil positions (above the confidence threshold)
        for p_pt in recent_pupil_positions:
            if p_pt['confidence'] > self.pupil_confidence_threshold:
                self.pupil_list.append(p_pt)

        # Animate the screen marker: advance while the marker is detected or
        # we are still in the lead-in phase
        if self.detected or not on_position:
            self.screen_marker_state += 1

        self.on_position = on_position
        if self._window:
            self.gl_display_in_window()
def recent_events(self, events):
    """Per-frame update of a single-marker calibration routine.

    Detects the concentric-circle marker, records reference points once
    the lead-in period has elapsed, stores confident pupil data and
    advances the marker animation state.  Does nothing unless active and
    a frame is present in *events*.
    """
    frame = events.get('frame')
    if not (self.active and frame):
        return

    pupil_data = events['pupil_positions']
    image = frame.gray

    # user clicked enough times to abort the calibration
    if self.clicks_to_close <= 0:
        self.stop()
        return

    # detect the marker (4+ rings required)
    self.markers = find_concetric_circles(image, min_ring_count=4)
    self.detected = len(self.markers) > 0
    if self.detected:
        # first marker, innermost ellipse, center point
        marker_center = self.markers[0][0][0]
        self.pos = normalize(marker_center,
                             (frame.width, frame.height),
                             flip_y=True)
    else:
        self.pos = None  # signal that no reference was found

    # only keep a reference point once past the lead-in period
    on_position = self.lead_in < self.screen_marker_state
    if on_position and self.detected:
        self.ref_list.append({
            "norm_pos": self.pos,
            "screen_pos": marker_center,
            "timestamp": frame.timestamp,
        })

    # always keep confident pupil data
    for datum in pupil_data:
        if datum['confidence'] > self.pupil_confidence_threshold:
            self.pupil_list.append(datum)

    # advance the marker animation while the marker is seen or we are
    # still in the lead-in phase
    if self.detected or not on_position:
        self.screen_marker_state += 1

    self.on_position = on_position
def update(self, frame, events):
    """Per-frame update of the manual (hand-held) marker calibration.

    Gets called once every frame.  Detects the concentric-circle marker,
    smooths its position to find a steady fixation site, samples a fixed
    number of reference points per steady site, collects confident gaze
    data, and auto-stops after the marker has been seen long enough.
    """
    if self.active:
        recent_pupil_positions = events['pupil_positions']
        gray_img = frame.gray
        # lazily cache the world camera resolution
        if self.world_size is None:
            self.world_size = frame.width, frame.height

        self.markers = find_concetric_circles(gray_img, min_ring_count=3)
        if len(self.markers) > 0:
            self.detected = True
            marker_pos = self.markers[0][0][0]  # first marker, innermost ellipse, center
            self.pos = normalize(marker_pos, (frame.width, frame.height), flip_y=True)
        else:
            self.detected = False
            self.pos = None  # indicate that no reference is detected

        # tracking logic
        if self.detected:
            self.auto_stop += 1
            self.stop_marker_found = True

            # calculate smoothed manhattan velocity (exponential smoothing)
            smoother = 0.3
            smooth_pos = np.array(self.smooth_pos)
            pos = np.array(self.pos)
            new_smooth_pos = smooth_pos + smoother * (pos - smooth_pos)
            smooth_vel_vec = new_smooth_pos - smooth_pos
            smooth_pos = new_smooth_pos
            self.smooth_pos = list(smooth_pos)
            # manhattan distance for velocity
            new_vel = abs(smooth_vel_vec[0]) + abs(smooth_vel_vec[1])
            self.smooth_vel = self.smooth_vel + smoother * (new_vel - self.smooth_vel)

            # distance to last sampled site (manhattan)
            sample_ref_dist = smooth_pos - np.array(self.sample_site)
            sample_ref_dist = abs(sample_ref_dist[0]) + abs(sample_ref_dist[1])

            # start counter if ref is resting in place and not at last sample site
            if not self.counter:
                if self.smooth_vel < 0.01 and sample_ref_dist > 0.1:
                    self.sample_site = self.smooth_pos
                    audio.beep()
                    logger.debug("Steady marker found. Starting to sample {} datapoints".format(self.counter_max))
                    self.counter = self.counter_max

            if self.counter:
                if self.smooth_vel > 0.01:
                    # marker moved during sampling: abort this site
                    audio.tink()
                    logger.warning("Marker moved to quickly: Aborted sample. Sampled {} datapoints. Looking for steady marker again.".format((self.counter_max - self.counter)))
                    self.counter = 0
                else:
                    self.counter -= 1
                    ref = {}
                    ref["norm_pos"] = self.pos
                    ref["screen_pos"] = denormalize(self.pos, (frame.width, frame.height), flip_y=True)
                    ref["timestamp"] = frame.timestamp
                    self.ref_list.append(ref)
                    if self.counter == 0:
                        # last sample before counter done and moving on
                        audio.tink()
                        logger.debug("Sampled {} datapoints. Stopping to sample. Looking for steady marker again.".format(self.counter_max))

        # always save pupil positions
        for pt in events.get('gaze_positions', []):
            if pt['confidence'] > self.pupil_confidence_threshold:
                # we add an id for the calibration preprocess data to work as
                # it usually expects pupil data.
                pt['id'] = 0
                self.gaze_list.append(pt)

        # UI feedback
        if self.counter:
            if self.detected:
                self.button.status_text = 'Sampling Gaze Data'
            else:
                self.button.status_text = 'Marker Lost'
        else:
            self.button.status_text = 'Looking for Marker'

        # stop if autostop condition is satisfied:
        if self.auto_stop >= self.auto_stop_max:
            self.auto_stop = 0
            self.stop()
    else:
        pass
def recent_events(self, events):
    """Per-frame update of the manual (hand-held) marker calibration.

    Gets called once every frame.  Detects the concentric-circle marker,
    smooths its position to find a steady fixation site, samples a fixed
    number of reference points per steady site, collects confident gaze
    data, and auto-stops after the marker has been seen long enough.
    Does nothing when no frame is present in *events*.
    """
    frame = events.get('frame')
    if not frame:
        return
    if self.active:
        recent_pupil_positions = events['pupil_positions']
        gray_img = frame.gray
        # lazily cache the world camera resolution
        if self.world_size is None:
            self.world_size = frame.width, frame.height

        self.markers = find_concetric_circles(gray_img, min_ring_count=3)
        if len(self.markers) > 0:
            self.detected = True
            marker_pos = self.markers[0][0][0]  # first marker, innermost ellipse, center
            self.pos = normalize(marker_pos, (frame.width, frame.height), flip_y=True)
        else:
            self.detected = False
            self.pos = None  # indicate that no reference is detected

        # tracking logic
        if self.detected:
            self.auto_stop += 1
            self.stop_marker_found = True

            # calculate smoothed manhattan velocity (exponential smoothing)
            smoother = 0.3
            smooth_pos = np.array(self.smooth_pos)
            pos = np.array(self.pos)
            new_smooth_pos = smooth_pos + smoother * (pos - smooth_pos)
            smooth_vel_vec = new_smooth_pos - smooth_pos
            smooth_pos = new_smooth_pos
            self.smooth_pos = list(smooth_pos)
            # manhattan distance for velocity
            new_vel = abs(smooth_vel_vec[0]) + abs(smooth_vel_vec[1])
            self.smooth_vel = self.smooth_vel + smoother * (
                new_vel - self.smooth_vel)

            # distance to last sampled site (manhattan)
            sample_ref_dist = smooth_pos - np.array(self.sample_site)
            sample_ref_dist = abs(sample_ref_dist[0]) + abs(
                sample_ref_dist[1])

            # start counter if ref is resting in place and not at last sample site
            if not self.counter:
                if self.smooth_vel < 0.01 and sample_ref_dist > 0.1:
                    self.sample_site = self.smooth_pos
                    audio.beep()
                    logger.debug(
                        "Steady marker found. Starting to sample {} datapoints"
                        .format(self.counter_max))
                    self.counter = self.counter_max

            if self.counter:
                if self.smooth_vel > 0.01:
                    # marker moved during sampling: abort this site
                    audio.tink()
                    logger.warning(
                        "Marker moved to quickly: Aborted sample. Sampled {} datapoints. Looking for steady marker again."
                        .format((self.counter_max - self.counter)))
                    self.counter = 0
                else:
                    self.counter -= 1
                    ref = {}
                    ref["norm_pos"] = self.pos
                    ref["screen_pos"] = denormalize(
                        self.pos, (frame.width, frame.height), flip_y=True)
                    ref["timestamp"] = frame.timestamp
                    self.ref_list.append(ref)
                    if self.counter == 0:
                        # last sample before counter done and moving on
                        audio.tink()
                        logger.debug(
                            "Sampled {} datapoints. Stopping to sample. Looking for steady marker again."
                            .format(self.counter_max))

        # always save pupil positions
        for pt in events.get('gaze_positions', []):
            if pt['confidence'] > self.pupil_confidence_threshold:
                # we add an id for the calibration preprocess data to work as
                # it usually expects pupil data.
                pt['id'] = 0
                self.gaze_list.append(pt)

        # UI feedback
        if self.counter:
            if self.detected:
                self.button.status_text = 'Sampling Gaze Data'
            else:
                self.button.status_text = 'Marker Lost'
        else:
            self.button.status_text = 'Looking for Marker'

        # stop if autostop condition is satisfied:
        if self.auto_stop >= self.auto_stop_max:
            self.auto_stop = 0
            self.stop()
    else:
        pass
def update(self, frame, events):
    """Per-frame update of the manual marker calibration with stop marker.

    Gets called once every frame.  Detects the concentric-circle marker,
    classifies the second ring's center shade to recognize the bright
    "stop" marker (which drives auto-stop), smooths the marker position
    to find steady fixation sites, and collects ref/pupil samples.
    """
    if self.active:
        recent_pupil_positions = events['pupil_positions']
        gray_img = frame.gray
        # lazily cache the world camera resolution
        if self.world_size is None:
            self.world_size = frame.width, frame.height

        self.markers = find_concetric_circles(gray_img, min_ring_count=3)
        if len(self.markers) > 0:
            self.detected = True
            marker_pos = self.markers[0][0][0]  # first marker, innermost ellipse, center
            self.pos = normalize(marker_pos, (frame.width, frame.height), flip_y=True)
        else:
            self.detected = False
            self.pos = None  # indicate that no reference is detected

        # center dark or white?
        if self.detected:
            second_ellipse = self.markers[0][1]
            # bounding box of the second ellipse: (center +/- half axis)
            col_slice = int(second_ellipse[0][0]-second_ellipse[1][0]/2), int(second_ellipse[0][0]+second_ellipse[1][0]/2)
            row_slice = int(second_ellipse[0][1]-second_ellipse[1][1]/2), int(second_ellipse[0][1]+second_ellipse[1][1]/2)
            marker_gray = gray_img[slice(*row_slice), slice(*col_slice)]
            avg = cv2.mean(marker_gray)[0]
            # BUGFIX: the ellipse axis lengths are floats and float//2 stays a
            # float, which is not a valid ndarray index — cast to int first
            # (same form as the sibling circle_detector implementations).
            center = marker_gray[int(second_ellipse[1][1])//2, int(second_ellipse[1][0])//2]
            rel_shade = center-avg

            # auto_stop logic: a bright center marks the dedicated stop marker
            if rel_shade > 30:
                # bright marker center found
                self.auto_stop += 1
                self.stop_marker_found = True
            else:
                self.auto_stop = 0
                self.stop_marker_found = False

        # tracking logic (only for regular, non-stop markers)
        if self.detected and not self.stop_marker_found:
            # calculate smoothed manhattan velocity (exponential smoothing)
            smoother = 0.3
            smooth_pos = np.array(self.smooth_pos)
            pos = np.array(self.pos)
            new_smooth_pos = smooth_pos + smoother*(pos-smooth_pos)
            smooth_vel_vec = new_smooth_pos - smooth_pos
            smooth_pos = new_smooth_pos
            self.smooth_pos = list(smooth_pos)
            # manhattan distance for velocity
            new_vel = abs(smooth_vel_vec[0])+abs(smooth_vel_vec[1])
            self.smooth_vel = self.smooth_vel + smoother*(new_vel-self.smooth_vel)

            # distance to last sampled site (manhattan)
            sample_ref_dist = smooth_pos-np.array(self.sample_site)
            sample_ref_dist = abs(sample_ref_dist[0])+abs(sample_ref_dist[1])

            # start counter if ref is resting in place and not at last sample site
            if not self.counter:
                if self.smooth_vel < 0.01 and sample_ref_dist > 0.1:
                    self.sample_site = self.smooth_pos
                    audio.beep()
                    logger.debug("Steady marker found. Starting to sample {} datapoints".format(self.counter_max))
                    self.notify_all({'subject': 'calibration.marker_found', 'timestamp': self.g_pool.get_timestamp(), 'record': True})
                    self.counter = self.counter_max

            if self.counter:
                if self.smooth_vel > 0.01:
                    # marker moved during sampling: abort this site
                    audio.tink()
                    logger.warning("Marker moved too quickly: Aborted sample. Sampled {} datapoints. Looking for steady marker again.".format(self.counter_max-self.counter))
                    self.notify_all({'subject': 'calibration.marker_moved_too_quickly', 'timestamp': self.g_pool.get_timestamp(), 'record': True})
                    self.counter = 0
                else:
                    self.counter -= 1
                    ref = {}
                    ref["norm_pos"] = self.pos
                    ref["screen_pos"] = marker_pos
                    ref["timestamp"] = frame.timestamp
                    self.ref_list.append(ref)
                    if self.counter == 0:
                        # last sample before counter done and moving on
                        audio.tink()
                        logger.debug("Sampled {} datapoints. Stopping to sample. Looking for steady marker again.".format(self.counter_max))
                        self.notify_all({'subject': 'calibration.marker_sample_completed', 'timestamp': self.g_pool.get_timestamp(), 'record': True})

        # always save pupil positions
        for p_pt in recent_pupil_positions:
            if p_pt['confidence'] > self.pupil_confidence_threshold:
                self.pupil_list.append(p_pt)

        # UI feedback
        if self.counter:
            if self.detected:
                self.button.status_text = 'Sampling Gaze Data'
            else:
                self.button.status_text = 'Marker Lost'
        else:
            self.button.status_text = 'Looking for Marker'

        # stop if autostop condition is satisfied:
        if self.auto_stop >= self.auto_stop_max:
            self.auto_stop = 0
            self.stop()
    else:
        pass
def update(self, frame, events):
    """Per-frame update of the manual marker calibration with stop marker.

    Gets called once every frame.  Detects the concentric-circle marker,
    classifies the second ring's center shade to recognize the bright
    "stop" marker (which drives auto-stop), smooths the marker position
    to find steady fixation sites, collects ref/pupil samples and emits
    network-propagated notifications on sampling state changes.
    """
    if self.active:
        recent_pupil_positions = events['pupil_positions']
        gray_img = frame.gray
        # lazily cache the world camera resolution
        if self.world_size is None:
            self.world_size = frame.width, frame.height

        self.markers = find_concetric_circles(gray_img, min_ring_count=3)
        if len(self.markers) > 0:
            self.detected = True
            marker_pos = self.markers[0][0][
                0]  # first marker, innermost ellipse, center
            self.pos = normalize(marker_pos, (frame.width, frame.height),
                                 flip_y=True)
        else:
            self.detected = False
            self.pos = None  # indicate that no reference is detected

        # center dark or white?
        if self.detected:
            second_ellipse = self.markers[0][1]
            # bounding box of the second ellipse: (center +/- half axis)
            col_slice = int(second_ellipse[0][0] -
                            second_ellipse[1][0] / 2), int(
                                second_ellipse[0][0] +
                                second_ellipse[1][0] / 2)
            row_slice = int(second_ellipse[0][1] -
                            second_ellipse[1][1] / 2), int(
                                second_ellipse[0][1] +
                                second_ellipse[1][1] / 2)
            marker_gray = gray_img[slice(*row_slice), slice(*col_slice)]
            avg = cv2.mean(marker_gray)[0]  # CV2 fn return has changed!
            # BUGFIX: the ellipse axis lengths are floats and a float is not a
            # valid ndarray index — cast the half-axis to int (same form as
            # the sibling circle_detector implementations).
            center = marker_gray[int(second_ellipse[1][1] / 2),
                                 int(second_ellipse[1][0] / 2)]
            rel_shade = center - avg

            # auto_stop logic: a bright center marks the dedicated stop marker
            if rel_shade > 30:
                # bright marker center found
                self.auto_stop += 1
                self.stop_marker_found = True
            else:
                self.auto_stop = 0
                self.stop_marker_found = False

        # tracking logic (only for regular, non-stop markers)
        if self.detected and not self.stop_marker_found:
            # calculate smoothed manhattan velocity (exponential smoothing)
            smoother = 0.3
            smooth_pos = np.array(self.smooth_pos)
            pos = np.array(self.pos)
            new_smooth_pos = smooth_pos + smoother * (pos - smooth_pos)
            smooth_vel_vec = new_smooth_pos - smooth_pos
            smooth_pos = new_smooth_pos
            self.smooth_pos = list(smooth_pos)
            # manhattan distance for velocity
            new_vel = abs(smooth_vel_vec[0]) + abs(smooth_vel_vec[1])
            self.smooth_vel = self.smooth_vel + smoother * (
                new_vel - self.smooth_vel)

            # distance to last sampled site (manhattan)
            sample_ref_dist = smooth_pos - np.array(self.sample_site)
            sample_ref_dist = abs(sample_ref_dist[0]) + abs(
                sample_ref_dist[1])

            # start counter if ref is resting in place and not at last sample site
            if not self.counter:
                if self.smooth_vel < 0.01 and sample_ref_dist > 0.1:
                    self.sample_site = self.smooth_pos
                    audio.beep()
                    logger.debug(
                        "Steady marker found. Starting to sample %s datapoints"
                        % self.counter_max)
                    self.notify_all({
                        'subject': 'calibration.marker_found',
                        'timestamp': self.g_pool.capture.get_timestamp(),
                        'record': True,
                        'network_propagate': True
                    })
                    self.counter = self.counter_max

            if self.counter:
                if self.smooth_vel > 0.01:
                    # marker moved during sampling: abort this site
                    audio.tink()
                    logger.warning(
                        "Marker moved too quickly: Aborted sample. Sampled %s datapoints. Looking for steady marker again."
                        % (self.counter_max - self.counter))
                    self.notify_all({
                        'subject': 'calibration.marker_moved_too_quickly',
                        'timestamp': self.g_pool.capture.get_timestamp(),
                        'record': True,
                        'network_propagate': True
                    })
                    self.counter = 0
                else:
                    self.counter -= 1
                    ref = {}
                    ref["norm_pos"] = self.pos
                    ref["screen_pos"] = marker_pos
                    ref["timestamp"] = frame.timestamp
                    self.ref_list.append(ref)
                    if self.counter == 0:
                        # last sample before counter done and moving on
                        audio.tink()
                        logger.debug(
                            "Sampled %s datapoints. Stopping to sample. Looking for steady marker again."
                            % self.counter_max)
                        self.notify_all({
                            'subject': 'calibration.marker_sample_completed',
                            'timestamp': self.g_pool.capture.get_timestamp(),
                            'record': True,
                            'network_propagate': True
                        })

        # always save pupil positions
        for p_pt in recent_pupil_positions:
            if p_pt['confidence'] > self.pupil_confidence_threshold:
                self.pupil_list.append(p_pt)

        # UI feedback
        if self.counter:
            if self.detected:
                self.button.status_text = 'Sampling Gaze Data'
            else:
                self.button.status_text = 'Marker Lost'
        else:
            self.button.status_text = 'Looking for Marker'

        # stop if autostop condition is satisfied:
        if self.auto_stop >= self.auto_stop_max:
            self.auto_stop = 0
            self.stop()
    else:
        pass
def circle_detector(ipc_push_url, pair_url, source_path, batch_size=20):
    """Background-process worker: scan a video for calibration markers.

    Iterates over every frame of the video at *source_path*, detects
    concentric-circle calibration markers, classifies them as
    'calibration_marker' or 'stop_marker' (bright center), and streams
    (progress, ref-or-None) tuples back over a zmq pair socket in
    batches of *batch_size*.  Responds to a 'terminate' message and
    reports completion or failure via the pipe.
    """
    # ipc setup
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    process_pipe = zmq_tools.Msg_Pair_Client(zmq_ctx, pair_url)

    # logging setup: route this process's records through zmq to the main app
    import logging
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.INFO)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    # imports (deferred: this function runs in a freshly spawned process)
    import cv2
    from time import sleep
    from circle_detector import find_concetric_circles
    from video_capture import File_Source, EndofVideoFileError
    from methods import normalize

    try:
        src = File_Source(Empty(), source_path, timed_playback=False)
        frame = src.get_frame()
        logger.info('Starting calibration marker detection...')
        frame_count = src.get_frame_count()
        queue = []

        while True:
            # drain control messages; bail out on user termination
            while process_pipe.new_data:
                topic, n = process_pipe.recv()
                if topic == 'terminate':
                    process_pipe.send(topic='exception', payload={"reason": "User terminated."})
                    logger.debug("Process terminated")
                    sleep(1.0)
                    return

            progress = 100. * frame.index / frame_count

            markers = find_concetric_circles(frame.gray, min_ring_count=3)
            if len(markers) > 0:
                detected = True
                marker_pos = markers[0][0][0]  # first marker, innermost ellipse, center
                pos = normalize(marker_pos, (frame.width, frame.height), flip_y=True)
            else:
                detected = False
                pos = None

            if detected:
                # classify marker type by the shade of the second ring's center
                second_ellipse = markers[0][1]
                col_slice = int(second_ellipse[0][0] - second_ellipse[1][0] / 2), int(second_ellipse[0][0] + second_ellipse[1][0] / 2)
                row_slice = int(second_ellipse[0][1] - second_ellipse[1][1] / 2), int(second_ellipse[0][1] + second_ellipse[1][1] / 2)
                marker_gray = frame.gray[slice(*row_slice), slice(*col_slice)]
                avg = cv2.mean(marker_gray)[0]
                center = marker_gray[int(second_ellipse[1][1]) // 2, int(second_ellipse[1][0]) // 2]
                rel_shade = center - avg

                ref = {}
                ref["norm_pos"] = pos
                ref["screen_pos"] = marker_pos
                ref["timestamp"] = frame.timestamp
                ref['index'] = frame.index
                if rel_shade > 30:
                    # bright center: dedicated stop marker
                    ref['type'] = 'stop_marker'
                else:
                    ref['type'] = 'calibration_marker'
                queue.append((progress, ref))
            else:
                queue.append((progress, None))

            if len(queue) > batch_size:
                # dequeue batch
                data = queue[:batch_size]
                del queue[:batch_size]
                process_pipe.send(topic='progress', payload={'data': data})

            frame = src.get_frame()
    except EndofVideoFileError:
        # normal termination: flush the remaining queue and report success
        process_pipe.send(topic='progress', payload={'data': queue})
        process_pipe.send(topic='finished', payload={})
        logger.debug("Process finished")
    except Exception:
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; report the failure upstream instead.
        import traceback
        process_pipe.send(topic='exception', payload={'reason': traceback.format_exc()})
        logger.debug("Process raised Exception")
    # give zmq a moment to deliver the final messages before exiting
    sleep(1.0)
def circle_detector(ipc_push_url, pair_url, source_path, timestamps_path, batch_size=20):
    """Background-process worker: scan a video for calibration markers.

    Iterates over every frame of the video at *source_path* (with frame
    timestamps loaded from *timestamps_path*), detects concentric-circle
    calibration markers, classifies them as 'calibration_marker' or
    'stop_marker' (bright center), and streams (progress, ref-or-None)
    tuples back over a zmq pair socket in batches of *batch_size*.
    Responds to a 'terminate' message and reports completion or failure
    via the pipe.
    """
    # ipc setup
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    process_pipe = zmq_tools.Msg_Pair_Client(zmq_ctx, pair_url)

    # logging setup: route this process's records through zmq to the main app
    import logging
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.INFO)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    # imports (deferred: this function runs in a freshly spawned process)
    import cv2
    import numpy as np
    from time import sleep
    from circle_detector import find_concetric_circles
    from video_capture import File_Source, EndofVideoFileError
    from methods import normalize

    try:
        src = File_Source(Empty(), source_path, np.load(timestamps_path), timed_playback=False)
        frame = src.get_frame()
        logger.info('Starting calibration marker detection...')
        frame_count = src.get_frame_count()
        queue = []

        while True:
            # drain control messages; bail out on user termination
            while process_pipe.new_data:
                topic, n = process_pipe.recv()
                if topic == 'terminate':
                    process_pipe.send(topic='exception', payload={"reason": "User terminated."})
                    logger.debug("Process terminated")
                    sleep(1.0)
                    return

            progress = 100.*frame.index/frame_count

            markers = find_concetric_circles(frame.gray, min_ring_count=3)
            if len(markers) > 0:
                detected = True
                marker_pos = markers[0][0][0]  # first marker, innermost ellipse, center
                pos = normalize(marker_pos, (frame.width, frame.height), flip_y=True)
            else:
                detected = False
                pos = None

            if detected:
                # classify marker type by the shade of the second ring's center
                second_ellipse = markers[0][1]
                col_slice = int(second_ellipse[0][0]-second_ellipse[1][0]/2), int(second_ellipse[0][0]+second_ellipse[1][0]/2)
                row_slice = int(second_ellipse[0][1]-second_ellipse[1][1]/2), int(second_ellipse[0][1]+second_ellipse[1][1]/2)
                marker_gray = frame.gray[slice(*row_slice), slice(*col_slice)]
                avg = cv2.mean(marker_gray)[0]
                center = marker_gray[int(second_ellipse[1][1])//2, int(second_ellipse[1][0])//2]
                rel_shade = center-avg

                ref = {}
                ref["norm_pos"] = pos
                ref["screen_pos"] = marker_pos
                ref["timestamp"] = frame.timestamp
                ref['index'] = frame.index
                if rel_shade > 30:
                    # bright center: dedicated stop marker
                    ref['type'] = 'stop_marker'
                else:
                    ref['type'] = 'calibration_marker'
                queue.append((progress, ref))
            else:
                queue.append((progress, None))

            if len(queue) > batch_size:
                # dequeue batch
                data = queue[:batch_size]
                del queue[:batch_size]
                process_pipe.send(topic='progress', payload={'data': data})

            frame = src.get_frame()
    except EndofVideoFileError:
        # normal termination: flush the remaining queue and report success
        process_pipe.send(topic='progress', payload={'data': queue})
        process_pipe.send(topic='finished', payload={})
        logger.debug("Process finished")
    except Exception:
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; report the failure upstream instead.
        import traceback
        process_pipe.send(topic='exception', payload={'reason': traceback.format_exc()})
        logger.debug("Process raised Exception")
    # give zmq a moment to deliver the final messages before exiting
    sleep(1.0)