def eye_movement_detection_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: "int | None" = None,
) -> Offline_Detection_Task_Generator:
    """Detect eye-movement segments and report progress as a generator.

    Yields ``(localized status string, payload)`` tuples.  Progress steps carry
    an empty tuple payload; each detected segment is yielded msgpack-serialized
    under the "detecting" step, and the "complete" step is always yielded last.

    Args:
        capture: immutable capture providing scene context for NSLR conversion.
        gaze_data: iterable of gaze datums, each a plain ``dict`` or msgpack
            ``bytes`` (anything else raises ``ValueError``).
        factory_start_id: optional first id for the segment factory.
    """

    def serialized_dict(datum):
        # Normalize each datum to fm.Serialized_Dict. isinstance (rather than
        # an exact type check) also admits dict/bytes subclasses.
        if isinstance(datum, dict):
            return fm.Serialized_Dict(python_dict=datum)
        if isinstance(datum, bytes):
            return fm.Serialized_Dict(msgpack_bytes=datum)
        raise ValueError("Unsupported gaze datum type: {}.".format(type(datum)))

    yield EYE_MOVEMENT_DETECTION_STEP_PREPARING_LOCALIZED_STRING, ()
    gaze_data = [serialized_dict(datum) for datum in gaze_data]

    if not gaze_data:
        # Nothing to classify; still emit the completion step so consumers
        # observe a well-formed progress sequence.
        utils.logger.warning("No data available to find fixations")
        yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)
    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)
    gaze_time = np.array([gp["timestamp"] for gp in gaze_data])

    yield EYE_MOVEMENT_DETECTION_STEP_PROCESSING_LOCALIZED_STRING, ()
    eye_positions = utils.gaze_data_to_nslr_data(
        capture, gaze_data, gaze_time, use_pupil=use_pupil
    )

    yield EYE_MOVEMENT_DETECTION_STEP_CLASSIFYING_LOCALIZED_STRING, ()
    # `gaze_classification` holds the per-datum classification; only the
    # segment-level outputs are consumed below.
    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )

    yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, ()
    for i, nslr_segment in enumerate(segmentation.segments):
        nslr_segment_class = segment_classification[i]
        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=nslr_segment_class,
        )
        if not segment:
            # Factory may reject a segment (e.g. too short); skip it.
            continue
        serialized = segment.to_msgpack()
        yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, serialized

    yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
def eye_movement_detection_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
) -> Offline_Detection_Task_Generator:
    """Classify gaze into eye-movement segments, reporting progress.

    Yields ``(localized status string, payload)`` pairs; detected segments are
    emitted msgpack-serialized, progress-only steps carry an empty tuple.
    """

    def _wrap(raw):
        # Exact type checks on purpose: only plain dicts and msgpack bytes.
        raw_type = type(raw)
        if raw_type is dict:
            return fm.Serialized_Dict(python_dict=raw)
        if raw_type is bytes:
            return fm.Serialized_Dict(msgpack_bytes=raw)
        raise ValueError("Unsupported gaze datum type: {}.".format(raw_type))

    yield EYE_MOVEMENT_DETECTION_STEP_PREPARING_LOCALIZED_STRING, ()
    gaze_data = [_wrap(raw) for raw in gaze_data]

    if not gaze_data:
        utils.logger.warning("No data available to find fixations")
        yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)
    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)
    gaze_time = np.array([datum["timestamp"] for datum in gaze_data])

    yield EYE_MOVEMENT_DETECTION_STEP_PROCESSING_LOCALIZED_STRING, ()
    eye_positions = utils.gaze_data_to_nslr_data(
        capture, gaze_data, use_pupil=use_pupil
    )

    yield EYE_MOVEMENT_DETECTION_STEP_CLASSIFYING_LOCALIZED_STRING, ()
    # Per-datum labels (first element) are not consumed here.
    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )

    yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, ()
    for seg_idx, nslr_segment in enumerate(segmentation.segments):
        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=segment_classification[seg_idx],
        )
        if segment:
            # The factory may reject segments; only emit the survivors.
            yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, segment.to_msgpack()

    yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
def recent_events(self, events):
    """Online fixation step: fold fresh gaze into history, emit at most one fixation.

    Writes the result into ``events["fixations"]`` and tracks the ongoing
    fixation in ``self.recent_fixation``.
    """
    events["fixations"] = []
    confident = (
        gp for gp in events["gaze"] if gp["confidence"] >= self.confidence_threshold
    )
    self.history.extend(confident)
    self.history.sort(key=lambda gp: gp["timestamp"])

    if not self.history:
        self.recent_fixation = None
        return

    try:
        ts_oldest = self.history[0]["timestamp"]
        ts_newest = self.history[-1]["timestamp"]
        if ts_newest < ts_oldest:
            # Timestamps ran backwards (e.g. source restart) — start over.
            self.reset_history()
            return
        # min_duration is in ms; keep exactly one sample older than the window.
        age_threshold = ts_newest - self.min_duration / 1000.0
        while self.history[1]["timestamp"] < age_threshold:
            del self.history[0]  # drop outdated gaze points
    except IndexError:
        # Fewer than two samples — nothing to trim.
        pass

    if can_use_3d_gaze_mapping(self.history):
        method = FixationDetectionMethod.GAZE_3D
    else:
        method = FixationDetectionMethod.GAZE_2D

    base_data = self.history
    if len(base_data) <= 2 or (
        base_data[-1]["timestamp"] - base_data[0]["timestamp"]
        < self.min_duration / 1000.0
    ):
        # Window too small to qualify as a fixation yet.
        self.recent_fixation = None
        return

    dispersion = gaze_dispersion(self.g_pool.capture, base_data, method)
    if not (dispersion < np.deg2rad(self.max_dispersion)):
        self.recent_fixation = None
        return

    new_fixation = fixation_from_data(dispersion, method, base_data)
    if self.recent_fixation:
        # An ongoing fixation keeps its id across frames.
        new_fixation["id"] = self.recent_fixation["id"]
    else:
        new_fixation["id"] = self.id_counter
        self.id_counter += 1
    self.replace_basedata_with_references(new_fixation)
    events["fixations"].append(new_fixation)
    self.recent_fixation = new_fixation
def recent_events(self, events):
    """Online fixation step over a deque-backed gaze history.

    Appends confident gaze to ``self.history``, prunes samples older than the
    minimum-duration window, and appends at most one fixation to
    ``events["fixations"]``, tracked in ``self.recent_fixation``.
    """
    events["fixations"] = []
    gaze = events["gaze"]
    self.history.extend(
        (gp for gp in gaze if gp["confidence"] >= self.confidence_threshold)
    )
    try:
        ts_oldest = self.history[0]["timestamp"]
        ts_newest = self.history[-1]["timestamp"]
        inconsistent_timestamps = ts_newest < ts_oldest
        if inconsistent_timestamps:
            # Timestamps ran backwards (e.g. clock reset) — discard history.
            self.reset_history()
            return
        # min_duration is in ms; prune until one sample older than the window remains.
        age_threshold = ts_newest - self.min_duration / 1000.0
        while self.history[1]["timestamp"] < age_threshold:
            self.history.popleft()  # remove outdated gaze points
    except IndexError:
        # history holds fewer than two samples — nothing to prune
        pass
    use_pupil = can_use_3d_gaze_mapping(self.history)
    # Fix: only build the 3d-only subset when it will actually be used.
    # Previously this list was constructed unconditionally, wasting a pass over
    # history (and touching gp["base_data"][0]["method"]) even on the 2d path.
    if use_pupil:
        base_data = [
            gp for gp in self.history if "3d" in gp["base_data"][0]["method"]
        ]
    else:
        base_data = self.history
    if (
        len(base_data) <= 2
        or base_data[-1]["timestamp"] - base_data[0]["timestamp"]
        < self.min_duration / 1000.0
    ):
        # Window too small to qualify as a fixation yet.
        self.recent_fixation = None
        return
    dispersion, origin, base_data = gaze_dispersion(
        self.g_pool.capture, base_data, use_pupil
    )
    if dispersion < np.deg2rad(self.max_dispersion):
        new_fixation = fixation_from_data(dispersion, origin, base_data)
        if self.recent_fixation:
            # An ongoing fixation keeps its id across frames.
            new_fixation["id"] = self.recent_fixation["id"]
        else:
            new_fixation["id"] = self.id_counter
            self.id_counter += 1
        self.replace_basedata_with_references(new_fixation)
        events["fixations"].append(new_fixation)
        self.recent_fixation = new_fixation
    else:
        self.recent_fixation = None
def _segment_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
):
    """Classify gaze into eye-movement segments and yield them one by one.

    Also annotates each gaze datum in place with its per-sample classification
    under ``utils.EYE_MOVEMENT_GAZE_KEY``.
    """
    # TODO: Merge this version with the one in offline_detection_task
    if len(gaze_data) < 2:
        utils.logger.warning("Not enough data available to find fixations")
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)
    factory = model.Classified_Segment_Factory(start_id=factory_start_id)
    timestamps = np.array([datum["timestamp"] for datum in gaze_data])

    try:
        nslr_input = utils.gaze_data_to_nslr_data(
            capture, gaze_data, timestamps, use_pupil=use_pupil
        )
    except utils.NSLRValidationError as err:
        utils.logger.error(str(err))
        return

    sample_classes, segmentation, segment_classes = nslr_hmm.classify_gaze(
        timestamps, nslr_input
    )

    # Per-sample classification: mutates the gaze datums by reference.
    for datum, sample_class in zip(gaze_data, sample_classes):
        datum[utils.EYE_MOVEMENT_GAZE_KEY] = model.Segment_Class.from_nslr_class(
            sample_class
        ).value

    # Per-segment classification: build and yield only the surviving segments.
    for segment_index, nslr_segment in enumerate(segmentation.segments):
        segment = factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=timestamps,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=segment_classes[segment_index],
            world_timestamps=capture.timestamps,
        )
        if segment:
            yield segment
def _segment_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
):
    """Yield classified eye-movement segments for the given gaze data."""
    # TODO: Merge this version with the one in offline_detection_task
    if not gaze_data:
        utils.logger.warning("No data available to find fixations")
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)
    factory = model.Classified_Segment_Factory(start_id=factory_start_id)
    timestamps = np.array([datum["timestamp"] for datum in gaze_data])

    nslr_input = utils.gaze_data_to_nslr_data(
        capture, gaze_data, use_pupil=use_pupil
    )
    # Per-sample labels (first element) are not consumed here.
    _sample_classes, segmentation, segment_classes = nslr_hmm.classify_gaze(
        timestamps, nslr_input
    )

    for index, nslr_segment in enumerate(segmentation.segments):
        candidate = factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=timestamps,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=segment_classes[index],
        )
        if candidate:
            # The factory may reject a segment; yield only the survivors.
            yield candidate
def detect_fixations(capture, gaze_data, max_dispersion, min_duration, max_duration, min_data_confidence):
    """Dispersion-based fixation detector (generator).

    Yields ``(status message, payload)`` tuples: intermediate fixations are
    yielded under "Detecting fixations...", and a final
    ``("Fixation detection complete", ())`` marks normal termination.

    Args:
        capture: capture object; also supplies ``capture.timestamps``.
        gaze_data: iterable of msgpack-serialized gaze datums.
        max_dispersion: dispersion threshold, in whatever angular unit
            ``gaze_dispersion`` returns — presumably radians; TODO confirm.
        min_duration: minimum fixation duration, same unit as the gaze
            "timestamp" field.
        max_duration: maximum fixation duration, same unit.
        min_data_confidence: gaze datums at or below this confidence are dropped.
    """
    yield "Detecting fixations...", ()
    # Deserialize lazily, then keep only sufficiently confident samples.
    gaze_data = (fm.Serialized_Dict(msgpack_bytes=serialized) for serialized in gaze_data)
    gaze_data = [
        datum for datum in gaze_data if datum["confidence"] > min_data_confidence
    ]
    if not gaze_data:
        logger.warning("No data available to find fixations")
        return "Fixation detection failed", ()
    # Pick 3d gaze mapping when the data supports it, otherwise fall back to 2d.
    method = (
        FixationDetectionMethod.GAZE_3D
        if can_use_3d_gaze_mapping(gaze_data)
        else FixationDetectionMethod.GAZE_2D
    )
    logger.info(f"Starting fixation detection using {method.value} data...")
    fixation_result = Fixation_Result_Factory()
    working_queue = deque()
    remaining_gaze = deque(gaze_data)
    while remaining_gaze:
        # check if working_queue contains enough data
        if (
            len(working_queue) < 2
            or (working_queue[-1]["timestamp"] - working_queue[0]["timestamp"]) < min_duration
        ):
            datum = remaining_gaze.popleft()
            working_queue.append(datum)
            continue
        # min duration reached, check for fixation
        dispersion = gaze_dispersion(capture, working_queue, method)
        if dispersion > max_dispersion:
            # not a fixation, move forward
            working_queue.popleft()
            continue
        left_idx = len(working_queue)
        # minimal fixation found. collect maximal data
        # to perform binary search for fixation end
        while remaining_gaze:
            datum = remaining_gaze[0]
            if datum["timestamp"] > working_queue[0]["timestamp"] + max_duration:
                break  # maximum data found
            working_queue.append(remaining_gaze.popleft())
        # check for fixation with maximum duration
        dispersion = gaze_dispersion(capture, working_queue, method)
        if dispersion <= max_dispersion:
            # Whole max-duration window is still one fixation — emit it directly.
            fixation = fixation_result.from_data(dispersion, method, working_queue, capture.timestamps)
            yield "Detecting fixations...", fixation
            working_queue.clear()  # discard old Q
            continue
        slicable = list(working_queue)  # deque does not support slicing
        right_idx = len(working_queue)
        # binary search for the longest prefix that still satisfies max_dispersion:
        # left_idx stays valid, right_idx stays invalid.
        while left_idx < right_idx - 1:
            middle_idx = (left_idx + right_idx) // 2
            dispersion = gaze_dispersion(
                capture,
                slicable[:middle_idx + 1],
                method,
            )
            if dispersion <= max_dispersion:
                left_idx = middle_idx
            else:
                right_idx = middle_idx
        # left_idx-1 is last valid base datum
        final_base_data = slicable[:left_idx]
        to_be_placed_back = slicable[left_idx:]
        dispersion_result = gaze_dispersion(capture, final_base_data, method)
        fixation = fixation_result.from_data(dispersion_result, method, final_base_data, capture.timestamps)
        yield "Detecting fixations...", fixation
        working_queue.clear()  # clear queue
        # reversed() compensates for deque.extendleft reversing its input,
        # so the pushed-back samples keep their original order.
        remaining_gaze.extendleft(reversed(to_be_placed_back))
    yield "Fixation detection complete", ()
def detect_fixations(
    capture, gaze_data, max_dispersion, min_duration, max_duration, min_data_confidence
):
    """Dispersion-based fixation detector (generator), use_pupil variant.

    Yields ``(status message, payload)`` tuples: intermediate fixations are
    yielded under "Detecting fixations...", with a final
    ``("Fixation detection complete", ())``.

    Args:
        capture: capture object; also supplies ``capture.timestamps``.
        gaze_data: iterable of msgpack-serialized gaze datums.
        max_dispersion: dispersion threshold, in whatever angular unit
            ``gaze_dispersion`` returns — presumably radians; TODO confirm.
        min_duration: minimum fixation duration (same unit as "timestamp").
        max_duration: maximum fixation duration (same unit).
        min_data_confidence: unused in the visible body —
            NOTE(review): unlike the sibling implementation, no confidence
            filter is applied here; confirm whether that is intentional.
    """
    yield "Detecting fixations...", ()
    gaze_data = [
        fm.Serialized_Dict(msgpack_bytes=serialized) for serialized in gaze_data
    ]
    if not gaze_data:
        logger.warning("No data available to find fixations")
        return "Fixation detection complete", ()
    # Use 3d ("pupil") data when possible, otherwise 2d gaze positions.
    use_pupil = can_use_3d_gaze_mapping(gaze_data)
    logger.info(
        "Starting fixation detection using {} data...".format(
            "3d" if use_pupil else "2d"
        )
    )
    fixation_result = Fixation_Result_Factory()
    # Q: current candidate window; enum: samples not yet consumed.
    Q = deque()
    enum = deque(gaze_data)
    while enum:
        # check if Q contains enough data
        if len(Q) < 2 or Q[-1]["timestamp"] - Q[0]["timestamp"] < min_duration:
            datum = enum.popleft()
            Q.append(datum)
            continue
        # min duration reached, check for fixation
        dispersion, origin, base_data = gaze_dispersion(capture, Q, use_pupil=use_pupil)
        if dispersion > max_dispersion:
            # not a fixation, move forward
            Q.popleft()
            continue
        left_idx = len(Q)
        # minimal fixation found. collect maximal data
        # to perform binary search for fixation end
        while enum:
            datum = enum[0]
            if datum["timestamp"] > Q[0]["timestamp"] + max_duration:
                break  # maximum data found
            Q.append(enum.popleft())
        # check for fixation with maximum duration
        dispersion, origin, base_data = gaze_dispersion(capture, Q, use_pupil=use_pupil)
        if dispersion <= max_dispersion:
            # Whole max-duration window still qualifies — emit it directly.
            fixation = fixation_result.from_data(
                dispersion, origin, base_data, capture.timestamps
            )
            yield "Detecting fixations...", fixation
            Q.clear()  # discard old Q
            continue
        slicable = list(Q)  # deque does not support slicing
        right_idx = len(Q)
        # binary search for the longest prefix within max_dispersion.
        # NOTE(review): the +1/-1 index arithmetic here differs from the
        # sibling implementation's cleaner invariant — verify the boundary
        # behavior (first/last prefix tested) against that version.
        while left_idx + 1 < right_idx:
            middle_idx = (left_idx + right_idx) // 2 + 1
            dispersion, origin, base_data = gaze_dispersion(
                capture, slicable[:middle_idx], use_pupil=use_pupil
            )
            if dispersion <= max_dispersion:
                left_idx = middle_idx - 1
            else:
                right_idx = middle_idx - 1
        middle_idx = (left_idx + right_idx) // 2
        dispersion_result = gaze_dispersion(
            capture, slicable[:middle_idx], use_pupil=use_pupil
        )
        fixation = fixation_result.from_data(*dispersion_result, capture.timestamps)
        yield "Detecting fixations...", fixation
        Q.clear()  # clear queue
        # NOTE(review): deque.extendleft reverses its input; the sibling
        # implementation wraps the pushed-back slice in reversed(...) to
        # preserve sample order — confirm whether its omission here is a bug.
        enum.extendleft(slicable[middle_idx:])
    yield "Fixation detection complete", ()