def eye_movement_detection_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
) -> Offline_Detection_Task_Generator:
    """Run offline eye-movement detection over recorded gaze data.

    Yields ``(localized status string, payload)`` tuples so a background
    task can report progress; detected segments are yielded as msgpack
    payloads, all other steps carry an empty tuple.
    """

    def to_serialized(datum):
        # Accept either a raw python dict or msgpack bytes for each datum.
        if type(datum) is dict:
            return fm.Serialized_Dict(python_dict=datum)
        if type(datum) is bytes:
            return fm.Serialized_Dict(msgpack_bytes=datum)
        raise ValueError(f"Unsupported gaze datum type: {type(datum)}.")

    yield EYE_MOVEMENT_DETECTION_STEP_PREPARING_LOCALIZED_STRING, ()
    gaze_data = [to_serialized(datum) for datum in gaze_data]

    # Nothing to classify — report completion immediately.
    if not gaze_data:
        utils.logger.warning("No data available to find fixations")
        yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)
    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)
    gaze_time = np.array([datum["timestamp"] for datum in gaze_data])

    yield EYE_MOVEMENT_DETECTION_STEP_PROCESSING_LOCALIZED_STRING, ()
    eye_positions = utils.gaze_data_to_nslr_data(
        capture, gaze_data, gaze_time, use_pupil=use_pupil
    )

    yield EYE_MOVEMENT_DETECTION_STEP_CLASSIFYING_LOCALIZED_STRING, ()
    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )
    # `gaze_classification` holds the classification for each gaze datum.

    yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, ()
    for index, nslr_segment in enumerate(segmentation.segments):
        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=segment_classification[index],
        )
        # The factory may reject a candidate segment; skip those.
        if not segment:
            continue
        serialized = segment.to_msgpack()
        yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, serialized

    yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
def __init__(self, max_segment_count: int = 1, max_sample_count: int = 1000):
    """Set up the real-time detector's bounded buffers.

    Args:
        max_segment_count: Maximum number of classified segments retained.
        max_sample_count: Maximum number of gaze samples retained.
    """
    # The capture is attached later; none is available at construction time.
    self._capture = None
    self._segment_factory = model.Classified_Segment_Factory()
    # Bounded deques: oldest entries are evicted once maxlen is reached.
    self._gaze_data_buffer = stdlib_utils.sliceable_deque(
        [], maxlen=max_sample_count
    )
    self._segment_buffer = stdlib_utils.sliceable_deque(
        [], maxlen=max_segment_count
    )
    # An empty buffer has nothing left to classify, so it starts as classified.
    self._is_gaze_buffer_classified: bool = True
def _segment_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
):
    """Yield classified eye-movement segments for *gaze_data*.

    Side effect: each gaze datum is tagged in-place (by reference) with its
    per-gaze movement class under ``utils.EYE_MOVEMENT_GAZE_KEY``.
    """
    # TODO: Merge this version with the one in offline_detection_task
    # NSLR needs at least two samples to segment anything.
    if len(gaze_data) < 2:
        utils.logger.warning("Not enough data available to find fixations")
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)
    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)
    gaze_time = np.array([datum["timestamp"] for datum in gaze_data])

    try:
        eye_positions = utils.gaze_data_to_nslr_data(
            capture, gaze_data, gaze_time, use_pupil=use_pupil
        )
    except utils.NSLRValidationError as err:
        # Best-effort: log and bail out rather than crash the caller.
        utils.logger.error(f"{err}")
        return

    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )

    # By-gaze classification — mutates the gaze dicts by reference.
    for datum, nslr_class in zip(gaze_data, gaze_classification):
        datum[utils.EYE_MOVEMENT_GAZE_KEY] = model.Segment_Class.from_nslr_class(
            nslr_class
        ).value

    # By-segment classification.
    for index, nslr_segment in enumerate(segmentation.segments):
        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=segment_classification[index],
            world_timestamps=capture.timestamps,
        )
        # The factory may reject a candidate segment; skip those.
        if segment:
            yield segment
def _segment_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
):
    """Yield classified eye-movement segments for *gaze_data*.

    Args:
        capture: Immutable capture providing the recording context.
        gaze_data: Sequence of gaze datums (mapping-like, with "timestamp").
        factory_start_id: Optional starting id for created segments.

    Yields:
        Classified segments produced by the segment factory; candidates the
        factory rejects (falsy results) are skipped.
    """
    # TODO: Merge this version with the one in offline_detection_task
    if not gaze_data:
        utils.logger.warning("No data available to find fixations")
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)
    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)
    gaze_time = np.array([gp["timestamp"] for gp in gaze_data])

    # BUG FIX: `gaze_time` was omitted from this call; the sibling
    # implementations in this file pass it as the third positional argument,
    # and it was otherwise computed above but never forwarded.
    eye_positions = utils.gaze_data_to_nslr_data(
        capture, gaze_data, gaze_time, use_pupil=use_pupil
    )

    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )

    for i, nslr_segment in enumerate(segmentation.segments):
        nslr_segment_class = segment_classification[i]
        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=nslr_segment_class,
        )
        if not segment:
            continue
        yield segment