def eye_movement_detection_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
) -> Offline_Detection_Task_Generator:
    def serialized_dict(datum):
        if type(datum) is dict:
            return fm.Serialized_Dict(python_dict=datum)
        elif type(datum) is bytes:
            return fm.Serialized_Dict(msgpack_bytes=datum)
        else:
            raise ValueError("Unsupported gaze datum type: {}.".format(type(datum)))

    # Each yield reports a (localized step description, payload) progress tuple.
    yield EYE_MOVEMENT_DETECTION_STEP_PREPARING_LOCALIZED_STRING, ()
    gaze_data = [serialized_dict(datum) for datum in gaze_data]

    if not gaze_data:
        utils.logger.warning("No data available to find fixations")
        yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)

    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)

    gaze_time = np.array([gp["timestamp"] for gp in gaze_data])

    yield EYE_MOVEMENT_DETECTION_STEP_PROCESSING_LOCALIZED_STRING, ()
    eye_positions = utils.gaze_data_to_nslr_data(
        capture, gaze_data, gaze_time, use_pupil=use_pupil
    )

    yield EYE_MOVEMENT_DETECTION_STEP_CLASSIFYING_LOCALIZED_STRING, ()
    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )
    # `gaze_classification` holds the classification for each gaze datum.

    yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, ()
    for i, nslr_segment in enumerate(segmentation.segments):
        nslr_segment_class = segment_classification[i]

        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=nslr_segment_class,
        )

        if not segment:
            continue

        # Yield each detected segment in serialized form so the consumer can report it.
        serialized = segment.to_msgpack()
        yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, serialized

    yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
def eye_movement_detection_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
) -> Offline_Detection_Task_Generator:
    def serialized_dict(datum):
        if type(datum) is dict:
            return fm.Serialized_Dict(python_dict=datum)
        elif type(datum) is bytes:
            return fm.Serialized_Dict(msgpack_bytes=datum)
        else:
            raise ValueError("Unsupported gaze datum type: {}.".format(type(datum)))

    yield EYE_MOVEMENT_DETECTION_STEP_PREPARING_LOCALIZED_STRING, ()
    gaze_data = [serialized_dict(datum) for datum in gaze_data]

    if not gaze_data:
        utils.logger.warning("No data available to find fixations")
        yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)

    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)

    gaze_time = np.array([gp["timestamp"] for gp in gaze_data])

    yield EYE_MOVEMENT_DETECTION_STEP_PROCESSING_LOCALIZED_STRING, ()
    eye_positions = utils.gaze_data_to_nslr_data(
        capture, gaze_data, use_pupil=use_pupil
    )

    yield EYE_MOVEMENT_DETECTION_STEP_CLASSIFYING_LOCALIZED_STRING, ()
    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )
    # `gaze_classification` holds the classification for each gaze datum.

    yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, ()
    for i, nslr_segment in enumerate(segmentation.segments):
        nslr_segment_class = segment_classification[i]

        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=nslr_segment_class,
        )

        if not segment:
            continue

        serialized = segment.to_msgpack()
        yield EYE_MOVEMENT_DETECTION_STEP_DETECTING_LOCALIZED_STRING, serialized

    yield EYE_MOVEMENT_DETECTION_STEP_COMPLETE_LOCALIZED_STRING, ()
def _segment_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
):
    # TODO: Merge this version with the one in offline_detection_task

    if len(gaze_data) < 2:
        utils.logger.warning("Not enough data available to find fixations")
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)

    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)

    gaze_time = np.array([gp["timestamp"] for gp in gaze_data])

    try:
        eye_positions = utils.gaze_data_to_nslr_data(
            capture, gaze_data, gaze_time, use_pupil=use_pupil
        )
    except utils.NSLRValidationError as e:
        utils.logger.error(f"{e}")
        return

    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )

    # By-gaze classification; modifies events["gaze"] by reference
    for gaze, classification in zip(gaze_data, gaze_classification):
        gaze[utils.EYE_MOVEMENT_GAZE_KEY] = model.Segment_Class.from_nslr_class(
            classification
        ).value

    # By-segment classification
    for i, nslr_segment in enumerate(segmentation.segments):
        nslr_segment_class = segment_classification[i]

        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=nslr_segment_class,
            world_timestamps=capture.timestamps,
        )

        if not segment:
            continue

        yield segment
def _segment_generator(
    capture: model.Immutable_Capture,
    gaze_data: utils.Gaze_Data,
    factory_start_id: int = None,
):
    # TODO: Merge this version with the one in offline_detection_task

    if not gaze_data:
        utils.logger.warning("No data available to find fixations")
        return

    use_pupil = utils.can_use_3d_gaze_mapping(gaze_data)

    segment_factory = model.Classified_Segment_Factory(start_id=factory_start_id)

    gaze_time = np.array([gp["timestamp"] for gp in gaze_data])

    eye_positions = utils.gaze_data_to_nslr_data(
        capture, gaze_data, use_pupil=use_pupil
    )

    gaze_classification, segmentation, segment_classification = nslr_hmm.classify_gaze(
        gaze_time, eye_positions
    )

    for i, nslr_segment in enumerate(segmentation.segments):
        nslr_segment_class = segment_classification[i]

        segment = segment_factory.create_segment(
            gaze_data=gaze_data,
            gaze_time=gaze_time,
            use_pupil=use_pupil,
            nslr_segment=nslr_segment,
            nslr_segment_class=nslr_segment_class,
        )

        if not segment:
            continue

        yield segment
def detect_events_hmm(etsamples, etevents, et, smoothpursuit=True):
    # etevents = etevents.loc[etevents.start_time < etsamples.smpl_time.iloc[-1]]

    # First add blinks
    etsamples = append_eventtype_to_sample(etsamples, etevents, eventtype='blink')

    # run only on a subset
    # etsamples = etsamples.iloc[1:10000]
    # etevents = etevents[etevents.end_time < etsamples.iloc[-1].smpl_time]
    # etsamples = etsamples.iloc[1:1000]

    # Classify all non-blink samples with NSLR-HMM.
    t = etsamples.query('type!="blink"').smpl_time.values
    eye = etsamples.query('type!="blink"')[['gx', 'gy']].values
    tic()
    sample_class, segmentation, seg_class = nslr_hmm.classify_gaze(t, eye, optimize_noise=True)
    toc()

    # NSLR-HMM class codes are 1=fixation, 2=saccade, 3=PSO, 4=smooth pursuit,
    # so `sample_class - 1` indexes the label array below.
    sample_class = sample_class.astype(int)
    if smoothpursuit:
        eventtypes = np.asarray(['fixation', 'saccade', 'pso', 'smoothpursuit'])
    else:
        # Relabel smooth pursuit as fixation when pursuit events are not wanted.
        eventtypes = np.asarray(['fixation', 'saccade', 'pso', 'fixation'])

    nonblink = etsamples.type != 'blink'
    etsamples.loc[nonblink, 'type'] = eventtypes[sample_class - 1]

    etevents = pd.concat(
        [etevents,
         sampletype_to_event(etsamples, 'saccade'),
         sampletype_to_event(etsamples, 'smoothpursuit'),
         sampletype_to_event(etsamples, 'pso'),
         sampletype_to_event(etsamples, 'fixation')],
        ignore_index=True)

    return etsamples, etevents
def segment_hmm(df):
    # HMM
    sample_class, segmentation, seg_class = nslr_hmm.classify_gaze(
        df.ts.values,
        df[['gazeX', 'gazeY']].values,
        structural_error=0.2,
        optimize_noise=False  # Assume 0.2 degree noise
    )

    # recreate a new signal using the segmented results.
    gaze_interp = interp1d(segmentation.t, segmentation.x, axis=0, bounds_error=False)
    df['gazeX_s'], df['gazeY_s'] = gaze_interp(df.ts).T.copy()

    # COLORS = {
    #     nslr_hmm.FIXATION: 'grey',
    #     nslr_hmm.SACCADE: 'blue',
    #     nslr_hmm.SMOOTH_PURSUIT: 'grey',
    #     nslr_hmm.PSO: 'grey',
    # }
    # f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(13, 9))
    # hidtimes = df.ts.copy()
    # hidtimes[df.is_visible == 1] = np.nan
    # ax1.plot(df.ts, df.target_x)
    # ax1.plot(hidtimes, df.target_x, label='hidden')
    # ax1.plot(df.ts[df.is_target == 1], df.target_x[df.is_target == 1], '.', markersize=3, label='target')
    # ax1.legend()
    # ax1.set_ylabel('target x (degrees)')
    # ax2.set_ylabel('x (degrees)')
    # for i, seg in enumerate(segmentation.segments):
    #     cls = seg_class[i]
    #     ax2.plot(seg.t, np.array(seg.x)[:, 0], color=COLORS[cls], alpha=.6)
    # handlelist = [plt.plot([], marker="o", ls="", color=color)[0] for color in ['grey', 'blue']]
    # ax2.legend(handlelist, ['other', 'saccade'])
    # ax3.set_ylabel('y (degrees)')
    # for i, seg in enumerate(segmentation.segments):
    #     cls = seg_class[i]
    #     ax3.plot(seg.t, np.array(seg.x)[:, 1], color=COLORS[cls], alpha=.6)
    # handlelist = [plt.plot([], marker="o", ls="", color=color)[0] for color in ['grey', 'blue']]
    # ax3.legend(handlelist, ['other', 'saccade'])
    # f.show()

    times = []
    xcoords = []
    ycoords = []
    for seg in segmentation.segments:
        times.append(seg.t)
        xcoords.append(np.array(seg.x)[:, 0])
        ycoords.append(np.array(seg.x)[:, 1])

    xcoords = pd.DataFrame(xcoords, columns=['x_begin', 'x_end'])
    ycoords = pd.DataFrame(ycoords, columns=['y_begin', 'y_end'])
    times = pd.DataFrame(times, columns=['ts_begin', 'ts_end'])

    segdf = pd.concat([xcoords, ycoords, times], axis=1).reset_index()
    segdf['sclass'] = seg_class

    segdf_long = pd.melt(segdf, id_vars=['index', 'sclass'])
    segdf_long['var'], segdf_long['timept'] = segdf_long['variable'].str.split('_', 1).str
    segdf_long = segdf_long[['index', 'sclass', 'value', 'var', 'timept']].copy()
    segdf_long = segdf_long.pivot_table(
        index=['index', 'sclass', 'timept'], columns=['var'], values='value'
    ).reset_index().copy()

    saccades = segdf_long[segdf_long.sclass == 2]
    saccades = saccades.rename(columns={"index": "segment_index"}).reset_index()
    saccades = saccades[['timept', 'ts', 'x', 'y', 'segment_index']].copy()

    def merge_saccades(ds):
        # remove duplicate ts --> end/begin that have the same ts.
        remove_border = ds.duplicated(subset=['ts'], keep=False)
        ds = ds[~remove_border]
        # recalculate segment index
        ds['segment_index_old'] = ds['segment_index'].copy()
        ds['segment_index'] = ds.groupby('timept').cumcount() + 1
        return ds

    saccades_merged = merge_saccades(saccades)
    df = df.reset_index(drop=True).merge(saccades_merged, how='left').copy()

    return df
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal
import nslr_hmm

# Simulate a dummy recording session
t = np.arange(0, 5, 0.01)
saw = ((t * 10) % 10) / 10.0 * 10.0  # 10 deg/second sawtooth
horiz_gaze = saw
vert_gaze = -saw
eye = np.vstack((horiz_gaze, vert_gaze)).T
eye += np.random.randn(*eye.shape) * 0.1

# Segment the data and classify the segments in one go using
# the default parameters.
sample_class, segmentation, seg_class = nslr_hmm.classify_gaze(t, eye)

COLORS = {
    nslr_hmm.FIXATION: 'blue',
    nslr_hmm.SACCADE: 'black',
    nslr_hmm.SMOOTH_PURSUIT: 'green',
    nslr_hmm.PSO: 'yellow',
}

plt.plot(t, eye[:, 0], '.')
for i, seg in enumerate(segmentation.segments):
    cls = seg_class[i]
    plt.plot(seg.t, np.array(seg.x)[:, 0], color=COLORS[cls])
plt.show()
def segment_hmm(df):
    # remove duplicated timestamps
    df = df.drop_duplicates(subset=['ts'], keep='first')
    # remove NaNs from gaze
    df = df[~((df.gazeX.isnull()) | (df.gazeY.isnull()))]

    # HMM
    sample_class, segmentation, seg_class = nslr_hmm.classify_gaze(
        df.ts.values,
        df[['gazeX', 'gazeY']].values,
        structural_error=0.2,
        optimize_noise=False  # Assume 0.2 degree noise
    )

    # recreate a new signal using the segmented results.
    gaze_interp = interp1d(segmentation.t, segmentation.x, axis=0, bounds_error=False)
    df['gazeX_s'], df['gazeY_s'] = gaze_interp(df.ts).T.copy()

    times = []
    xcoords = []
    ycoords = []
    for seg in segmentation.segments:
        times.append(seg.t)
        xcoords.append(np.array(seg.x)[:, 0])
        ycoords.append(np.array(seg.x)[:, 1])

    xcoords = pd.DataFrame(xcoords, columns=['x_begin', 'x_end'])
    ycoords = pd.DataFrame(ycoords, columns=['y_begin', 'y_end'])
    times = pd.DataFrame(times, columns=['ts_begin', 'ts_end'])

    segdf = pd.concat([xcoords, ycoords, times], axis=1).reset_index()
    segdf['segment_class'] = seg_class

    segdf_long = pd.melt(
        segdf[['index', 'ts_begin', 'ts_end', 'segment_class']],
        id_vars=['index', 'segment_class'])
    segdf_long['var'], segdf_long['timept'] = segdf_long['variable'].str.split('_', 1).str
    segdf_long = segdf_long[['index', 'segment_class', 'value', 'var', 'timept']].copy()
    segdf_long = segdf_long.pivot_table(
        index=['index', 'segment_class', 'timept'],
        columns=['var'],
        values='value').reset_index().copy()

    # merge consecutive segments of same class
    remove_border = segdf_long.duplicated(subset=['ts', 'segment_class'], keep=False)
    segdf_long = segdf_long[~remove_border]

    # recalculate segment index
    segdf_long['segment_index_old'] = segdf_long['index'].copy()
    segdf_long['segment_index'] = segdf_long.groupby('timept').cumcount() + 1
    # leave segment indices only at segment begins
    segdf_long.loc[segdf_long.timept == 'end', 'segment_index'] = np.nan
    segdf_long['segment_index'] = segdf_long.segment_index.bfill()

    segdf_long = segdf_long.pivot_table(
        index=['ts', 'segment_index'],
        columns=['timept'],
        values='segment_class').reset_index().copy()
    segdf_long.rename(
        columns={
            "segment_index": "begin_segment_index",
            "begin": "begin_seg_class",
            "end": "end_seg_class"
        },
        inplace=True)

    df = df.reset_index(drop=True).merge(segdf_long, how='left').copy()

    return df
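# A minimal, hypothetical usage sketch for segment_hmm above (not from the original
# pipeline): it assumes gaze angles in degrees in `gazeX`/`gazeY` and timestamps in
# seconds in `ts` (the columns the function reads), that pandas, numpy,
# scipy.interpolate.interp1d and nslr_hmm are importable, and a pandas version
# compatible with the original code.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
t = np.arange(0, 5, 0.01)  # 5 s of samples at 100 Hz
demo = pd.DataFrame({
    "ts": t,
    "gazeX": rng.normal(scale=0.1, size=t.size),  # roughly stationary gaze plus noise
    "gazeY": rng.normal(scale=0.1, size=t.size),
})
labeled = segment_hmm(demo)
# `labeled` now carries the interpolated signal (gazeX_s/gazeY_s) and, at segment
# border timestamps, the begin_seg_class/end_seg_class columns produced by the merge above.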
import numpy as np
import matplotlib.pyplot as plt
import nslr_hmm

# `session_features` and `sessions` are assumed to have been prepared from the
# recordings beforehand.
reestimate_observations = nslr_hmm.reestimate_observations_baum_welch

# Estimate new parameters based on the data
transition_probs, observation_model = reestimate_observations(
    session_features,
    # Setting either of these False can avoid
    # failure due to a class getting zero probability.
    estimate_transition_model=False,
    estimate_observation_model=True,
    # Enable to show an animated plot of the observation model estimation
    plot_process=True,
)

# Classify one session using the new model(s)
t, eye, outliers = sessions[0]
sample_class, segmentation, seg_class = nslr_hmm.classify_gaze(
    t, eye,
    outliers=outliers,
    transition_probs=transition_probs,
    observation_model=observation_model)

# Plot the resulting classification
plt.figure()
COLORS = {
    nslr_hmm.FIXATION: 'blue',
    nslr_hmm.SACCADE: 'black',
    nslr_hmm.SMOOTH_PURSUIT: 'green',
    nslr_hmm.PSO: 'yellow',
}
plt.plot(t, eye[:, 0], '.')
for i, seg in enumerate(segmentation.segments):
    cls = seg_class[i]
    plt.plot(seg.t, np.array(seg.x)[:, 0], color=COLORS[cls])