def check_trial_event_consistency(self):
    """Check that consecutive trial events do not overlap in time.

    For every trial event except the last, the expected stimulus duration
    (in samples) is looked up in the stimuli metadata and compared with the
    onset of the following event.  Each overlap is reported via two warning
    log messages, and the total number of problems is logged at the end.

    Uses: ``self.data_root``, ``self.stimuli_version``, ``self.raw`` (for
    the sampling frequency) and ``self.trial_events`` (rows of
    ``[sample, ?, event_id]``).
    """
    meta = load_stimuli_metadata(self.data_root, self.stimuli_version)
    sfreq = self.raw.info['sfreq']
    n_errors = 0
    for i, event in enumerate(self.trial_events[:-1]):
        event_id = event[2]
        start = event[0]
        if event_id < 1000:
            # trial event: id encodes stimulus id and condition
            stim_id, cond = decode_event_id(event_id)
            # conditions 1 and 2 are cued -> stimulus length includes the cue
            if cond in [1, 2]:
                field = 'length_with_cue'
            else:
                field = 'length_without_cue'
            sample_len = sfreq * meta[stim_id][field]
        else:
            # marker events (e.g. audio onset, code >= 1000) have no duration
            sample_len = 1
        next_start = self.trial_events[i + 1, 0]
        if next_start < start + sample_len:
            expected_len = sample_len / float(sfreq)
            event_len = (next_start - start) / float(sfreq)
            # logging.Logger.warn is deprecated; use warning() instead
            log.warning('warning: event {} starts before expected end of {}'.
                        format(self.trial_events[i + 1], event))
            log.warning(
                'expected length: {:.3f}s, real length: {:.3f}s, delta: {:.3f}s'
                .format(expected_len, event_len, expected_len - event_len))
            n_errors += 1
    log.info('{} problems detected.'.format(n_errors))
def check_trial_event_consistency(self):
    """Verify that no trial event starts before the previous one should end.

    The expected duration of each trial (in samples) is derived from the
    stimuli metadata; for marker events (id >= 1000) a 1-sample duration is
    assumed.  Overlaps are logged as warnings and a final summary with the
    number of detected problems is logged.

    Uses: ``self.data_root``, ``self.stimuli_version``, ``self.raw`` and
    ``self.trial_events`` (rows of ``[sample, ?, event_id]``).
    """
    meta = load_stimuli_metadata(self.data_root, self.stimuli_version)
    sfreq = self.raw.info['sfreq']
    n_errors = 0
    for i, event in enumerate(self.trial_events[:-1]):
        event_id = event[2]
        start = event[0]
        if event_id < 1000:
            # trial event: id encodes stimulus id and condition
            stim_id, cond = decode_event_id(event_id)
            # cued conditions (1, 2) include the cue in the stimulus length
            if cond in [1, 2]:
                field = 'length_with_cue'
            else:
                field = 'length_without_cue'
            sample_len = sfreq * meta[stim_id][field]
        else:
            # marker events have no meaningful duration
            sample_len = 1
        next_start = self.trial_events[i + 1, 0]
        if next_start < start + sample_len:
            expected_len = sample_len / float(sfreq)
            event_len = (next_start - start) / float(sfreq)
            # logging.Logger.warn is deprecated; use warning() instead
            log.warning('warning: event {} starts before expected end of {}'.format(
                self.trial_events[i + 1], event))
            log.warning('expected length: {:.3f}s, real length: {:.3f}s, delta: {:.3f}s'.format(
                expected_len, event_len, expected_len - event_len))
            n_errors += 1
    log.info('{} problems detected.'.format(n_errors))
def generate_beat_events(trial_events,            # base events as stored in raw fif files
                         include_cue_beats=True,  # generate events for cue beats as well?
                         use_audio_onset=True,    # use the more precise audio onset marker (code 1000) if present
                         exclude_stimulus_ids=(),   # immutable default (was a mutable [])
                         exclude_condition_ids=(),  # immutable default (was a mutable [])
                         beat_event_id_generator=default_beat_event_id_generator,
                         sr=512.0,                # sample rate, correct value important to compute event frames
                         verbose=False,
                         version=None):
    """Generate per-beat events from the raw trial events.

    For every trial event (id < 1000, encoding stimulus id and condition),
    beat times from the stimuli metadata are converted to sample frames and
    emitted as ``[sample, 0, event_id]`` rows.  For cued conditions (< 3),
    the beat times are shifted by the cue length and, if ``include_cue_beats``
    is set, events for the cue beats are generated as well.  When
    ``use_audio_onset`` is set, the audio onset marker (code 1000) directly
    following a cued trial event is used as the more precise trial start.

    Returns an ``(n_events, 3)`` int numpy array.
    """
    ## prepare return value
    beat_events = []

    ## get stimuli meta information
    meta = load_stimuli_metadata_map(version=version)
    beats = load_stimuli_metadata_map('beats', verbose=verbose, version=version)

    if include_cue_beats:
        # pass version here as well, consistent with the other metadata
        # lookups above (the original omitted it)
        cue_beats = load_stimuli_metadata_map('cue_beats', version=version)

        ## determine the number of cue beats
        num_cue_beats = dict()
        for stimulus_id in STIMULUS_IDS:
            num_cue_beats[stimulus_id] = \
                meta[stimulus_id]['beats_per_bar'] * meta[stimulus_id]['cue_bars']
        if verbose:
            print(num_cue_beats)

    ## helper function to add a single beat event
    def add_beat_event(etime, stimulus_id, condition, beat_count, cue=False):
        etype = beat_event_id_generator(stimulus_id, condition, cue, beat_count)
        beat_events.append([etime, 0, etype])
        if verbose:
            print(beat_events[-1])

    ## helper function to add a batch of beat events
    def add_beat_events(etimes, stimulus_id, condition, cue=False):
        beats_per_bar = meta[stimulus_id]['beats_per_bar']
        for i, etime in enumerate(etimes):
            beat_count = (i % beats_per_bar) + 1
            add_beat_event(etime, stimulus_id, condition, beat_count, cue)

    for i, event in enumerate(trial_events):
        etype = event[2]
        etime = event[0]

        if verbose:
            print('{:4d} at {:8d}'.format(etype, etime))

        if etype >= 1000:  # stimulus_id + condition
            continue

        stimulus_id, condition = decode_event_id(etype)

        if stimulus_id in exclude_stimulus_ids or condition in exclude_condition_ids:
            continue  # skip excluded

        trial_start = etime  # default: use trial onset
        if use_audio_onset and condition < 3:
            # Note: conditions 3 and 4 have no audio cues
            # guard against a trailing trial event with no following marker
            if i + 1 < len(trial_events):
                next_event = trial_events[i + 1]
                if next_event[2] == 1000:  # only use if audio onset
                    trial_start = next_event[0]

        if verbose:
            print('Trial start at {}'.format(trial_start))

        if condition < 3:  # cued
            offset = sr * meta[stimulus_id]['length_of_cue']
            if include_cue_beats:
                cue_beat_times = trial_start + np.floor(sr * cue_beats[stimulus_id])
                # truncate at num_cue_beats
                cue_beat_times = cue_beat_times[:num_cue_beats[stimulus_id]]
                cue_beat_times = np.asarray(cue_beat_times, dtype=int)
                if verbose:
                    print(cue_beat_times)
                add_beat_events(cue_beat_times, stimulus_id, condition, cue=True)
        else:
            offset = 0  # no cue

        beat_times = trial_start + offset + np.floor(sr * beats[stimulus_id])
        beat_times = np.asarray(beat_times, dtype=int)
        if verbose:
            print(beat_times[:5], '...')
        add_beat_events(beat_times, stimulus_id, condition)

    beat_events = np.asarray(beat_events, dtype=int)
    return beat_events
def generate_beat_events(trial_events,            # base events as stored in raw fif files
                         include_cue_beats=True,  # generate events for cue beats as well?
                         use_audio_onset=True,    # use the more precise audio onset marker (code 1000) if present
                         exclude_stimulus_ids=(),   # immutable default (was a mutable [])
                         exclude_condition_ids=(),  # immutable default (was a mutable [])
                         beat_event_id_generator=default_beat_event_id_generator,
                         sr=512.0,                # sample rate, correct value important to compute event frames
                         verbose=False,
                         version=None):
    """Generate per-beat events from the raw trial events.

    Python-3 version of the function (the original used Python 2 ``print``
    statements, which are syntax errors under Python 3).  For every trial
    event (id < 1000), beat times from the stimuli metadata are converted to
    sample frames and emitted as ``[sample, 0, event_id]`` rows; cued
    conditions (< 3) are shifted by the cue length and may additionally
    produce cue-beat events.

    Returns an ``(n_events, 3)`` int numpy array.
    """
    ## prepare return value
    beat_events = []

    ## get stimuli meta information
    meta = load_stimuli_metadata_map(version=version)
    beats = load_stimuli_metadata_map('beats', verbose=verbose, version=version)

    if include_cue_beats:
        # pass version here as well, consistent with the other metadata
        # lookups above (the original omitted it)
        cue_beats = load_stimuli_metadata_map('cue_beats', version=version)

        ## determine the number of cue beats
        num_cue_beats = dict()
        for stimulus_id in STIMULUS_IDS:
            num_cue_beats[stimulus_id] = \
                meta[stimulus_id]['beats_per_bar'] * meta[stimulus_id]['cue_bars']
        if verbose:
            print(num_cue_beats)

    ## helper function to add a single beat event
    def add_beat_event(etime, stimulus_id, condition, beat_count, cue=False):
        etype = beat_event_id_generator(stimulus_id, condition, cue, beat_count)
        beat_events.append([etime, 0, etype])
        if verbose:
            print(beat_events[-1])

    ## helper function to add a batch of beat events
    def add_beat_events(etimes, stimulus_id, condition, cue=False):
        beats_per_bar = meta[stimulus_id]['beats_per_bar']
        for i, etime in enumerate(etimes):
            beat_count = (i % beats_per_bar) + 1
            add_beat_event(etime, stimulus_id, condition, beat_count, cue)

    for i, event in enumerate(trial_events):
        etype = event[2]
        etime = event[0]

        if verbose:
            print('{:4d} at {:8d}'.format(etype, etime))

        if etype >= 1000:  # stimulus_id + condition
            continue

        stimulus_id, condition = decode_event_id(etype)

        if stimulus_id in exclude_stimulus_ids or condition in exclude_condition_ids:
            continue  # skip excluded

        trial_start = etime  # default: use trial onset
        if use_audio_onset and condition < 3:
            # Note: conditions 3 and 4 have no audio cues
            # guard against a trailing trial event with no following marker
            if i + 1 < len(trial_events):
                next_event = trial_events[i + 1]
                if next_event[2] == 1000:  # only use if audio onset
                    trial_start = next_event[0]

        if verbose:
            print('Trial start at {}'.format(trial_start))

        if condition < 3:  # cued
            offset = sr * meta[stimulus_id]['length_of_cue']
            if include_cue_beats:
                cue_beat_times = trial_start + np.floor(sr * cue_beats[stimulus_id])
                # truncate at num_cue_beats
                cue_beat_times = cue_beat_times[:num_cue_beats[stimulus_id]]
                cue_beat_times = np.asarray(cue_beat_times, dtype=int)
                if verbose:
                    print(cue_beat_times)
                add_beat_events(cue_beat_times, stimulus_id, condition, cue=True)
        else:
            offset = 0  # no cue

        beat_times = trial_start + offset + np.floor(sr * beats[stimulus_id])
        beat_times = np.asarray(beat_times, dtype=int)
        if verbose:
            print(beat_times[:5], '...')
        add_beat_events(beat_times, stimulus_id, condition)

    beat_events = np.asarray(beat_events, dtype=int)
    return beat_events