def test_two_pd_alignment():
    """Test splitting photodiode events into two and adding."""
    out_dir = _TempDir()
    raw, _, events, _ = pd_parser.simulate_pd_data(prop_corrupted=0.)
    fname = op.join(out_dir, 'test-raw.fif')
    raw.save(fname)
    events2 = events[::2]
    events3 = events[1:][::2]
    # make behavior data
    np.random.seed(12)
    beh_events2 = events2[:, 0].astype(float) / raw.info['sfreq']
    offsets2 = np.random.random(len(beh_events2)) * 0.05 - 0.025
    beh_events2 += offsets2
    # make next one
    beh_events3 = events3[:, 0].astype(float) / raw.info['sfreq']
    offsets3 = np.random.random(len(beh_events3)) * 0.05 - 0.025
    beh_events3 += offsets3
    n_na = abs(len(beh_events2) - len(beh_events3))
    if len(beh_events2) > len(beh_events3):
        beh_events3 = list(beh_events3) + ['n/a'] * n_na
    elif len(beh_events3) > len(beh_events2):
        beh_events2 = list(beh_events2) + ['n/a'] * n_na
    beh = dict(trial=np.arange(len(beh_events2)),
               fix_onset_time=beh_events2,
               response_onset_time=beh_events3)
    behf = op.join(out_dir, 'behf-test.tsv')
    _to_tsv(behf, beh)
    pd_parser.parse_pd(fname, pd_event_name='Fixation', beh=beh,
                       pd_ch_names=['pd'], beh_key='fix_onset_time',
                       zscore=20, exclude_shift=0.05)
    pd_parser.parse_pd(fname, pd_event_name='Response', beh=beh,
                       pd_ch_names=['pd'], beh_key='response_onset_time',
                       zscore=20, add_events=True, exclude_shift=0.05)
    raw = _read_raw(fname)
    annot, pd_ch_names, beh2 = _load_data(raw)
    raw.set_annotations(annot)
    events4, event_id = mne.events_from_annotations(raw)
    np.testing.assert_array_equal(events4[events4[:, 2] == 1, 0],
                                  events2[:, 0])
    np.testing.assert_array_equal(events4[events4[:, 2] == 2, 0],
                                  events3[:, 0])
    assert pd_ch_names == ['pd']
    np.testing.assert_array_equal(beh2['pd_parser_sample'], events2[:, 0])
def test_beh_with_nas():
    """Test that behavior with 'n/a' entries works properly."""
    out_dir = _TempDir()
    fname, behf, corrupted = make_raw(out_dir)
    beh = _read_tsv(behf)
    beh['fix_onset_time'][4] = 'n/a'
    beh['fix_onset_time'][8] = 'n/a'
    _, samples = pd_parser.parse_pd(fname, beh=beh, pd_ch_names=['pd'])
    assert samples == ['n/a', 19740, 26978, 33025, 'n/a', 'n/a',
                       53531, 59601, 'n/a', 'n/a']
def test_long_events():
    """Test parsing photodiode events that are longer than the default."""
    out_dir = _TempDir()
    raw = mne.io.read_raw_fif(op.join(basepath, 'pd_data2-raw.fif'),
                              preload=True)
    fname = op.join(out_dir, 'pd_data-raw.fif')
    raw.save(fname)
    behf = op.join(basepath, 'pd_events2.tsv')
    _, samples = pd_parser.parse_pd(fname, beh=behf, beh_key='event',
                                    pd_ch_names=['pd'], zscore=20,
                                    max_len=4, exclude_shift=1.5, resync=2)
    assert samples == [47900, 55953, 73458, 81293, 99415,
                       107467, 125972, 134108, 152030, 160482]
# save to disk as required by ``pd-parser``
fname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')
raw.save(fname)

###############################################################################
# Find the photodiode events relative to the behavioral timing of interest
#
# This function will use the default parameters to find and align the
# photodiode events, excluding events that were off.
# One percent of the 300 events (3) were corrupted, as shown in the plots,
# and some were shifted by such large offsets that we're going to exclude
# them.

pd_parser.parse_pd(fname, pd_event_name='Stim On', beh=beh,
                   pd_ch_names=['pd'], beh_key='time',
                   max_len=1.5)  # none are on longer than 1.5 seconds

###############################################################################
# Find cessations of the photodiode deflections
#
# Another piece of information in the photodiode channel is the cessation of
# the events. Let's find those and add them to the events.

pd_parser.add_pd_off_events(fname, off_event_name='Stim Off', max_len=1.5)

###############################################################################
# Check recovered event lengths and compare to the simulation ground truth
#
# Let's load in the on and off events and plot their difference compared to
# the ground truth lengths from the simulation.
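# The comparison code for this step is not shown above; the following is a
# minimal sketch (not the package's own example code) of how the on and off
# events could be loaded back and their durations computed. It assumes the
# parsed annotations contain the 'Stim On' and 'Stim Off' events created
# above; the variable names are illustrative only.

import mne  # likely already imported at the top of the example

raw = mne.io.read_raw_fif(fname)
# add the pd-parser annotations back onto the raw object (public API used
# elsewhere in this package's tests)
raw = pd_parser.add_events_to_raw(raw, keep_pd_channels=True)
events, event_id = mne.events_from_annotations(raw)
on_samples = events[events[:, 2] == event_id['Stim On'], 0]
off_samples = events[events[:, 2] == event_id['Stim Off'], 0]
# if some events were excluded during parsing, the two arrays may need to be
# matched up trial-by-trial before subtracting
recovered_lengths = (off_samples - on_samples) / raw.info['sfreq']
print(recovered_lengths[:5])  # durations in seconds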
def parse_pd():
    """Run parse_pd command."""
    parser = argparse.ArgumentParser()
    parser.add_argument('raw', type=str,
                        help='The electrophysiology raw object or filepath')
    parser.add_argument('--pd_event_name', type=str, required=False,
                        default='Fixation',
                        help='The name of the photodiode event')
    parser.add_argument('--beh', type=str, required=False,
                        help='The behavioral dictionary or tsv filepath')
    parser.add_argument('--beh_key', type=str, required=False,
                        default='fix_onset_time',
                        help='The name of the behavioral key (column) '
                             'corresponding to the photodiode event timing')
    parser.add_argument('--pd_ch_names', type=str, nargs='*',
                        required=False, default=None,
                        help='The name(s) of the channels with the '
                             'photodiode data. Can be one channel for a '
                             'common-referenced recording or two for a '
                             'bipolar recording. If not provided, the data '
                             'will be plotted for the user to pick')
    parser.add_argument('--exclude_shift', type=float, required=False,
                        default=0.05,
                        help='How many seconds off to exclude a '
                             'photodiode-behavioral event difference')
    parser.add_argument('--resync', type=float, required=False,
                        default=0.075,
                        help='How large of a difference to use to '
                             'resynchronize events. This is for when events '
                             'are off, but not by much, and so they should '
                             'be excluded but are still needed to fit an '
                             'alignment. Increase if the alignment is '
                             'failing because too many events are being '
                             'excluded; decrease to speed up execution.')
    parser.add_argument('--max_len', type=float, required=False, default=1,
                        help='The length of the longest photodiode event')
    parser.add_argument('--zscore', type=float, required=False, default=10,
                        help='How many standard deviations larger than the '
                             'baseline the photodiode event is. Decrease if '
                             'too many events are being found and increase '
                             'if too few. Use `find_pd_params` to determine '
                             'if unsure.')
    parser.add_argument('--max_flip_i', type=int, required=False, default=40,
                        help='The maximum number of samples the photodiode '
                             'event takes to transition. Increase if the '
                             'transitions are not being found; decrease for '
                             'fewer false positives.')
    parser.add_argument('--baseline', type=float, required=False,
                        default=0.25,
                        help='How much relative to the max_len to use to '
                             'identify the time before the photodiode '
                             'event. Probably don\'t change, but increasing '
                             'will reduce false positives and decreasing '
                             'will reduce false negatives.')
    parser.add_argument('--add_events', action='store_true',
                        help='Whether to run the parser a second time to '
                             'add more events from deflections '
                             'corresponding to multiple events on the same '
                             'channel')
    parser.add_argument('--recover', action='store_true',
                        help='Whether to recover corrupted events manually.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Whether to print function progress.')
    parser.add_argument('-o', '--overwrite', action='store_true',
                        help='Pass this flag to overwrite an existing file')
    args = parser.parse_args()
    pd_parser.parse_pd(args.raw, pd_event_name=args.pd_event_name,
                       beh=args.beh, beh_key=args.beh_key,
                       pd_ch_names=args.pd_ch_names, max_len=args.max_len,
                       exclude_shift=args.exclude_shift, resync=args.resync,
                       zscore=args.zscore, max_flip_i=args.max_flip_i,
                       baseline=args.baseline, add_events=args.add_events,
                       recover=args.recover, verbose=args.verbose,
                       overwrite=args.overwrite)
###############################################################################
# Find the photodiode events relative to the behavioral timing of interest
#
# This function will use the default parameters to find and align the
# photodiode events, recovering the events that we just corrupted.
#
# Note that the mock function mocks user input, so when you run the example,
# you want to delete that line, unindent the next line, and then provide
# your own input depending on whether you want to keep the events or not.

with mock.patch('builtins.input', return_value='y'):
    pd_parser.parse_pd(fname, pd_event_name='Stim On', beh=beh,
                       max_len=1.5, pd_ch_names=['pd'], beh_key='time',
                       recover=True)

###############################################################################
# Find cessations of the photodiode deflections
#
# Since we manually intervened for the onsets, we'll have to manually
# intervene for the offsets on those same trials.
#
# On the documentation webpage, this example is not interactive, but if you
# download it as a Jupyter notebook and run it, or copy the code into a
# console running Python (IPython recommended), you can see how to interact
# with the window to accept or reject the recovered events by following the
# instructions.
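# The offset-recovery code for this step is not shown above; below is a
# sketch of what it might look like, reusing the parameters from the onset
# step. It assumes that ``add_pd_off_events`` prompts for input on the
# corrupted trials in the same way ``parse_pd`` did with ``recover=True``;
# the ``zscore`` value here is illustrative.

with mock.patch('builtins.input', return_value='y'):
    pd_parser.add_pd_off_events(fname, off_event_name='Stim Off',
                                max_len=1.5, zscore=20)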
pd_parser.find_pd_params(raw, pd_ch_names=['pd'])

###############################################################################
# Find the photodiode events relative to the behavioral timing of interest
#
# This function will use the default parameters or the parameters you found
# with :func:`pd_parser.find_pd_params` to find and align the photodiode
# events, excluding events that were off because, for instance, the computer
# hung up on computation. The data are saved in the same folder as the raw
# file (in this case, a temporary directory generated by :func:`_TempDir`).
# The data can be used directly, or they can be accessed via
# :func:`pd_parser.save_to_bids` to store them in the Brain Imaging Data
# Structure (BIDS) standardized format before use.

pd_parser.parse_pd(raw, beh=beh, pd_ch_names=['pd'], max_len=1.5)

###############################################################################
# Add events relative to the photodiode events
#
# The photodiode is usually synchronized to one event (e.g. the fixation,
# so that, if the deflections caused by the photodiode are large enough to
# influence other channels through amplifier interactions, they don't cause
# issues with the analysis), so the events of interest are often relative to
# the photodiode event. In the task, a timer can be started at the photodiode
# event and checked each time a subsequent event occurs. These events should
# then be recorded in a tsv file, which can be passed to ``pd-parser`` in
# order to add them, as shown in the sketch below.
#
# Note: if more than one photodiode event is used, the parser can be run for
# each event separately using the keyword argument ``add_events=True``.
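# A minimal sketch (not the original example code) of adding relative events
# from the behavioral timing. It assumes ``add_relative_events`` accepts the
# same ``beh`` dictionary used above; the column names 'fix_duration',
# 'go_time' and 'response_time' and the event names are illustrative and
# would be replaced by whatever was recorded for your task.

pd_parser.add_relative_events(
    raw, beh,
    relative_event_keys=['fix_duration', 'go_time', 'response_time'],
    relative_event_names=['ISI Onset', 'Go Cue', 'Response'])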
def test_inputs():
    """Test that inputs for functions raise necessary errors."""
    out_dir = _TempDir()
    # test tsv
    beh = dict(test=[1, 2], test2=[2, 1])
    _to_tsv(op.join(out_dir, 'test.tsv'), beh)
    assert beh == _read_tsv(op.join(out_dir, 'test.tsv'))
    with pytest.raises(ValueError, match='Unable to read'):
        _read_tsv('test.foo')
    with pytest.raises(ValueError, match='Error in reading tsv'):
        with open(op.join(out_dir, 'test.tsv'), 'w') as _:
            pass
        _read_tsv(op.join(out_dir, 'test.tsv'))
    with pytest.raises(ValueError, match='contains no data'):
        with open(op.join(out_dir, 'test.tsv'), 'w') as f:
            f.write('test')
        _read_tsv(op.join(out_dir, 'test.tsv'))
    with pytest.raises(ValueError, match='different lengths'):
        with open(op.join(out_dir, 'test.tsv'), 'w') as f:
            f.write('test\ttest2\n1\t1\n1')
        _read_tsv(op.join(out_dir, 'test.tsv'))
    with pytest.raises(ValueError, match='Empty data file, no keys'):
        _to_tsv(op.join(out_dir, 'test.tsv'), dict())
    with pytest.raises(ValueError, match='Unable to write'):
        _to_tsv('foo.bar', dict(test=1))
    # test read
    raw, beh, events, corrupted_indices = pd_parser.simulate_pd_data()
    with pytest.raises(ValueError, match='must be loaded from disk'):
        _read_raw(raw, preload=True)
    raw.save(op.join(out_dir, 'test-raw.fif'), overwrite=True)
    with pytest.raises(ValueError, match='not recognized'):
        _read_raw('foo.bar')
    raw2 = _read_raw(op.join(out_dir, 'test-raw.fif'), preload=True)
    np.testing.assert_array_almost_equal(raw._data, raw2._data, decimal=3)
    # test load beh
    with pytest.raises(ValueError, match='not in the columns'):
        _load_beh(op.join(basepath, 'pd_events.tsv'), 'foo')
    # test get pd data
    with pytest.raises(ValueError, match='in raw channel names'):
        _get_data(raw, ['foo'])
    with pytest.raises(ValueError, match='in raw channel names'):
        _get_channel_data(raw, ['foo'])
    with pytest.raises(ValueError, match='baseline must be between 0 and 1'):
        pd_parser.parse_pd(raw, beh=beh, baseline=2)
    with pytest.raises(FileNotFoundError, match='fname does not exist'):
        _load_data('bar/foo.fif')
    with pytest.raises(ValueError, match='pd-parser data not found'):
        raw.save(op.join(out_dir, 'foo.fif'))
        _load_data(op.join(out_dir, 'foo.fif'))
    # test i/o
    raw3 = _read_raw(op.join(out_dir, 'test-raw.fif'))
    _save_data(raw3, events=np.arange(10), event_id='Fixation',
               ch_names=['pd'], beh=beh, add_events=False)
    with pytest.raises(ValueError, match='`pd_parser_sample` is not allowed'):
        _save_data(raw3, events=events, event_id='Fixation',
                   ch_names=['pd'], beh=beh, add_events=False)
    annot, pd_ch_names, beh2 = _load_data(raw3)
    raw.set_annotations(annot)
    events2, event_id = mne.events_from_annotations(raw)
    np.testing.assert_array_equal(events2[:, 0], np.arange(10))
    assert event_id == {'Fixation': 1}
    assert pd_ch_names == ['pd']
    np.testing.assert_array_equal(beh2['time'], beh['time'])
    np.testing.assert_array_equal(beh2['pd_parser_sample'], np.arange(10))
    # check overwrite
    behf = op.join(out_dir, 'behf-test.tsv')
    _to_tsv(behf, beh)
    with pytest.raises(ValueError, match='directory already exists'):
        pd_parser.parse_pd(raw3, beh=behf)
    pd_parser.parse_pd(raw3, beh=None, pd_ch_names=['pd'], overwrite=True)
    annot, pd_ch_names, beh = _load_data(raw3)
    raw3.set_annotations(annot)
    events2, _ = mne.events_from_annotations(raw3)
    assert all([event in events2[:, 0] for event in events[:, 0]])
    assert pd_ch_names == ['pd']
    assert beh is None
    # test overwrite
    raw = _read_raw(op.join(out_dir, 'test-raw.fif'))
    with pytest.raises(ValueError, match='data directory already exists'):
        _check_overwrite(raw, add_events=False, overwrite=False)
def test_parse_pd(_bids_validate):
    """Test the main photodiode parsing pipeline end-to-end."""
    # load in data
    behf = op.join(basepath, 'pd_beh.tsv')
    events = _read_tsv(op.join(basepath, 'pd_events.tsv'))
    events_relative = _read_tsv(op.join(basepath, 'pd_relative_events.tsv'))
    raw_tmp = mne.io.read_raw_fif(op.join(basepath, 'pd_data-raw.fif'),
                                  preload=True)
    raw_tmp.info['dig'] = None
    raw_tmp.info['line_freq'] = 60
    out_dir = _TempDir()
    fname = op.join(out_dir, 'pd_data-raw.fif')
    raw_tmp.save(fname)
    # this needs to be tested with user interaction; this just tests that
    # it launches
    pd_parser.find_pd_params(fname, pd_ch_names=['pd'])
    plt.close('all')
    # test core functionality
    annot, samples = pd_parser.parse_pd(fname, beh=behf, pd_ch_names=['pd'],
                                        zscore=20, resync=0.125)
    plt.close('all')
    raw = mne.io.read_raw_fif(fname)
    raw.set_annotations(annot)
    events2, event_id = mne.events_from_annotations(raw)
    np.testing.assert_array_equal(
        events2[:, 0], [e for e in events['pd_parser_sample'] if e != 'n/a'])
    assert samples == events['pd_parser_sample']
    # test add_pd_off_events
    annot = pd_parser.add_pd_off_events(fname, off_event_name=off_event_name,
                                        zscore=20)
    raw.set_annotations(annot)
    assert off_event_name in annot.description
    events2, event_id = mne.events_from_annotations(raw)
    off_events = events2[events2[:, 2] == event_id[off_event_name]]
    np.testing.assert_array_equal(
        off_events[:, 0], [e for e in events['off_sample'] if e != 'n/a'])
    # commented-out snippet that builds a trial/on/off sample table
    # (kept for reference)
    '''
    df = dict(trial=range(300), pd_parser_sample=samples, off_sample=list())
    i = 0
    for s in samples:
        df['off_sample'].append('n/a' if s == 'n/a' else off_events[i, 0])
        i += s != 'n/a'
    '''
    # test add_pd_relative_events
    pd_parser.add_relative_events(
        raw, behf,
        relative_event_keys=['fix_duration', 'go_time', 'response_time'],
        relative_event_names=['ISI Onset', 'Go Cue', 'Response'])
    annot, pd_ch_names, beh = _load_data(raw)
    raw.set_annotations(annot)
    events2, event_id = mne.events_from_annotations(raw)
    np.testing.assert_array_equal(events2[:, 0], events_relative['sample'])
    assert pd_ch_names == ['pd']
    np.testing.assert_array_equal(
        events2[:, 2], [event_id[tt] for tt in events_relative['trial_type']])
    # test add_pd_events_to_raw
    raw2 = pd_parser.add_events_to_raw(raw, keep_pd_channels=True)
    events3, event_id2 = mne.events_from_annotations(raw2)
    np.testing.assert_array_equal(events3, events2)
    assert event_id2 == event_id
    # test pd_parser_save_to_bids
    bids_dir = op.join(out_dir, 'bids_dir')
    pd_parser.save_to_bids(bids_dir, fname, '1', 'test', verbose=False)
    _bids_validate(bids_dir)