def test_export_edf_annotations(tmp_path):
    """Test that exporting EDF preserves annotations."""
    rng = np.random.RandomState(123456)
    format = 'edf'
    ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg',
                'eog', 'ecg', 'emg', 'dbs', 'bio']
    ch_names = np.arange(len(ch_types)).astype(str).tolist()
    info = create_info(ch_names, sfreq=1000, ch_types=ch_types)
    data = rng.random(size=(len(ch_names), 2000)) * 1.e-5
    raw = RawArray(data, info)
    annotations = Annotations(
        onset=[0.01, 0.05, 0.90, 1.05], duration=[0, 1, 0, 0],
        description=['test1', 'test2', 'test3', 'test4'])
    raw.set_annotations(annotations)

    # export
    temp_fname = op.join(str(tmp_path), f'test.{format}')
    raw.export(temp_fname)

    # read in the file
    raw_read = read_raw_edf(temp_fname, preload=True)
    assert_array_equal(raw.annotations.onset, raw_read.annotations.onset)
    assert_array_equal(raw.annotations.duration,
                       raw_read.annotations.duration)
    assert_array_equal(raw.annotations.description,
                       raw_read.annotations.description)
def test_chunk_duration():
    """Test chunk_duration."""
    # create dummy raw
    raw = RawArray(data=np.empty([10, 10], dtype=np.float64),
                   info=create_info(ch_names=10, sfreq=1.),
                   first_samp=0)
    raw.info['meas_date'] = 0
    raw.set_annotations(Annotations(description='foo', onset=[0],
                                    duration=[10], orig_time=None))

    # expected_events = [[0, 0, 1], [0, 0, 1], [1, 0, 1], [1, 0, 1], ..
    #                    [9, 0, 1], [9, 0, 1]]
    expected_events = np.atleast_2d(np.repeat(range(10), repeats=2)).T
    expected_events = np.insert(expected_events, 1, 0, axis=1)
    expected_events = np.insert(expected_events, 2, 1, axis=1)

    events, events_id = events_from_annotations(raw, chunk_duration=.5,
                                                use_rounding=False)
    assert_array_equal(events, expected_events)

    # test chunk durations that do not fit equally in annotation duration
    expected_events = np.zeros((3, 3))
    expected_events[:, -1] = 1
    expected_events[:, 0] = np.arange(0, 9, step=3)
    events, events_id = events_from_annotations(raw, chunk_duration=3.)
    assert_array_equal(events, expected_events)
def test_annotation_omit():
    """Test raw.get_data with annotations."""
    data = np.concatenate([np.ones((1, 1000)), 2 * np.ones((1, 1000))], -1)
    info = create_info(1, 1000., 'eeg')
    raw = RawArray(data, info)
    raw.set_annotations(Annotations([0.5], [1], ['bad']))
    expected = raw[0][0]
    assert_allclose(raw.get_data(reject_by_annotation=None), expected)
    # nan
    expected[0, 500:1500] = np.nan
    assert_allclose(raw.get_data(reject_by_annotation='nan'), expected)
    got = np.concatenate([
        raw.get_data(start=start, stop=stop, reject_by_annotation='nan')
        for start, stop in ((0, 1000), (1000, 2000))], -1)
    assert_allclose(got, expected)
    # omit
    expected = expected[:, np.isfinite(expected[0])]
    assert_allclose(raw.get_data(reject_by_annotation='omit'), expected)
    got = np.concatenate([
        raw.get_data(start=start, stop=stop, reject_by_annotation='omit')
        for start, stop in ((0, 1000), (1000, 2000))], -1)
    assert_allclose(got, expected)
    pytest.raises(ValueError, raw.get_data, reject_by_annotation='foo')
def _raw_annot(meas_date, orig_time):
    info = create_info(ch_names=10, sfreq=10.)
    raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
    raw.info['meas_date'] = meas_date
    annot = Annotations([.5], [.2], ['dummy'], orig_time)
    raw.set_annotations(annotations=annot)
    return raw
def _raw_annot(meas_date, orig_time, sync_orig=True):
    info = create_info(ch_names=10, sfreq=10.)
    raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
    raw.info['meas_date'] = meas_date
    annot = Annotations([.5], [.2], ['dummy'], orig_time)
    raw.set_annotations(annotations=annot, sync_orig=sync_orig)
    return raw
def raw_factory(meas_date):
    raw = RawArray(data=np.empty((10, 10)),
                   info=create_info(ch_names=10, sfreq=10.),
                   first_samp=10)
    raw.set_meas_date(meas_date)
    raw.set_annotations(annotations=Annotations(onset=[.5],
                                                duration=[.2],
                                                description='dummy',
                                                orig_time=None))
    return raw
def raw_factory(meas_date):
    raw = RawArray(data=np.empty((10, 10)),
                   info=create_info(ch_names=10, sfreq=10.),
                   first_samp=10)
    raw.info['meas_date'] = meas_date
    raw.set_annotations(annotations=Annotations(onset=[.5],
                                                duration=[.2],
                                                description='dummy',
                                                orig_time=None))
    return raw
def test_negative_meas_dates(windows_like_datetime):
    """Test meas_date previous to 1970."""
    # Regression test for gh-6621
    raw = RawArray(data=np.empty((1, 1), dtype=np.float64),
                   info=create_info(ch_names=1, sfreq=1.))
    raw.set_meas_date((-908196946, 988669))
    raw.set_annotations(Annotations(description='foo', onset=[0],
                                    duration=[0], orig_time=None))
    events, _ = events_from_annotations(raw)
    assert events[:, 0] == 0
def test_raw_reject():
    """Test raw data getter with annotation reject."""
    sfreq = 100.
    info = create_info(['a', 'b', 'c', 'd', 'e'], sfreq, ch_types='eeg')
    raw = RawArray(np.ones((5, 15000)), info)
    with pytest.warns(RuntimeWarning, match='outside the data range'):
        raw.set_annotations(Annotations([2, 100, 105, 148],
                                        [2, 8, 5, 8], 'BAD'))
    data, times = raw.get_data([0, 1, 3, 4], 100, 11200,  # 1-112 sec
                               'omit', return_times=True)
    bad_times = np.concatenate([np.arange(200, 400),
                                np.arange(10000, 10800),
                                np.arange(10500, 11000)])
    expected_times = np.setdiff1d(np.arange(100, 11200), bad_times) / sfreq
    assert_allclose(times, expected_times)

    # with orig_time and complete overlap
    raw = read_raw_fif(fif_fname)
    raw.set_annotations(Annotations(onset=[1, 4, 5] + raw._first_time,
                                    duration=[1, 3, 1],
                                    description='BAD',
                                    orig_time=raw.info['meas_date']))
    t_stop = 18.
    assert raw.times[-1] > t_stop
    n_stop = int(round(t_stop * raw.info['sfreq']))
    n_drop = int(round(4 * raw.info['sfreq']))
    assert len(raw.times) >= n_stop
    data, times = raw.get_data(range(10), 0, n_stop, 'omit', True)
    assert data.shape == (10, n_stop - n_drop)
    assert times[-1] == raw.times[n_stop - 1]
    assert_array_equal(data[:, -100:], raw[:10, n_stop - 100:n_stop][0])

    data, times = raw.get_data(range(10), 0, n_stop, 'NaN', True)
    assert_array_equal(data.shape, (10, n_stop))
    assert times[-1] == raw.times[n_stop - 1]
    t_1, t_2 = raw.time_as_index([1, 2], use_rounding=True)
    assert np.isnan(data[:, t_1:t_2]).all()  # 1s - 2s
    assert not np.isnan(data[:, :t_1]).any()
    assert not np.isnan(data[:, t_2:]).any()
    assert_array_equal(data[:, -100:], raw[:10, n_stop - 100:n_stop][0])
    assert_array_equal(raw.get_data(), raw[:][0])

    # Test _sync_onset
    times = [10, -88, 190]
    onsets = _sync_onset(raw, times)
    assert_array_almost_equal(onsets,
                              times - raw.first_samp / raw.info['sfreq'])
    assert_array_almost_equal(times, _sync_onset(raw, onsets, True))
def test_flat_bad_acq_skip():
    """Test that acquisition skips are handled properly."""
    # -- file with a couple of skip and flat channels --
    raw = read_raw_fif(skip_fname, preload=True)
    annots, bads = annotate_amplitude(raw, flat=0)
    assert len(annots) == 0
    assert bads == [  # MaxFilter finds the same 21 channels
        'MEG%04d' % (int(num),) for num in
        '141 331 421 431 611 641 1011 1021 1031 1241 1421 '
        '1741 1841 2011 2131 2141 2241 2531 2541 2611 2621'.split()]

    # -- overlap of flat segment with bad_acq_skip --
    n_ch, n_times = 11, 1000
    data = np.random.RandomState(0).randn(n_ch, n_times)
    assert not (np.diff(data, axis=-1) == 0).any()  # nothing flat at first
    info = create_info(n_ch, 1000., 'eeg')
    raw = RawArray(data, info, first_samp=0)
    raw.info['bads'] = [raw.ch_names[-1]]
    bad_acq_skip = Annotations([0.5], [0.2], ['bad_acq_skip'], orig_time=None)
    raw.set_annotations(bad_acq_skip)

    # add flat channel overlapping with the left edge of bad_acq_skip
    raw_ = raw.copy()
    raw_._data[0, 400:600] = 0.
    annots, bads = annotate_amplitude(raw_, peak=None, flat=0, bad_percent=25)
    assert len(annots) == 1
    assert len(bads) == 0
    # check annotation instance
    assert annots[0]['description'] == 'BAD_flat'
    _check_annotation(raw_, annots[0], None, 0, 400, 499)

    # add flat channel overlapping with the right edge of bad_acq_skip
    raw_ = raw.copy()
    raw_._data[0, 600:800] = 0.
    annots, bads = annotate_amplitude(raw_, peak=None, flat=0, bad_percent=25)
    assert len(annots) == 1
    assert len(bads) == 0
    # check annotation instance
    assert annots[0]['description'] == 'BAD_flat'
    _check_annotation(raw_, annots[0], None, 0, 700, 799)

    # add flat channel overlapping entirely with bad_acq_skip
    raw_ = raw.copy()
    raw_._data[0, 200:800] = 0.
    annots, bads = annotate_amplitude(raw_, peak=None, flat=0, bad_percent=41)
    assert len(annots) == 2
    assert len(bads) == 0
    # check annotation instance
    annots = sorted(annots, key=lambda x: x['onset'])
    assert all(annot['description'] == 'BAD_flat' for annot in annots)
    _check_annotation(raw_, annots[0], None, 0, 200, 500)
    _check_annotation(raw_, annots[1], None, 0, 700, 799)
def _convert_to_raw(self, data, s):
    fs = 500
    ch_names = ['Fp1', 'F5', 'F3', 'F1', 'Fz', 'FC5', 'FC3', 'FC1', 'FCz',
                'C5', 'C3', 'C1', 'Cz', 'CP5', 'CP3', 'CP1', 'CPz',
                'P5', 'P3', 'P1', 'Pz']
    ch_types = ['eeg'] * 21
    info = create_info(ch_names, fs, ch_types)
    info['description'] = 'PGHealthy'
    x = data['Signals'] * 1e-6
    raw = RawArray(x, info)

    epoch_start = np.array(data['Epoch_start']).T
    N = len(epoch_start)
    events = np.c_[epoch_start, np.zeros(N), np.ones(N) * (s + 1)]
    events = events.astype(int)

    raw.set_montage('standard_1005')

    mapping = {v: k for k, v in self.event_id.items()}
    onsets = events[:, 0] / raw.info['sfreq']
    durations = np.zeros_like(onsets)  # assumes instantaneous events
    descriptions = [mapping[ev_id] for ev_id in events[:, 2]]
    annot_from_events = Annotations(onset=onsets, duration=durations,
                                    description=descriptions)
    raw.notch_filter(50, verbose=False)
    return raw.set_annotations(annot_from_events)
def test_allow_nan_durations():
    """Deal with "n/a" strings in BIDS events with nan durations."""
    raw = RawArray(data=np.empty([2, 10], dtype=np.float64),
                   info=create_info(ch_names=2, sfreq=1.),
                   first_samp=0)
    raw.set_meas_date(0)

    ons = [1, 2., 15., 17.]
    dus = [np.nan, 1., 0.5, np.nan]
    descriptions = ['A'] * 4
    onsets = np.asarray(ons, dtype=float)
    durations = np.asarray(dus, dtype=float)
    annot = mne.Annotations(onset=onsets,
                            duration=durations,
                            description=descriptions)
    with pytest.warns(RuntimeWarning, match='Omitted 2 annotation'):
        raw.set_annotations(annot)
def _create_annotation_based_on_descr(description, annotation_start_sampl=0,
                                      duration=0, orig_time=0):
    """Create a raw object with annotations from descriptions.

    The returned raw object contains as many annotations as descriptions
    given, all starting at `annotation_start_sampl`.
    """
    # create dummy raw
    raw = RawArray(data=np.empty([10, 10], dtype=np.float64),
                   info=create_info(ch_names=10, sfreq=1000.),
                   first_samp=0)
    raw.info['meas_date'] = 0

    # create dummy annotations based on the descriptions
    onset = raw.times[annotation_start_sampl]
    onset_matching_desc = np.full_like(description, onset, dtype=type(onset))
    duration_matching_desc = np.full_like(description, duration,
                                          dtype=type(duration))
    annot = Annotations(description=description,
                        onset=onset_matching_desc,
                        duration=duration_matching_desc,
                        orig_time=orig_time)

    if duration != 0:
        with pytest.warns(RuntimeWarning, match='Limited.*expanding outside'):
            # duration 0.1s is larger than the raw data expand
            raw.set_annotations(annot)
    else:
        raw.set_annotations(annot)

    # Make sure that set_annotations(annot) works
    assert all(raw.annotations.onset == onset)
    if duration != 0:
        expected_duration = (len(raw.times) / raw.info['sfreq']) - onset
    else:
        expected_duration = 0
    _duration = raw.annotations.duration[0]
    assert _duration == approx(expected_duration)
    assert all(raw.annotations.duration == _duration)
    assert all(raw.annotations.description == description)

    return raw
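# Hedged usage sketch (not part of the original tests): the helper above
# returns a dummy raw carrying one zero-duration annotation per description.
# The descriptions used here are hypothetical.
def _example_annotations_from_descriptions():
    raw = _create_annotation_based_on_descr(['foo', 'bar', 'baz'],
                                            annotation_start_sampl=2)
    assert len(raw.annotations) == 3
    assert list(raw.annotations.description) == ['foo', 'bar', 'baz']
    return raw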
def test_read_ctf_annotations():
    """Test reading CTF marker file."""
    EXPECTED_LATENCIES = np.array([
        5640, 7950, 9990, 12253, 14171, 16557, 18896, 20846,  # noqa
        22702, 24990, 26830, 28974, 30906, 33077, 34985, 36907,  # noqa
        38922, 40760, 42881, 45222, 47457, 49618, 51802, 54227,  # noqa
        56171, 58274, 60394, 62375, 64444, 66767, 68827, 71109,  # noqa
        73499, 75807, 78146, 80415, 82554, 84508, 86403, 88426,  # noqa
        90746, 92893, 94779, 96822, 98996, 99001, 100949, 103325,  # noqa
        105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663,  # noqa
        121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210,  # noqa
        139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646,  # noqa
        156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519,  # noqa
        174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330,  # noqa
        192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409,  # noqa
        209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305,  # noqa
        226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762,  # noqa
        243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354,  # noqa
        260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197,  # noqa
        278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183,  # noqa
        295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124,  # noqa
        312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959,  # noqa
        329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034,  # noqa
        344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603,  # noqa
        361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204,  # noqa
        378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785,  # noqa
        396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686,  # noqa
        413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215,  # noqa
        429278, 431668  # noqa
    ]) - 1  # Fieldtrip has 1 sample difference with MNE

    raw = RawArray(
        data=np.empty((1, 432000), dtype=np.float64),
        info=create_info(ch_names=1, sfreq=1200.0))
    raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date'])
    raw.set_annotations(read_annotations(somato_fname))

    events, _ = events_from_annotations(raw)
    latencies = np.sort(events[:, 0])
    assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6)
def test_get_data_reject():
    """Test if reject_by_annotation is working correctly."""
    fs = 256
    ch_names = ["C3", "Cz", "C4"]
    info = create_info(ch_names, sfreq=fs)
    raw = RawArray(np.zeros((len(ch_names), 10 * fs)), info)
    raw.set_annotations(Annotations(onset=[2, 4], duration=[3, 2],
                                    description="bad"))

    with catch_logging() as log:
        data = raw.get_data(reject_by_annotation="omit", verbose=True)
        msg = ('Omitting 1024 of 2560 (40.00%) samples, retaining 1536' +
               ' (60.00%) samples.')
        assert log.getvalue().strip() == msg
    assert data.shape == (len(ch_names), 1536)

    with catch_logging() as log:
        data = raw.get_data(reject_by_annotation="nan", verbose=True)
        msg = ('Setting 1024 of 2560 (40.00%) samples to NaN, retaining 1536' +
               ' (60.00%) samples.')
        assert log.getvalue().strip() == msg
    assert data.shape == (len(ch_names), 2560)  # shape doesn't change
    assert np.isnan(data).sum() == 3072  # but NaNs are introduced instead
def raw_epochs_sphere():
    """Get the MATLAB EEG data."""
    n_times = 386
    mat_contents = sio.loadmat(eeg_fname)
    data = mat_contents['data']
    n_channels, n_epochs = data.shape[0], data.shape[1] // n_times
    sfreq = 250.
    ch_names = ['E%i' % i for i in range(1, n_channels + 1, 1)]
    ch_types = ['eeg'] * n_channels
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq)
    raw = RawArray(data=data, info=info)
    montage = make_standard_montage('GSN-HydroCel-257')
    raw.set_montage(montage)
    onset = raw.times[np.arange(50, n_epochs * n_times, n_times)]
    raw.set_annotations(Annotations(onset=onset,
                                    duration=np.repeat(0.1, 3),
                                    description=np.repeat('foo', 3)))

    events, event_id = events_from_annotations(raw)
    epochs = Epochs(raw, events, event_id, tmin=-.2, tmax=1.34,
                    preload=True, reject=None, picks=None,
                    baseline=(None, 0), verbose=False)
    sphere = (0., 0., 0., 0.095)
    return raw, epochs, sphere
def test_raw_array_orig_times():
    """Test combining with RawArray and orig_times."""
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    meas_date = _handle_meas_date(np.pi)
    info['meas_date'] = meas_date
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        ants = Annotations([1., 2.], [.5, .5], 'x',
                           np.pi + first_samp / sfreq)
        raw.set_annotations(ants)
        raws.append(raw)
    assert_allclose(raws[0].annotations.onset, [124, 125])
    raw = RawArray(data.copy(), info)
    assert not len(raw.annotations)
    raw.set_annotations(Annotations([1.], [.5], 'x', None))
    assert_allclose(raw.annotations.onset, [1.])
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    assert raw.info['meas_date'] == raw.annotations.orig_time == meas_date
    assert_and_remove_boundary_annot(raw, 3)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 134., 135., 144., 145., 154.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset,
                       [5., 124., 125., 135., 144., 145., 154.])
    assert_array_equal(raw.annotations.duration,
                       [1.5, .5, .5, .5, .5, .5, .5])
    assert_array_equal(raw.annotations.description,
                       ['y', 'x', 'x', 'x', 'x', 'x', 'x'])

    # These three things should be equivalent
    stamp = _dt_to_stamp(raw.info['meas_date'])
    orig_time = _handle_meas_date(stamp)
    for empty_annot in (
            Annotations([], [], [], stamp),
            Annotations([], [], [], orig_time),
            Annotations([], [], [], None),
            None):
        raw.set_annotations(empty_annot)
        assert isinstance(raw.annotations, Annotations)
        assert len(raw.annotations) == 0
        assert raw.annotations.orig_time == orig_time
def test_raw_array_orig_times():
    """Test combining with RawArray and orig_times."""
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = (np.pi, 0)
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        ants = Annotations([1., 2.], [.5, .5], 'x',
                           np.pi + first_samp / sfreq)
        raw.set_annotations(ants)
        raws.append(raw)
    raw = RawArray(data.copy(), info)
    raw.set_annotations(Annotations([1.], [.5], 'x', None))
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    assert_and_remove_boundary_annot(raw, 3)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 134., 135., 144., 145., 154.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset,
                       [5., 124., 125., 135., 144., 145., 154.])
    assert_array_equal(raw.annotations.duration,
                       [1.5, .5, .5, .5, .5, .5, .5])
    assert_array_equal(raw.annotations.description,
                       ['y', 'x', 'x', 'x', 'x', 'x', 'x'])

    # These three things should be equivalent
    expected_orig_time = (raw.info['meas_date'][0] +
                          raw.info['meas_date'][1] / 1000000)
    for empty_annot in (
            Annotations([], [], [], expected_orig_time),
            Annotations([], [], [], None),
            None):
        raw.set_annotations(empty_annot)
        assert isinstance(raw.annotations, Annotations)
        assert len(raw.annotations) == 0
        assert raw.annotations.orig_time == expected_orig_time
def simulate_nirs_raw(sfreq=3., amplitude=1., sig_dur=300., stim_dur=5.,
                      isi_min=15., isi_max=45.):
    """
    Create simulated data.

    .. warning:: Work in progress: I am trying to think on the best API.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number
        The amplitude of the signal to simulate in uM.
    sig_dur : Number
        The length of the signal to generate in seconds.
    stim_dur : Number
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    from nilearn.stats.first_level_model import make_first_level_design_matrix
    from pandas import DataFrame

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    onset = 0.
    onsets = []
    conditions = []
    durations = []
    while onset < sig_dur - 60:
        onset += np.random.uniform(isi_min, isi_max) + stim_dur
        onsets.append(onset)
        conditions.append("A")
        durations.append(stim_dur)

    events = DataFrame({'trial_type': conditions,
                        'onset': onsets,
                        'duration': durations})

    dm = make_first_level_design_matrix(frame_times, events,
                                        drift_model='polynomial',
                                        drift_order=0)

    annotations = Annotations(onsets, durations, conditions)

    info = create_info(ch_names=['Simulated'], sfreq=sfreq, ch_types=['hbo'])
    raw = RawArray(dm[["A"]].to_numpy().T * amplitude * 1.e-6,
                   info, verbose=False)
    raw.set_annotations(annotations)

    return raw
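# Hedged usage sketch (not part of the original source): a quick sanity check
# of the single-condition simulator above, confirming that each annotation on
# the returned raw matches the requested stimulus duration. Parameter values
# are arbitrary.
def _example_simulate_single_condition():
    raw = simulate_nirs_raw(sfreq=3., amplitude=1., sig_dur=300., stim_dur=5.)
    assert raw.info['sfreq'] == 3.
    assert all(raw.annotations.duration == 5.)
    return raw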
def simulate_nirs_raw(sfreq=3., amplitude=1., annot_desc='A',
                      sig_dur=300., stim_dur=5.,
                      isi_min=15., isi_max=45.,
                      ch_name='Simulated', hrf_model='glover'):
    """
    Create simulated fNIRS data.

    The returned data is of type `hbo`.
    One or more conditions can be simulated.
    To simulate multiple conditions, pass in a description and amplitude for
    each, e.g. `amplitude=[0., 2., 4.],
    annot_desc=['Control', 'Cond_A', 'Cond_B']`.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number, Array of numbers
        The amplitude of the signal to simulate in uM.
        Pass in an array to simulate multiple conditions.
    annot_desc : str, Array of str
        The name of the annotations for simulated amplitudes.
        Pass in an array to simulate multiple conditions,
        must be the same length as amplitude.
    sig_dur : Number
        The length of the boxcar signal to generate in seconds that will
        be convolved with the HRF.
    stim_dur : Number, Array of numbers
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.
    ch_name : str
        Channel name to be used in returned raw instance.
    hrf_model : str
        Specifies the hemodynamic response function. See nilearn docs.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    from nilearn.glm.first_level import make_first_level_design_matrix
    from pandas import DataFrame

    if type(amplitude) is not list:
        amplitude = [amplitude]
    if type(annot_desc) is not list:
        annot_desc = [annot_desc]
    if type(stim_dur) is not list:
        stim_dur = [stim_dur]

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    assert len(amplitude) == len(annot_desc), \
        "Same number of amplitudes as annotations required."
    assert len(amplitude) == len(stim_dur), \
        "Same number of amplitudes as durations required."

    onset = 0.
    onsets = []
    conditions = []
    durations = []
    while onset < sig_dur - 60:
        c_idx = np.random.randint(0, len(amplitude))
        onset += np.random.uniform(isi_min, isi_max) + stim_dur[c_idx]
        onsets.append(onset)
        conditions.append(annot_desc[c_idx])
        durations.append(stim_dur[c_idx])

    events = DataFrame({'trial_type': conditions,
                        'onset': onsets,
                        'duration': durations})

    dm = make_first_level_design_matrix(frame_times, events,
                                        hrf_model=hrf_model,
                                        drift_model='polynomial',
                                        drift_order=0)
    dm = dm.drop(columns='constant')

    annotations = Annotations(onsets, durations, conditions)

    info = create_info(ch_names=[ch_name], sfreq=sfreq, ch_types=['hbo'])

    for idx, annot in enumerate(annot_desc):
        if annot in dm.columns:
            dm[annot] *= amplitude[idx]
    a = np.sum(dm.to_numpy(), axis=1) * 1.e-6
    a = a.reshape(-1, 1).T

    raw = RawArray(a, info, verbose=False)
    raw.set_annotations(annotations)

    return raw
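# Hedged usage sketch (not part of the original source): simulating two
# conditions with the multi-condition API described in the docstring above.
# Condition names, amplitudes, and durations are illustrative only.
def _example_simulate_two_conditions():
    raw = simulate_nirs_raw(sfreq=3., sig_dur=300.,
                            amplitude=[1., 2.],
                            annot_desc=['Cond_A', 'Cond_B'],
                            stim_dur=[5., 5.])
    # every annotation should carry one of the requested condition names
    assert set(raw.annotations.description) <= {'Cond_A', 'Cond_B'}
    return raw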
def test_find_events():
    """Test find events in raw file."""
    events = read_events(fname)
    raw = read_raw_fif(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3, mask_type='not_and')
    with pytest.warns(RuntimeWarning, match='events masked'):
        events22 = read_events(fname, mask=3, mask_type='not_and')
    assert_array_equal(events11, events22)

    # Reset some data for ease of comparison
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000
    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])

    # test digital masking
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'

    pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
    pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
    # testing mask_type. default = 'not_and'
    assert_array_equal(find_events(raw, shortest_event=1, mask=1,
                                   mask_type='not_and'),
                       [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2,
                                   mask_type='not_and'),
                       [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3,
                                   mask_type='not_and'),
                       [[4, 0, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4,
                                   mask_type='not_and'),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    # testing with mask_type = 'and'
    assert_array_equal(find_events(raw, shortest_event=1, mask=1,
                                   mask_type='and'),
                       [[1, 0, 1], [3, 0, 1]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2,
                                   mask_type='and'),
                       [[2, 0, 2]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3,
                                   mask_type='and'),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4,
                                   mask_type='and'),
                       [[4, 0, 4]])

    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])

    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6

    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=True),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5],
                        [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5], [20, 5, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True),
                       [[19, 6, 5], [29, 5, 6], [31, 0, 5],
                        [40, 0, 6], [14399, 0, 9]])
    pytest.raises(ValueError, find_events, raw, output='step',
                  consecutive=True)
    assert_array_equal(find_events(raw, output='step', consecutive=True,
                                   shortest_event=1),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5], [32, 5, 0],
                        [40, 0, 6], [41, 6, 0], [14399, 0, 9], [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5], [31, 0, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=False,
                                   min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True,
                                   min_duration=0.002),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False,
                                   min_duration=0.002),
                       [[31, 0, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True,
                                   min_duration=0.002),
                       [[19, 6, 5], [29, 5, 6], [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True,
                                   min_duration=0.003),
                       [[10, 0, 5], [20, 5, 6]])

    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
                                       stim_channel=stim_channel),
                       [[0, 0, 1], [1, 1, 0], [10, 0, 4],
                        [11, 4, 5], [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=-1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0], [10, 0, 5], [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0], [11, 0, 5], [20, 5, 0]])

    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o

    # Test with list of stim channels
    raw._data[stim_channel_idx, 1:101] = np.zeros(100)
    raw._data[stim_channel_idx, 10:11] = 1
    raw._data[stim_channel_idx, 30:31] = 3
    stim_channel2 = 'STI 015'
    stim_channel2_idx = pick_channels(raw.info['ch_names'],
                                      include=[stim_channel2])
    raw._data[stim_channel2_idx, :] = 0
    raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
    events1 = find_events(raw, stim_channel='STI 014')
    events2 = events1.copy()
    events2[:, 0] -= 5
    events = find_events(raw, stim_channel=['STI 014', stim_channel2])
    assert_array_equal(events[::2], events2)
    assert_array_equal(events[1::2], events1)

    # test initial_event argument
    info = create_info(['MYSTI'], 1000, 'stim')
    data = np.zeros((1, 1000))
    raw = RawArray(data, info)
    data[0, :10] = 100
    data[0, 30:40] = 200
    assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
    assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
                       [[0, 0, 100], [30, 0, 200]])

    # test error message for raw without stim channels
    raw = read_raw_fif(raw_fname, preload=True)
    raw.pick_types(meg=True, stim=False)
    # raw does not have annotations
    with pytest.raises(ValueError, match="'stim_channel'"):
        find_events(raw)
    # if raw has annotations, we show a different error message
    raw.set_annotations(Annotations(0, 2, "test"))
    with pytest.raises(ValueError, match="mne.events_from_annotations"):
        find_events(raw)
def test_basics():
    """Test annotation class."""
    raw = read_raw_fif(fif_fname)
    assert raw.annotations is not None  # XXX to be fixed in #5416
    assert len(raw.annotations.onset) == 0  # XXX to be fixed in #5416
    pytest.raises(IOError, read_annotations, fif_fname)
    onset = np.array(range(10))
    duration = np.ones(10)
    description = np.repeat('test', 10)
    dt = datetime.utcnow()
    meas_date = raw.info['meas_date']
    # Test time shifts.
    for orig_time in [None, dt, meas_date[0], meas_date]:
        annot = Annotations(onset, duration, description, orig_time)

    pytest.raises(ValueError, Annotations, onset, duration, description[:9])
    pytest.raises(ValueError, Annotations, [onset, 1], duration, description)
    pytest.raises(ValueError, Annotations, onset, [duration, 1], description)

    # Test combining annotations with concatenate_raws
    raw2 = raw.copy()
    delta = raw.times[-1] + 1. / raw.info['sfreq']
    orig_time = (meas_date[0] + meas_date[1] * 1e-6 + raw2._first_time)
    offset = orig_time - _handle_meas_date(raw2.info['meas_date'])
    annot = Annotations(onset, duration, description, orig_time)
    assert ' segments' in repr(annot)
    raw2.set_annotations(annot)
    assert_array_equal(raw2.annotations.onset, onset + offset)
    assert id(raw2.annotations) != id(annot)

    concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotations
    raw.annotations.delete(-1)
    assert_allclose(onset + offset + delta, raw.annotations.onset, rtol=1e-5)
    assert_array_equal(annot.duration, raw.annotations.duration)
    assert_array_equal(raw.annotations.description, np.repeat('test', 10))

    # Test combining with RawArray and orig_times
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = (np.pi, 0)
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        ants = Annotations([1., 2.], [.5, .5], 'x',
                           np.pi + first_samp / sfreq)
        raw.set_annotations(ants)
        raws.append(raw)
    raw = RawArray(data.copy(), info)
    raw.set_annotations(Annotations([1.], [.5], 'x', None))
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    boundary_idx = np.where(raw.annotations.description == 'BAD boundary')[0]
    assert len(boundary_idx) == 3
    raw.annotations.delete(boundary_idx)
    boundary_idx = np.where(raw.annotations.description == 'EDGE boundary')[0]
    assert len(boundary_idx) == 3
    raw.annotations.delete(boundary_idx)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 134., 135., 144., 145., 154.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154., 5.])
    assert_array_equal(raw.annotations.duration,
                       [.5, .5, .5, .5, .5, .5, 1.5])
    assert_array_equal(raw.annotations.description,
                       ['x', 'x', 'x', 'x', 'x', 'x', 'y'])
def simulate_nirs_raw(sfreq=3., amplitude=1., annot_desc='A',
                      sig_dur=300., stim_dur=5.,
                      isi_min=15., isi_max=45., ch_name='Simulated'):
    """
    Create simulated data.

    .. warning:: Work in progress: I am trying to think on the best API.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number, Array of numbers
        The amplitude of the signal to simulate in uM.
    annot_desc : String, Array of strings
        The name of the annotations for simulated amplitudes.
    sig_dur : Number
        The length of the signal to generate in seconds.
    stim_dur : Number, Array of numbers
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.
    ch_name : String
        Channel name to be used in returned raw instance.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    from nilearn.glm.first_level import make_first_level_design_matrix
    from pandas import DataFrame

    if type(amplitude) is not list:
        amplitude = [amplitude]
    if type(annot_desc) is not list:
        annot_desc = [annot_desc]
    if type(stim_dur) is not list:
        stim_dur = [stim_dur]

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    assert len(amplitude) == len(annot_desc), \
        "Same number of amplitudes as annotations required."
    assert len(amplitude) == len(stim_dur), \
        "Same number of amplitudes as durations required."

    onset = 0.
    onsets = []
    conditions = []
    durations = []
    while onset < sig_dur - 60:
        c_idx = np.random.randint(0, len(amplitude))
        onset += np.random.uniform(isi_min, isi_max) + stim_dur[c_idx]
        onsets.append(onset)
        conditions.append(annot_desc[c_idx])
        durations.append(stim_dur[c_idx])

    events = DataFrame({'trial_type': conditions,
                        'onset': onsets,
                        'duration': durations})

    dm = make_first_level_design_matrix(frame_times, events,
                                        drift_model='polynomial',
                                        drift_order=0)
    dm = dm.drop(columns='constant')

    annotations = Annotations(onsets, durations, conditions)

    info = create_info(ch_names=[ch_name], sfreq=sfreq, ch_types=['hbo'])

    for idx, annot in enumerate(annot_desc):
        if annot in dm.columns:
            dm[annot] *= amplitude[idx]
    a = np.sum(dm.to_numpy(), axis=1) * 1.e-6
    a = a.reshape(-1, 1).T

    raw = RawArray(a, info, verbose=False)
    raw.set_annotations(annotations)

    return raw
def test_basics():
    """Test annotation class."""
    raw = read_raw_fif(fif_fname)
    assert raw.annotations is not None  # XXX to be fixed in #5416
    assert len(raw.annotations.onset) == 0  # XXX to be fixed in #5416
    pytest.raises(IOError, read_annotations, fif_fname)
    onset = np.array(range(10))
    duration = np.ones(10)
    description = np.repeat('test', 10)
    dt = datetime.utcnow()
    meas_date = raw.info['meas_date']
    # Test time shifts.
    for orig_time in [None, dt, meas_date[0], meas_date]:
        annot = Annotations(onset, duration, description, orig_time)

    pytest.raises(ValueError, Annotations, onset, duration, description[:9])
    pytest.raises(ValueError, Annotations, [onset, 1], duration, description)
    pytest.raises(ValueError, Annotations, onset, [duration, 1], description)

    # Test combining annotations with concatenate_raws
    raw2 = raw.copy()
    delta = raw.times[-1] + 1. / raw.info['sfreq']
    orig_time = (meas_date[0] + meas_date[1] * 1e-6 + raw2._first_time)
    offset = orig_time - _handle_meas_date(raw2.info['meas_date'])
    annot = Annotations(onset, duration, description, orig_time)
    assert ' segments' in repr(annot)
    raw2.set_annotations(annot)
    assert_array_equal(raw2.annotations.onset, onset + offset)
    assert id(raw2.annotations) != id(annot)

    concatenate_raws([raw, raw2])
    assert_and_remove_boundary_annot(raw)
    assert_allclose(onset + offset + delta, raw.annotations.onset, rtol=1e-5)
    assert_array_equal(annot.duration, raw.annotations.duration)
    assert_array_equal(raw.annotations.description, np.repeat('test', 10))

    # Test combining with RawArray and orig_times
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = (np.pi, 0)
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        ants = Annotations([1., 2.], [.5, .5], 'x',
                           np.pi + first_samp / sfreq)
        raw.set_annotations(ants)
        raws.append(raw)
    raw = RawArray(data.copy(), info)
    raw.set_annotations(Annotations([1.], [.5], 'x', None))
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    assert_and_remove_boundary_annot(raw, 3)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 134., 135., 144., 145., 154.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset,
                       [5., 124., 125., 135., 144., 145., 154.])
    assert_array_equal(raw.annotations.duration,
                       [1.5, .5, .5, .5, .5, .5, .5])
    assert_array_equal(raw.annotations.description,
                       ['y', 'x', 'x', 'x', 'x', 'x', 'x'])