def test_filter_picks():
    """Test filtering default channel picks"""
    ch_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim']
    # one channel per type; channel names reuse the type names
    info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256)
    raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info)

    # -- Deal with meg mag grad exception
    # pick_types takes a single 'meg' kwarg instead of 'mag'/'grad'
    ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg')

    # -- Filter data channels
    for ch_type in ('mag', 'grad', 'eeg', 'seeg'):
        picks = dict((ch, ch == ch_type) for ch in ch_types)
        picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False
        raw_ = raw.pick_types(copy=True, **picks)
        # Avoid RuntimeWarning due to Attenuation
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            raw_.filter(10, 30)
            # exactly one warning expected from filtering zero data
            assert_true(len(w) == 1)

    # -- Error if no data channel
    for ch_type in ('misc', 'stim'):
        picks = dict((ch, ch == ch_type) for ch in ch_types)
        raw_ = raw.pick_types(copy=True, **picks)
        assert_raises(RuntimeError, raw_.filter, 10, 30)
def test_proj_raw_duration(duration, sfreq):
    """Test equivalence of `duration` options."""
    n_ch, n_dim = 30, 3
    rng = np.random.RandomState(0)
    # data is a fixed-rank (n_dim) mixture so the projectors are recoverable
    signals = rng.randn(n_dim, 10000)
    mixing = rng.randn(n_ch, n_dim) + [0, 1, 2]
    data = np.dot(mixing, signals)
    raw = RawArray(data, create_info(n_ch, sfreq, 'eeg'))
    raw.set_eeg_reference(projection=True)
    n_eff = int(round(raw.info['sfreq'] * duration))
    # crop to an even "duration" number of epochs
    stop = ((len(raw.times) // n_eff) * n_eff - 1) / raw.info['sfreq']
    raw.crop(0, stop)
    proj_def = compute_proj_raw(raw, n_eeg=n_dim)
    proj_dur = compute_proj_raw(raw, duration=duration, n_eeg=n_dim)
    proj_none = compute_proj_raw(raw, duration=None, n_eeg=n_dim)
    assert len(proj_dur) == len(proj_none) == len(proj_def) == n_dim
    # proj_def is not in here because it does not necessarily evenly divide
    # the signal length:
    for pu, pn in zip(proj_dur, proj_none):
        assert_allclose(pu['data']['data'], pn['data']['data'])
    # but we can test it here since it should still be a small subspace angle:
    for proj in (proj_dur, proj_none, proj_def):
        computed = np.concatenate([p['data']['data'] for p in proj], 0)
        angle = np.rad2deg(linalg.subspace_angles(computed.T, mixing)[0])
        assert angle < 1e-5
def test_apply_function_verbose():
    """Test apply function verbosity """
    n_chan = 2
    n_times = 3
    ch_names = [str(ii) for ii in range(n_chan)]
    raw = RawArray(np.zeros((n_chan, n_times)),
                   create_info(ch_names, 1., 'mag'))
    # test return types in both code paths (parallel / 1 job)
    assert_raises(TypeError, raw.apply_function, bad_1, None, None, 1)
    assert_raises(ValueError, raw.apply_function, bad_2, None, None, 1)
    assert_raises(TypeError, raw.apply_function, bad_1, None, None, 2)
    assert_raises(ValueError, raw.apply_function, bad_2, None, None, 2)

    # check our arguments: verbose=False must log nothing, verbose=True one
    # line per channel
    tempdir = _TempDir()
    test_name = op.join(tempdir, 'test.log')
    set_log_file(test_name)
    try:
        raw.apply_function(printer, None, None, 1, verbose=False)
        with open(test_name) as fid:
            assert_equal(len(fid.readlines()), 0)
        raw.apply_function(printer, None, None, 1, verbose=True)
        with open(test_name) as fid:
            assert_equal(len(fid.readlines()), n_chan)
    finally:
        # always restore default logging, even if an assertion fails
        set_log_file(None)
def test_annotation_property_deprecation_warning():
    """Test that assigning annotations warns and nowhere else."""
    with pytest.warns(None) as w:
        raw = RawArray(np.random.rand(1, 1), create_info(1, 1))
    # Fix: `len(w) is 0` compared identity with an int literal, which only
    # works by CPython small-int caching and raises SyntaxWarning on 3.8+.
    assert len(w) == 0
    with pytest.warns(DeprecationWarning, match='by assignment is deprecated'):
        raw.annotations = None
def test_chunk_duration(): """Test chunk_duration.""" # create dummy raw raw = RawArray(data=np.empty([10, 10], dtype=np.float64), info=create_info(ch_names=10, sfreq=1.), first_samp=0) raw.info['meas_date'] = 0 raw.set_annotations(Annotations(description='foo', onset=[0], duration=[10], orig_time=None)) # expected_events = [[0, 0, 1], [0, 0, 1], [1, 0, 1], [1, 0, 1], .. # [9, 0, 1], [9, 0, 1]] expected_events = np.atleast_2d(np.repeat(range(10), repeats=2)).T expected_events = np.insert(expected_events, 1, 0, axis=1) expected_events = np.insert(expected_events, 2, 1, axis=1) events, events_id = events_from_annotations(raw, chunk_duration=.5, use_rounding=False) assert_array_equal(events, expected_events) # test chunk durations that do not fit equally in annotation duration expected_events = np.zeros((3, 3)) expected_events[:, -1] = 1 expected_events[:, 0] = np.arange(0, 9, step=3) events, events_id = events_from_annotations(raw, chunk_duration=3.) assert_array_equal(events, expected_events)
def _raw_annot(meas_date, orig_time):
    """Build a 10-channel dummy Raw carrying one 'dummy' annotation."""
    raw = RawArray(data=np.empty((10, 10)),
                   info=create_info(ch_names=10, sfreq=10.),
                   first_samp=10)
    raw.info['meas_date'] = meas_date
    raw.set_annotations(
        annotations=Annotations([.5], [.2], ['dummy'], orig_time))
    return raw
def write_mnefiff(data, filename):
    """Export data to MNE using FIFF format.

    Parameters
    ----------
    data : instance of ChanTime
        data with only one trial
    filename : path to file
        file to export to (include '.mat')

    Notes
    -----
    It cannot store data larger than 2 GB.
    The data is assumed to have only EEG electrodes.
    It overwrites a file if it exists.
    """
    # mne is imported lazily so it stays an optional dependency
    from mne import create_info, set_log_level
    from mne.io import RawArray
    set_log_level(WARNING)  # WARNING comes from the enclosing module's scope

    TRIAL = 0  # only the first (and assumed only) trial is exported
    info = create_info(list(data.axis['chan'][TRIAL]), data.s_freq,
                       ['eeg', ] * data.number_of('chan')[TRIAL])

    UNITS = 1e-6  # mne wants data in uV
    fiff = RawArray(data.data[0] * UNITS, info)

    # attach electrode positions when channel info is available
    if data.attr['chan']:
        fiff.set_channel_positions(data.attr['chan'].return_xyz(),
                                   data.attr['chan'].return_label())
    fiff.save(filename, overwrite=True)
def test_resample_raw():
    """Test resampling using RawArray."""
    sfreq = 2048.
    samples = np.zeros((1, 1001))
    raw = RawArray(samples, create_info(1, sfreq, 'eeg'))
    raw.resample(128, npad=10)
    resampled = raw.get_data()
    assert resampled.shape == (1, 63)
def raw_factory(meas_date):
    """Return a dummy Raw with the given meas_date and a fixed annotation."""
    annot = Annotations(onset=[.5], duration=[.2],
                        description='dummy', orig_time=None)
    raw = RawArray(data=np.empty((10, 10)),
                   info=create_info(ch_names=10, sfreq=10.),
                   first_samp=10)
    raw.info['meas_date'] = meas_date
    raw.set_annotations(annotations=annot)
    return raw
def test_time_as_index_ref(offset, origin):
    """Test indexing of raw times."""
    meas_date = 1
    info = create_info(ch_names=10, sfreq=10.)
    raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
    raw.info['meas_date'] = meas_date

    # offsetting times relative to `origin` must map back onto sample indices
    relative_times = raw.times
    inds = raw.time_as_index(relative_times + offset,
                             use_rounding=True,
                             origin=origin)
    assert_array_equal(inds, np.arange(raw.n_times))
def test_picks_by_channels():
    """Test creating pick_lists."""
    rng = np.random.RandomState(909)
    test_data = rng.random_sample((4, 2000))
    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
    ch_types = ['grad', 'mag', 'mag', 'eeg']
    sfreq = 250.0
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    _assert_channel_types(info)
    raw = RawArray(test_data, info)

    # three channel types -> three picks; mag listed first
    pick_list = _picks_by_type(raw.info)
    assert_equal(len(pick_list), 3)
    assert_equal(pick_list[0][0], 'mag')
    pick_list2 = _picks_by_type(raw.info, meg_combined=False)
    assert_equal(len(pick_list), len(pick_list2))
    assert_equal(pick_list2[0][0], 'mag')

    # combining MEG merges mag+grad into a single 'meg' entry
    pick_list2 = _picks_by_type(raw.info, meg_combined=True)
    assert_equal(len(pick_list), len(pick_list2) + 1)
    assert_equal(pick_list2[0][0], 'meg')

    test_data = rng.random_sample((4, 2000))
    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
    ch_types = ['mag', 'mag', 'mag', 'mag']
    sfreq = 250.0
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    raw = RawArray(test_data, info)

    # This acts as a set, not an order
    assert_array_equal(pick_channels(info['ch_names'], ['MEG 002', 'MEG 001']),
                       [0, 1])

    # Make sure checks for list input work.
    pytest.raises(ValueError, pick_channels, ch_names, 'MEG 001')
    pytest.raises(ValueError, pick_channels, ch_names, ['MEG 001'], 'hi')

    pick_list = _picks_by_type(raw.info)
    assert_equal(len(pick_list), 1)
    assert_equal(pick_list[0][0], 'mag')
    pick_list2 = _picks_by_type(raw.info, meg_combined=True)
    # mag-only data: combining MEG changes nothing
    assert_equal(len(pick_list), len(pick_list2))
    assert_equal(pick_list2[0][0], 'mag')

    # pick_types type check
    pytest.raises(ValueError, raw.pick_types, eeg='string')

    # duplicate check
    names = ['MEG 002', 'MEG 002']
    assert len(pick_channels(raw.info['ch_names'], names)) == 1
    assert len(raw.copy().pick_channels(names)[0][0]) == 1
def test_mark_flat(first_samp): """Test marking flat segments.""" # Test if ECG analysis will work on data that is not preloaded n_ch, n_times = 11, 1000 data = np.random.RandomState(0).randn(n_ch, n_times) assert not (np.diff(data, axis=-1) == 0).any() # nothing flat at first info = create_info(n_ch, 1000., 'eeg') info['meas_date'] = (1, 2) # test first_samp != for gh-6295 raw = RawArray(data, info, first_samp=first_samp) raw.info['bads'] = [raw.ch_names[-1]] # # First make a channel flat the whole time # raw_0 = raw.copy() raw_0._data[0] = 0. for kwargs, bads, want_times in [ # Anything < 1 will mark spatially (dict(bad_percent=100.), [], 0), (dict(bad_percent=99.9), [raw.ch_names[0]], n_times), (dict(), [raw.ch_names[0]], n_times)]: # default (1) raw_time = mark_flat(raw_0.copy(), verbose='debug', **kwargs) want_bads = raw.info['bads'] + bads assert raw_time.info['bads'] == want_bads n_good_times = raw_time.get_data(reject_by_annotation='omit').shape[1] assert n_good_times == want_times # # Now make a channel flat for 20% of the time points # raw_0 = raw.copy() n_good_times = int(round(0.8 * n_times)) raw_0._data[0, n_good_times:] = 0. threshold = 100 * (n_times - n_good_times) / n_times for kwargs, bads, want_times in [ # Should change behavior at bad_percent=20 (dict(bad_percent=100), [], n_good_times), (dict(bad_percent=threshold), [], n_good_times), (dict(bad_percent=threshold - 1e-5), [raw.ch_names[0]], n_times), (dict(), [raw.ch_names[0]], n_times)]: raw_time = mark_flat(raw_0.copy(), verbose='debug', **kwargs) want_bads = raw.info['bads'] + bads assert raw_time.info['bads'] == want_bads n_good_times = raw_time.get_data(reject_by_annotation='omit').shape[1] assert n_good_times == want_times with pytest.raises(TypeError, match='must be an instance of BaseRaw'): mark_flat(0.) with pytest.raises(ValueError, match='not convert string to float'): mark_flat(raw, 'x')
def test_annotations():
    """Test annotation class."""
    raw = read_raw_fif(fif_fname)
    onset = np.array(range(10))
    duration = np.ones(10)
    description = np.repeat('test', 10)
    dt = datetime.utcnow()
    meas_date = raw.info['meas_date']
    # Test time shifts.
    for orig_time in [None, dt, meas_date[0], meas_date]:
        annot = Annotations(onset, duration, description, orig_time)

    # mismatched lengths / nested sequences must raise
    assert_raises(ValueError, Annotations, onset, duration, description[:9])
    assert_raises(ValueError, Annotations, [onset, 1], duration, description)
    assert_raises(ValueError, Annotations, onset, [duration, 1], description)

    # Test combining annotations with concatenate_raws
    raw2 = raw.copy()
    orig_time = (meas_date[0] + meas_date[1] * 0.000001 +
                 raw2.first_samp / raw2.info['sfreq'])
    annot = Annotations(onset, duration, description, orig_time)
    raw2.annotations = annot
    assert_array_equal(raw2.annotations.onset, onset)
    concatenate_raws([raw, raw2])
    # second file's annotations are shifted by the first file's length (~20 s)
    assert_array_almost_equal(onset + 20., raw.annotations.onset, decimal=2)
    assert_array_equal(annot.duration, raw.annotations.duration)
    assert_array_equal(raw.annotations.description, np.repeat('test', 10))

    # Test combining with RawArray and orig_times
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = 0
    raws = []
    for i, fs in enumerate([1000, 100, 12]):
        raw = RawArray(data.copy(), info, first_samp=fs)
        ants = Annotations([1., 2.], [.5, .5], 'x', fs / sfreq)
        raw.annotations = ants
        raws.append(raw)
    raw = concatenate_raws(raws)
    assert_array_equal(raw.annotations.onset, [1., 2., 11., 12., 21., 22.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset, [1., 2., 12., 21., 22.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset, [1., 2., 12., 21., 22., 5])
    assert_array_equal(raw.annotations.duration, [.5, .5, .5, .5, .5, 1.5])
    assert_array_equal(raw.annotations.description,
                       ['x', 'x', 'x', 'x', 'x', 'y'])
def test_date_none(tmpdir):
    """Test that DATE_NONE is used properly."""
    # Regression test for gh-5908
    n_channels, n_samples = 139, 20
    rand_data = np.random.random_sample((n_channels, n_samples))
    names = ['E{}'.format(x) for x in range(n_channels)]
    info = create_info(ch_names=names, ch_types=['eeg'] * n_channels,
                       sfreq=2048)
    assert info['meas_date'] is None
    raw = RawArray(data=rand_data, info=info)
    fname = op.join(str(tmpdir), 'test-raw.fif')
    raw.save(fname)
    # a round-trip through disk must preserve the unset measurement date
    reloaded = read_raw_fif(fname, preload=True)
    assert reloaded.info['meas_date'] is None
def test_raw_reject():
    """Test raw data getter with annotation reject."""
    sfreq = 100.
    info = create_info(['a', 'b', 'c', 'd', 'e'], sfreq, ch_types='eeg')
    raw = RawArray(np.ones((5, 15000)), info)
    with pytest.warns(RuntimeWarning, match='outside the data range'):
        raw.set_annotations(Annotations([2, 100, 105, 148],
                                        [2, 8, 5, 8], 'BAD'))
    data, times = raw.get_data([0, 1, 3, 4], 100, 11200,  # 1-112 sec
                               'omit', return_times=True)
    bad_times = np.concatenate([np.arange(200, 400),
                                np.arange(10000, 10800),
                                np.arange(10500, 11000)])
    expected_times = np.setdiff1d(np.arange(100, 11200), bad_times) / sfreq
    assert_allclose(times, expected_times)

    # with orig_time and complete overlap
    raw = read_raw_fif(fif_fname)
    raw.set_annotations(Annotations(onset=[1, 4, 5] + raw._first_time,
                                    duration=[1, 3, 1], description='BAD',
                                    orig_time=raw.info['meas_date']))
    t_stop = 18.
    assert raw.times[-1] > t_stop
    n_stop = int(round(t_stop * raw.info['sfreq']))
    n_drop = int(round(4 * raw.info['sfreq']))
    assert len(raw.times) >= n_stop
    data, times = raw.get_data(range(10), 0, n_stop, 'omit', True)
    assert data.shape == (10, n_stop - n_drop)
    assert times[-1] == raw.times[n_stop - 1]
    assert_array_equal(data[:, -100:], raw[:10, n_stop - 100:n_stop][0])

    data, times = raw.get_data(range(10), 0, n_stop, 'NaN', True)
    assert_array_equal(data.shape, (10, n_stop))
    assert times[-1] == raw.times[n_stop - 1]
    t_1, t_2 = raw.time_as_index([1, 2], use_rounding=True)
    assert np.isnan(data[:, t_1:t_2]).all()  # 1s - 2s
    # Fix: `.any()` was inside np.isnan -- np.isnan(bool) is always False,
    # which made these two assertions vacuous. `.any()` must reduce the
    # elementwise isnan mask.
    assert not np.isnan(data[:, :t_1]).any()
    assert not np.isnan(data[:, t_2:]).any()
    assert_array_equal(data[:, -100:], raw[:10, n_stop - 100:n_stop][0])
    assert_array_equal(raw.get_data(), raw[:][0])

    # Test _sync_onset
    times = [10, -88, 190]
    onsets = _sync_onset(raw, times)
    assert_array_almost_equal(onsets,
                              times - raw.first_samp / raw.info['sfreq'])
    assert_array_almost_equal(times, _sync_onset(raw, onsets, True))
def test_crop(): """Test cropping raw files """ # split a concatenated file to test a difficult case raw = concatenate_raws([Raw(f) for f in [fif_fname, fif_fname]]) split_size = 10. # in seconds sfreq = raw.info['sfreq'] nsamp = (raw.last_samp - raw.first_samp + 1) # do an annoying case (off-by-one splitting) tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))] tmins = np.sort(tmins) tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1])) tmaxs /= sfreq tmins /= sfreq raws = [None] * len(tmins) for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)): raws[ri] = raw.copy().crop(tmin, tmax, copy=False) all_raw_2 = concatenate_raws(raws, preload=False) assert_equal(raw.first_samp, all_raw_2.first_samp) assert_equal(raw.last_samp, all_raw_2.last_samp) assert_array_equal(raw[:, :][0], all_raw_2[:, :][0]) tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq)) tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1])) tmaxs /= sfreq tmins /= sfreq # going in revere order so the last fname is the first file (need it later) raws = [None] * len(tmins) for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)): raws[ri] = raw.copy().crop(tmin, tmax, copy=False) # test concatenation of split file all_raw_1 = concatenate_raws(raws, preload=False) all_raw_2 = raw.copy().crop(0, None, copy=False) for ar in [all_raw_1, all_raw_2]: assert_equal(raw.first_samp, ar.first_samp) assert_equal(raw.last_samp, ar.last_samp) assert_array_equal(raw[:, :][0], ar[:, :][0]) # test shape consistency of cropped raw data = np.zeros((1, 1002001)) info = create_info(1, 1000) raw = RawArray(data, info) for tmin in range(0, 1001, 100): raw1 = raw.copy().crop(tmin=tmin, tmax=tmin + 2, copy=False) assert_equal(raw1[:][0].shape, (1, 2001))
def test_raw_reject():
    """Test raw data getter with annotation reject."""
    info = create_info(['a', 'b', 'c', 'd', 'e'], 100, ch_types='eeg')
    raw = RawArray(np.ones((5, 15000)), info)
    with warnings.catch_warnings(record=True):  # one outside range
        raw.annotations = Annotations([2, 100, 105, 148], [2, 8, 5, 8], 'BAD')
    data = raw.get_data([0, 1, 3, 4], 100, 11200, 'omit')
    assert_array_equal(data.shape, (4, 9900))

    # with orig_time and complete overlap
    raw = read_raw_fif(fif_fname)
    raw.annotations = Annotations([44, 47, 48], [1, 3, 1], 'BAD',
                                  raw.info['meas_date'])
    data, times = raw.get_data(range(10), 0, 6000, 'omit', True)
    assert_array_equal(data.shape, (10, 4799))
    assert_equal(times[-1], raw.times[5999])
    assert_array_equal(data[:, -100:], raw[:10, 5900:6000][0])

    data, times = raw.get_data(range(10), 0, 6000, 'NaN', True)
    assert_array_equal(data.shape, (10, 6000))
    assert_equal(times[-1], raw.times[5999])
    assert_true(np.isnan(data[:, 313:613]).all())  # 1s - 2s
    # Fix: `.any()` was inside np.isnan -- np.isnan(data[...].any()) reduces
    # to np.isnan(<bool>) which is always False, making the check vacuous.
    assert_true(not np.isnan(data[:, 614]).any())
    assert_array_equal(data[:, -100:], raw[:10, 5900:6000][0])
    assert_array_equal(raw.get_data(), raw[:][0])

    # Test _sync_onset
    times = [10, -88, 190]
    onsets = _sync_onset(raw, times)
    assert_array_almost_equal(onsets,
                              times - raw.first_samp / raw.info['sfreq'])
    assert_array_almost_equal(times, _sync_onset(raw, onsets, True))
def _get_data(): """Helper to get some starting data""" # raw with ECG channel raw = Raw(raw_fname).crop(0.0, 5.0).load_data() data_picks = pick_types(raw.info, meg=True, eeg=True) other_picks = pick_types(raw.info, meg=False, stim=True, eog=True) picks = np.sort(np.concatenate((data_picks[::16], other_picks))) raw = raw.pick_channels([raw.ch_names[p] for p in picks]) ecg = RawArray(np.zeros((1, len(raw.times))), create_info(["ECG 063"], raw.info["sfreq"], "ecg")) for key in ("dev_head_t", "buffer_size_sec", "highpass", "lowpass", "filename", "dig"): ecg.info[key] = raw.info[key] raw.add_channels([ecg]) src = read_source_spaces(src_fname) trans = read_trans(trans_fname) sphere = make_sphere_model("auto", "auto", raw.info) stc = _make_stc(raw, src) return raw, src, stc, trans, sphere
def _get_data(): """Helper to get some starting data.""" # raw with ECG channel raw = read_raw_fif(raw_fname).crop(0., 5.0).load_data() data_picks = pick_types(raw.info, meg=True, eeg=True) other_picks = pick_types(raw.info, meg=False, stim=True, eog=True) picks = np.sort(np.concatenate((data_picks[::16], other_picks))) raw = raw.pick_channels([raw.ch_names[p] for p in picks]) raw.info.normalize_proj() ecg = RawArray(np.zeros((1, len(raw.times))), create_info(['ECG 063'], raw.info['sfreq'], 'ecg')) for key in ('dev_head_t', 'buffer_size_sec', 'highpass', 'lowpass', 'dig'): ecg.info[key] = raw.info[key] raw.add_channels([ecg]) src = read_source_spaces(src_fname) trans = read_trans(trans_fname) sphere = make_sphere_model('auto', 'auto', raw.info) stc = _make_stc(raw, src) return raw, src, stc, trans, sphere
def test_apply_function_verbose():
    """Test apply function verbosity."""
    n_chan = 2
    n_times = 3
    ch_names = [str(ii) for ii in range(n_chan)]
    raw = RawArray(np.zeros((n_chan, n_times)),
                   create_info(ch_names, 1., 'mag'))
    # test return types in both code paths (parallel / 1 job)
    pytest.raises(TypeError, raw.apply_function, bad_1)
    pytest.raises(ValueError, raw.apply_function, bad_2)
    pytest.raises(TypeError, raw.apply_function, bad_1, n_jobs=2)
    pytest.raises(ValueError, raw.apply_function, bad_2, n_jobs=2)

    # check our arguments: verbose=False logs nothing, verbose=True logs one
    # line per channel; apply_function returns the raw it modified
    with catch_logging() as sio:
        out = raw.apply_function(printer, verbose=False)
        assert len(sio.getvalue()) == 0
        assert out is raw
        raw.apply_function(printer, verbose=True)
        assert sio.getvalue().count('\n') == n_chan
def extract_X_and_y(raw_nparray, raw_info, opts, verbose=False):
    """Band-pass filter raw data, epoch it around events, and return [X, y].

    Parameters
    ----------
    raw_nparray : ndarray
        Raw channel data (channels x samples).
    raw_info : instance of mne Info
        Measurement info matching ``raw_nparray``.
    opts : object
        Options carrying filter coefficients (``b``, ``a``), event labels and
        epoch window bounds.
    verbose : bool
        If True, print epoch/label diagnostics.

    Returns
    -------
    list of [X, y] where X are the trimmed epoch data and y the event labels.
    """
    # need to make a new RawArray, because once we apply filter, we mutate
    # its internal _data
    raw = RawArray(raw_nparray, raw_info, verbose=verbose)
    picks = pick_types(raw.info, eeg=True)
    # NOTE: the pick_types result above is immediately overridden here
    picks = getChannelSubsetMotorBand()
    # picks = getChannelSubsetFront()
    # picks = getChannelSubsetBack()
    # print(picks)

    # Apply band-pass filter
    raw._data[picks] = lfilter(opts.b, opts.a, raw._data[picks])
    consecutive = True

    train_events = mne.find_events(raw, shortest_event=0,
                                   consecutive=consecutive, verbose=verbose)
    train_epochs = Epochs(raw, train_events, opts.event_labels,
                          tmin=opts.epoch_full_tmin, tmax=opts.epoch_full_tmax,
                          proj=True, picks=picks, baseline=None, preload=True,
                          add_eeg_ref=False, verbose=verbose)
    epochs_trimmed = train_epochs.copy().crop(tmin=opts.epoch_trim_tmin,
                                              tmax=opts.epoch_trim_tmax)

    # Fix: Python-2-only `print` statements are a SyntaxError on Python 3;
    # use the print() function, which the rest of the stack supports.
    if verbose:
        print("train: epochs", epochs_trimmed)

    X = epochs_trimmed.get_data()
    y = epochs_trimmed.events[:, -1] - 2  # shift labels to start at 0
    if verbose:
        print("y", y.shape)

    return [X, y]
def test_get_data_reject():
    """Test if reject_by_annotation is working correctly."""
    sfreq = 256
    channels = ["C3", "Cz", "C4"]
    n_ch = len(channels)
    raw = RawArray(np.zeros((n_ch, 10 * sfreq)),
                   create_info(channels, sfreq=sfreq))
    raw.set_annotations(Annotations(onset=[2, 4], duration=[3, 2],
                                    description="bad"))

    # 'omit' drops annotated samples and logs what was retained
    with catch_logging() as log_capture:
        omitted = raw.get_data(reject_by_annotation="omit", verbose=True)
        expected = ('Omitting 1024 of 2560 (40.00%) samples, retaining 1536'
                    ' (60.00%) samples.')
        assert log_capture.getvalue().strip() == expected
    assert omitted.shape == (n_ch, 1536)

    # 'nan' keeps the shape but replaces rejected samples with NaN
    with catch_logging() as log_capture:
        blanked = raw.get_data(reject_by_annotation="nan", verbose=True)
        expected = ('Setting 1024 of 2560 (40.00%) samples to NaN, retaining 1536'
                    ' (60.00%) samples.')
        assert log_capture.getvalue().strip() == expected
    assert blanked.shape == (n_ch, 2560)  # shape doesn't change
    assert np.isnan(blanked).sum() == 3072  # but NaNs are introduced instead
def test_apply_function_verbose():
    """Test apply function verbosity """
    n_chan = 2
    n_times = 3
    ch_names = [str(ii) for ii in range(n_chan)]
    raw = RawArray(np.zeros((n_chan, n_times)),
                   create_info(ch_names, 1., 'mag'))
    # test return types in both code paths (parallel / 1 job)
    assert_raises(TypeError, raw.apply_function, bad_1, None, None, 1)
    assert_raises(ValueError, raw.apply_function, bad_2, None, None, 1)
    assert_raises(TypeError, raw.apply_function, bad_1, None, None, 2)
    assert_raises(ValueError, raw.apply_function, bad_2, None, None, 2)

    # check our arguments: verbose=False logs nothing, verbose=True logs one
    # line per channel
    with catch_logging() as sio:
        raw.apply_function(printer, None, None, 1, verbose=False)
        assert_equal(len(sio.getvalue()), 0)
        raw.apply_function(printer, None, None, 1, verbose=True)
        assert_equal(sio.getvalue().count('\n'), n_chan)
def test_raw_array_orig_times():
    """Test combining with RawArray and orig_times."""
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = (np.pi, 0)
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        # orig_time is offset by first_samp so onsets line up after concat
        ants = Annotations([1., 2.], [.5, .5], 'x', np.pi + first_samp / sfreq)
        raw.set_annotations(ants)
        raws.append(raw)
    # a raw with no orig_time at the end
    raw = RawArray(data.copy(), info)
    raw.set_annotations(Annotations([1.], [.5], 'x', None))
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    assert_and_remove_boundary_annot(raw, 3)
    assert_array_equal(raw.annotations.onset, [124., 125., 134., 135.,
                                               144., 145., 154.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset, [124., 125., 135., 144.,
                                               145., 154.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset, [5., 124., 125., 135.,
                                               144., 145., 154.])
    assert_array_equal(raw.annotations.duration, [1.5, .5, .5, .5, .5, .5, .5])
    assert_array_equal(raw.annotations.description,
                       ['y', 'x', 'x', 'x', 'x', 'x', 'x'])

    # These three things should be equivalent
    expected_orig_time = (raw.info['meas_date'][0] +
                          raw.info['meas_date'][1] / 1000000)
    for empty_annot in (
            Annotations([], [], [], expected_orig_time),
            Annotations([], [], [], None),
            None):
        raw.set_annotations(empty_annot)
        assert isinstance(raw.annotations, Annotations)
        assert len(raw.annotations) == 0
        assert raw.annotations.orig_time == expected_orig_time
def test_annotation_omit():
    """Test raw.get_data with annotations."""
    # two halves with distinct values so dropped samples are detectable
    data = np.concatenate([np.ones((1, 1000)), 2 * np.ones((1, 1000))], -1)
    info = create_info(1, 1000., 'eeg')
    raw = RawArray(data, info)
    raw.set_annotations(Annotations([0.5], [1], ['bad']))
    expected = raw[0][0]
    assert_allclose(raw.get_data(reject_by_annotation=None), expected)
    # nan
    expected[0, 500:1500] = np.nan
    assert_allclose(raw.get_data(reject_by_annotation='nan'), expected)
    # chunked reads must agree with the full read
    got = np.concatenate([raw.get_data(start=start, stop=stop,
                                       reject_by_annotation='nan')
                          for start, stop in ((0, 1000), (1000, 2000))], -1)
    assert_allclose(got, expected)
    # omit
    expected = expected[:, np.isfinite(expected[0])]
    assert_allclose(raw.get_data(reject_by_annotation='omit'), expected)
    got = np.concatenate([raw.get_data(start=start, stop=stop,
                                       reject_by_annotation='omit')
                          for start, stop in ((0, 1000), (1000, 2000))], -1)
    assert_allclose(got, expected)
    # invalid mode must raise
    pytest.raises(ValueError, raw.get_data, reject_by_annotation='foo')
def _create_annotation_based_on_descr(description, annotation_start_sampl=0, duration=0, orig_time=0): """Create a raw object with annotations from descriptions. The returning raw object contains as many annotations as description given. All starting at `annotation_start_sampl`. """ # create dummy raw raw = RawArray(data=np.empty([10, 10], dtype=np.float64), info=create_info(ch_names=10, sfreq=1000.), first_samp=0) raw.info['meas_date'] = 0 # create dummy annotations based on the descriptions onset = raw.times[annotation_start_sampl] onset_matching_desc = np.full_like(description, onset, dtype=type(onset)) duration_matching_desc = np.full_like(description, duration, dtype=type(duration)) annot = Annotations(description=description, onset=onset_matching_desc, duration=duration_matching_desc, orig_time=orig_time) if duration != 0: with pytest.warns(RuntimeWarning, match='Limited.*expanding outside'): # duration 0.1s is larger than the raw data expand raw.set_annotations(annot) else: raw.set_annotations(annot) # Make sure that set_annotations(annot) works assert all(raw.annotations.onset == onset) if duration != 0: expected_duration = (len(raw.times) / raw.info['sfreq']) - onset else: expected_duration = 0 _duration = raw.annotations.duration[0] assert _duration == approx(expected_duration) assert all(raw.annotations.duration == _duration) assert all(raw.annotations.description == description) return raw
def test_fnirs_channel_naming_and_order_custom_raw():
    """Ensure fNIRS channel checking on manually created data."""
    data = np.random.normal(size=(6, 10))

    # Start with a correctly named raw intensity dataset
    # These are the steps required to build an fNIRS Raw object from scratch
    ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760',
                'S2_D1 850', 'S3_D1 760', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    # loc[9] stores each channel's wavelength; alternate 760/850 nm
    freqs = np.tile([760, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f
    freqs = np.unique(_channel_frequencies(raw.info))
    picks = _check_channels_ordered(raw.info, freqs)
    assert len(picks) == len(raw.ch_names)
    assert len(picks) == 6

    # Different systems use different frequencies, so ensure that works
    ch_names = ['S1_D1 920', 'S1_D1 850', 'S2_D1 920',
                'S2_D1 850', 'S3_D1 920', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    freqs = np.tile([920, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f
    picks = _check_channels_ordered(raw.info, [920, 850])
    assert len(picks) == len(raw.ch_names)
    assert len(picks) == 6

    # Catch expected errors
    # The frequencies named in the channel names must match the info loc field
    ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760',
                'S2_D1 850', 'S3_D1 760', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    freqs = np.tile([920, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f
    with pytest.raises(ValueError, match='name and NIRS frequency do not'):
        _check_channels_ordered(raw.info, [920, 850])

    # Catch if someone doesn't set the info field
    ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760',
                'S2_D1 850', 'S3_D1 760', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    with pytest.raises(ValueError, match='missing wavelength information'):
        _check_channels_ordered(raw.info, [920, 850])

    # I have seen data encoded not in alternating frequency, but blocked.
    ch_names = ['S1_D1 760', 'S2_D1 760', 'S3_D1 760',
                'S1_D1 850', 'S2_D1 850', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    freqs = np.repeat([760, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f
    with pytest.raises(ValueError, match='channels not ordered correctly'):
        _check_channels_ordered(raw.info, [760, 850])
    # and this is how you would fix the ordering, then it should pass
    raw.pick(picks=[0, 3, 1, 4, 2, 5])
    _check_channels_ordered(raw.info, [760, 850])
def test_plot_misc_auto():
    """Test plotting of data with misc auto scaling."""
    rng = np.random.RandomState(0)
    raw = RawArray(rng.randn(1, 1000), create_info(1, 1000., 'misc'))
    raw.plot()
    plt.close('all')
# Inject two simulated HFO events into the data: one at 1 s, one at 7 s
freq = 250
numcycles = 9
sim = simulate_hfo(sfreq, freq, numcycles)[0]
ev_start = sfreq
data[ev_start:ev_start + len(sim)] += sim * 10
sim = simulate_hfo(sfreq, freq, numcycles)[0]
ev_start = 7 * sfreq
data[ev_start:ev_start + len(sim)] += sim * 10

# convert the data into mne-python
# note: the channel names are made up and the channel types are just
# set to 'seeg' for the sake of the example
ch_names = ['A1']
info = create_info(sfreq=sfreq, ch_names=ch_names, ch_types='seeg')
raw = RawArray(data=data[np.newaxis, :], info=info)

###############################################################################
# Let's plot the data and see what it looks like
raw.plot()

###############################################################################
# Detect HFOs
# -----------
# All detectors inherit from the base class ``mne_hfo.base.Detector``,
# which inherits from the :class:`sklearn.base.BaseEstimator` class.
# To run any estimator, one instantiates it along with the hyper-parameters,
# and then calls the ``fit`` function. Afterwards, detected HFOs are available
# in the various data structures. The recommended usage is the DataFrame,
# which is accessible via the ``mne_hfo.base.Detector.hfo_df`` property.
def test_get_montage():
    """Test ContainsMixin.get_montage()."""
    montage_names = make_standard_montage('standard_1020').ch_names
    n_ch = len(montage_names)
    zeros = np.zeros((n_ch, 512 * 2))
    # Exercise Raw.get_montage and Info.get_montage the same way (the second
    # pass is the "test info" case).
    for use_info in (False, True):
        raw = RawArray(zeros, create_info(montage_names, 512, 'eeg'))
        raw.set_montage('standard_1020')
        source = raw.info if use_info else raw
        assert len(source.get_montage().ch_names) == n_ch
        # marking a channel bad must not shrink the montage
        raw.info['bads'] = [montage_names[0]]
        assert len(source.get_montage().ch_names) == n_ch
def test_basics():
    """Test annotation class."""
    raw = read_raw_fif(fif_fname)
    # This fixture file carries no annotations of its own.
    assert raw.annotations is None
    pytest.raises(IOError, read_annotations, fif_fname)
    onset = np.array(range(10))
    duration = np.ones(10)
    description = np.repeat('test', 10)
    dt = datetime.utcnow()
    meas_date = raw.info['meas_date']
    # Test time shifts.
    for orig_time in [None, dt, meas_date[0], meas_date]:
        annot = Annotations(onset, duration, description, orig_time)
    # Mismatched lengths / nested sequences must raise.
    pytest.raises(ValueError, Annotations, onset, duration, description[:9])
    pytest.raises(ValueError, Annotations, [onset, 1], duration, description)
    pytest.raises(ValueError, Annotations, onset, [duration, 1], description)
    # Test combining annotations with concatenate_raws
    raw2 = raw.copy()
    # delta = duration of raw in seconds (one sample past the last time).
    delta = raw.times[-1] + 1. / raw.info['sfreq']
    # orig_time in absolute seconds: meas_date (sec + usec) plus first_samp.
    orig_time = (meas_date[0] + meas_date[1] * 1e-6 +
                 raw2.first_samp / raw2.info['sfreq'])
    annot = Annotations(onset, duration, description, orig_time)
    assert ' segments' in repr(annot)
    raw2.annotations = annot
    assert_array_equal(raw2.annotations.onset, onset)
    concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotations
    raw.annotations.delete(-1)
    # After concatenation, raw2's annotations are shifted by raw's duration.
    assert_allclose(onset + delta, raw.annotations.onset, rtol=1e-5)
    assert_array_equal(annot.duration, raw.annotations.duration)
    assert_array_equal(raw.annotations.description, np.repeat('test', 10))
    # Test combining with RawArray and orig_times
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = np.pi
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        ants = Annotations([1., 2.], [.5, .5], 'x',
                           np.pi + first_samp / sfreq)
        raw.annotations = ants
        raws.append(raw)
    # One raw with no orig_time at all.
    raw = RawArray(data.copy(), info)
    raw.annotations = Annotations([1.], [.5], 'x', None)
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    # Each of the three joins produces one BAD and one EDGE boundary marker.
    boundary_idx = np.where(raw.annotations.description == 'BAD boundary')[0]
    assert len(boundary_idx) == 3
    raw.annotations.delete(boundary_idx)
    boundary_idx = np.where(raw.annotations.description == 'EDGE boundary')[0]
    assert len(boundary_idx) == 3
    raw.annotations.delete(boundary_idx)
    assert_array_equal(raw.annotations.onset,
                       [1., 2., 11., 12., 21., 22., 31.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset, [1., 2., 12., 21., 22., 31.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset,
                       [1., 2., 12., 21., 22., 31., 5])
    assert_array_equal(raw.annotations.duration,
                       [.5, .5, .5, .5, .5, .5, 1.5])
    assert_array_equal(raw.annotations.description,
                       ['x', 'x', 'x', 'x', 'x', 'x', 'y'])
def test_find_events():
    """Test find events in raw file."""
    events = read_events(fname)
    raw = read_raw_fif(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    # Save current env values so they can be restored at the end.
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3, mask_type='not_and')
    with pytest.warns(RuntimeWarning, match='events masked'):
        events22 = read_events(fname, mask=3, mask_type='not_and')
    assert_array_equal(events11, events22)
    # Reset some data for ease of comparison
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000
    raw._update_times()
    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])
    # test digital masking: put 0..4 in the first 5 samples, zeros after.
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'
    pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
    pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
    # testing mask_type. default = 'not_and'
    assert_array_equal(find_events(raw, shortest_event=1, mask=1,
                                   mask_type='not_and'),
                       [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2,
                                   mask_type='not_and'),
                       [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3,
                                   mask_type='not_and'),
                       [[4, 0, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4,
                                   mask_type='not_and'),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    # testing with mask_type = 'and'
    assert_array_equal(find_events(raw, shortest_event=1, mask=1,
                                   mask_type='and'),
                       [[1, 0, 1], [3, 0, 1]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2,
                                   mask_type='and'),
                       [[2, 0, 2]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3,
                                   mask_type='and'),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4,
                                   mask_type='and'),
                       [[4, 0, 4]])
    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])
    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6
    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=True),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5], [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5], [20, 5, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True),
                       [[19, 6, 5], [29, 5, 6], [31, 0, 5], [40, 0, 6],
                        [14399, 0, 9]])
    # output='step' requires an explicit shortest_event.
    pytest.raises(ValueError, find_events, raw, output='step',
                  consecutive=True)
    assert_array_equal(find_events(raw, output='step', consecutive=True,
                                   shortest_event=1),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5], [32, 5, 0],
                        [40, 0, 6], [41, 6, 0], [14399, 0, 9],
                        [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5], [31, 0, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=False,
                                   min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True,
                                   min_duration=0.002),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False,
                                   min_duration=0.002),
                       [[31, 0, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True,
                                   min_duration=0.002),
                       [[19, 6, 5], [29, 5, 6], [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True,
                                   min_duration=0.003),
                       [[10, 0, 5], [20, 5, 6]])
    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
                                       stim_channel=stim_channel),
                       [[0, 0, 1], [1, 1, 0], [10, 0, 4], [11, 4, 5],
                        [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=-1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0], [10, 0, 5], [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0], [11, 0, 5], [20, 5, 0]])
    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o
    # Test with list of stim channels
    raw._data[stim_channel_idx, 1:101] = np.zeros(100)
    raw._data[stim_channel_idx, 10:11] = 1
    raw._data[stim_channel_idx, 30:31] = 3
    stim_channel2 = 'STI 015'
    stim_channel2_idx = pick_channels(raw.info['ch_names'],
                                      include=[stim_channel2])
    # Second stim channel carries the same events, shifted 5 samples earlier.
    raw._data[stim_channel2_idx, :] = 0
    raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
    events1 = find_events(raw, stim_channel='STI 014')
    events2 = events1.copy()
    events2[:, 0] -= 5
    events = find_events(raw, stim_channel=['STI 014', stim_channel2])
    # Events from the two channels interleave in time order.
    assert_array_equal(events[::2], events2)
    assert_array_equal(events[1::2], events1)
    # test initial_event argument
    info = create_info(['MYSTI'], 1000, 'stim')
    data = np.zeros((1, 1000))
    raw = RawArray(data, info)
    data[0, :10] = 100
    data[0, 30:40] = 200
    assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
    assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
                       [[0, 0, 100], [30, 0, 200]])
    # test error message for raw without stim channels
    raw = read_raw_fif(raw_fname, preload=True)
    raw.pick_types(meg=True, stim=False)
    # raw does not have annotations
    with pytest.raises(ValueError, match="'stim_channel'"):
        find_events(raw)
    # if raw has annotations, we show a different error message
    raw.set_annotations(Annotations(0, 2, "test"))
    with pytest.raises(ValueError, match="mne.events_from_annotations"):
        find_events(raw)
def test_read_ctf_annotations():
    """Test reading CTF marker file."""
    # Ground-truth event latencies (in samples) exported from Fieldtrip.
    EXPECTED_LATENCIES = np.array([
         5640,   7950,   9990,  12253,  14171,  16557,  18896,  20846,  # noqa
        22702,  24990,  26830,  28974,  30906,  33077,  34985,  36907,  # noqa
        38922,  40760,  42881,  45222,  47457,  49618,  51802,  54227,  # noqa
        56171,  58274,  60394,  62375,  64444,  66767,  68827,  71109,  # noqa
        73499,  75807,  78146,  80415,  82554,  84508,  86403,  88426,  # noqa
        90746,  92893,  94779,  96822,  98996,  99001, 100949, 103325,  # noqa
       105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663,  # noqa
       121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210,  # noqa
       139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646,  # noqa
       156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519,  # noqa
       174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330,  # noqa
       192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409,  # noqa
       209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305,  # noqa
       226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762,  # noqa
       243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354,  # noqa
       260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197,  # noqa
       278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183,  # noqa
       295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124,  # noqa
       312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959,  # noqa
       329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034,  # noqa
       344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603,  # noqa
       361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204,  # noqa
       378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785,  # noqa
       396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686,  # noqa
       413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215,  # noqa
       429278, 431668                                                   # noqa
    ]) - 1  # Fieldtrip has 1 sample difference with MNE

    # Build a dummy raw just long enough to hold the annotations; the
    # actual sample values are irrelevant here.
    raw = RawArray(
        data=np.empty((1, 432000), dtype=np.float64),
        info=create_info(ch_names=1, sfreq=1200.0))
    raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date'])
    raw.set_annotations(read_annotations(somato_fname))

    events, _ = events_from_annotations(raw)
    latencies = np.sort(events[:, 0])
    assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6)
def load_data(fnames, sfreq=128., replace_ch_names=None):
    """Load CSV files from the /data directory into a Raw object.

    Args:
        fnames (array): CSV filepaths from which to load data

    Keyword Args:
        sfreq (float): fallback EEG sampling frequency.
            NOTE: the effective rate is estimated from each file's time
            index and snapped to 128 or 256 Hz, so this argument is
            currently always overridden; it is kept for backward
            compatibility.
        replace_ch_names (dict or None): dictionary containing a mapping to
            rename channels. Useful when an external electrode was used.

    Returns:
        (mne.io.array.array.RawArray): concatenation of the loaded EEG files
    """
    raw = []
    for fname in fnames:
        # read the file
        data = pd.read_csv(fname, index_col=0)
        data = data.dropna()

        # Estimate the sampling rate from the spacing of the first two
        # timestamps (index assumed to be in milliseconds — TODO confirm)
        # and snap to one of the two rates the hardware produces.
        srate = 1000 / (data.index.values[1] - data.index.values[0])
        sfreq = 256 if srate >= 200 else 128

        # name of each channel
        ch_names = list(data.columns)
        # indices of each channel
        ch_ind = list(range(len(ch_names)))

        if replace_ch_names is not None:
            ch_names = [replace_ch_names.get(c, c) for c in ch_names]

        # type of each channel: all EEG except the trailing stim channel
        ch_types = ['eeg'] * (len(ch_ind) - 1) + ['stim']
        montage = read_montage('standard_1005')

        # get data and exclude Aux channel
        data = data.values[:, ch_ind].T

        # create MNE object
        info = create_info(ch_names=ch_names, ch_types=ch_types,
                           sfreq=sfreq, montage=montage)
        raw.append(RawArray(data=data, info=info))

    # concatenate all raw objects
    raws = concatenate_raws(raw)
    return raws
# Acquire ~20 s of 64-channel EEG from an LSL stream, band-pass filter,
# resample, and print summary statistics for selected channels.
inlet = lsl_connect()

seconds = 20
# First pull is discarded — presumably flushes samples buffered before
# acquisition starts (TODO confirm against lsl_connect behavior).
chunk, timestamps = inlet.pull_chunk(max_samples=2000)
ts = np.zeros((0, 64))  # accumulator: (n_samples, 64 channels)
for i in range(seconds):
    sleep(1)
    chunk, timestamps = inlet.pull_chunk(max_samples=2000)
    chunk = np.array(chunk)
    print(ts.shape)
    ts = np.concatenate([ts, chunk], axis=0)
    print(chunk.shape, ts.shape)

# RawArray expects (n_channels, n_times), so transpose the accumulator.
ts = ts.T
print(ts.shape)

raw = RawArray(data=ts, info=stream_info)
raw = raw.filter(LO_FREQ, HI_FREQ, method='fir', fir_design='firwin',
                 phase='zero')
raw.resample(125)
print(raw)

# NOTE(review): /1000 looks like a unit conversion (uV -> mV?) — confirm.
raw_data = raw.get_data(picks=sorted(GOODS)) / 1000
print(raw_data.shape)
# Print min/max/mean for the first 9 picked channels.
for i in range(9):
    print(min(raw_data[i]), max(raw_data[i]), np.mean(raw_data[i]))
def test_csd_degenerate(evoked_csd_sphere):
    """Test degenerate conditions."""
    evoked, csd, sphere = evoked_csd_sphere
    # Bad channels must be dropped or interpolated before CSD.
    warn_evoked = evoked.copy()
    warn_evoked.info['bads'].append(warn_evoked.ch_names[3])
    with pytest.raises(ValueError, match='Either drop.*or interpolate'):
        compute_current_source_density(warn_evoked)

    with pytest.raises(TypeError, match='must be an instance of'):
        compute_current_source_density(None)

    # All-zero sensor positions are rejected.
    fail_evoked = evoked.copy()
    with pytest.raises(ValueError, match='Zero or infinite position'):
        for ch in fail_evoked.info['chs']:
            ch['loc'][:3] = np.array([0, 0, 0])
        compute_current_source_density(fail_evoked, sphere=sphere)

    # As is a single non-finite position.
    with pytest.raises(ValueError, match='Zero or infinite position'):
        fail_evoked.info['chs'][3]['loc'][:3] = np.inf
        compute_current_source_density(fail_evoked, sphere=sphere)

    with pytest.raises(ValueError, match='No EEG channels found.'):
        fail_evoked = evoked.copy()
        fail_evoked.set_channel_types({ch_name: 'ecog' for ch_name in
                                       fail_evoked.ch_names})
        compute_current_source_density(fail_evoked, sphere=sphere)

    # Parameter validation: type errors vs. range errors.
    with pytest.raises(TypeError, match='lambda2'):
        compute_current_source_density(evoked, lambda2='0', sphere=sphere)

    with pytest.raises(ValueError, match='lambda2 must be between 0 and 1'):
        compute_current_source_density(evoked, lambda2=2, sphere=sphere)

    with pytest.raises(TypeError, match='stiffness must be'):
        compute_current_source_density(evoked, stiffness='0', sphere=sphere)

    with pytest.raises(ValueError, match='stiffness must be non-negative'):
        compute_current_source_density(evoked, stiffness=-2, sphere=sphere)

    with pytest.raises(TypeError, match='n_legendre_terms must be'):
        compute_current_source_density(evoked, n_legendre_terms=0.1,
                                       sphere=sphere)

    with pytest.raises(ValueError, match=('n_legendre_terms must be '
                                          'greater than 0')):
        compute_current_source_density(evoked, n_legendre_terms=0,
                                       sphere=sphere)

    with pytest.raises(ValueError, match='sphere must be'):
        compute_current_source_density(evoked, sphere=-0.1)

    with pytest.raises(ValueError, match=('sphere radius must be '
                                          'greater than 0')):
        compute_current_source_density(evoked, sphere=(-0.1, 0., 0., -1.))

    with pytest.raises(TypeError):
        compute_current_source_density(evoked, copy=2, sphere=sphere)

    # gh-7859
    # Raw / Evoked / Epochs built from the same data must give the same CSD.
    raw = RawArray(evoked.data, evoked.info)
    epochs = Epochs(
        raw, [[0, 0, 1]], tmin=0, tmax=evoked.times[-1] - evoked.times[0],
        baseline=None, preload=False, proj=False)
    epochs.drop_bad()
    assert len(epochs) == 1
    assert_allclose(epochs.get_data()[0], evoked.data)
    # CSD on Epochs requires preloaded data.
    with pytest.raises(RuntimeError,
                       match='Computing CSD requires.*preload'):
        compute_current_source_density(epochs)
    epochs.load_data()
    raw = compute_current_source_density(raw)
    assert not np.allclose(raw.get_data(), evoked.data)
    evoked = compute_current_source_density(evoked)
    assert_allclose(raw.get_data(), evoked.data)
    epochs = compute_current_source_density(epochs)
    assert_allclose(epochs.get_data()[0], evoked.data)
def get_events(raw):
    '''
    with the DBS, events are not able to be triggered so we have to use the
    pulses to determine the events
    '''
    # NOTE(review): function-scope imports; `numpy` is imported twice and
    # several names (Raw, Epochs, os, glob, re, interpolate, ...) appear
    # unused here. `tqdm` is used below but not imported in this function —
    # presumably imported at module level; verify.
    from mne.io import RawArray, Raw
    import numpy as np
    from mne import create_info, Epochs, make_fixed_length_events, pick_types, find_events
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    import glob, re
    from scipy import interpolate
    from scipy import signal
    #
    # High-pass the 4th channel to isolate the stimulation pulses.
    ch = raw._data[raw.info['ch_names'].index(raw.ch_names[3])].copy()
    b, a = signal.butter(3, 0.5, 'highpass')
    ch = signal.filtfilt(b, a, ch)
    #
    # NOTE(review): max_event_dist is never used below — confirm intent.
    min_event_dist = 1.5  # float(input('Minimum Event Distance? '))
    max_event_dist = 4  # float(input('Maximum Event Distance? '))
    #
    # Interactive loop: show a window of the signal, ask for a threshold,
    # detect candidate pulses, then let the user spot-check random events.
    done = False
    while not done:
        fig, ax = plt.subplots()
        minx = int(raw.info['sfreq'] * 10)
        maxx = int(raw.info['sfreq'] * 40)
        ax.plot(np.arange(minx, maxx) / raw.info['sfreq'], ch[minx:maxx])
        plt.show()
        threshold = None
        while not threshold:
            try:
                threshold = float(input('Threshold? '))
            except:  # NOTE(review): bare except swallows KeyboardInterrupt
                threshold = None
        step = int(raw.info['sfreq'] * min_event_dist)
        # find a bunch of events, not all of which will be right
        print('Finding events')
        events = list()
        for i in tqdm(range(step, len(ch) - step, 2 * step)):
            # Peak within the window must exceed the 10th-largest sample
            # by more than `threshold` to count as a pulse.
            max_index = np.argmax(abs(ch[i - step:i + step]))
            dist = np.sort(abs(ch[i - step:i + step]))
            compare_value = dist[-10]
            if ch[i - step + max_index] - compare_value > threshold:
                events.append(i - step + max_index)
        # Spot-check events in random order until the user is satisfied.
        ok = False
        i = 0
        indices = np.arange(len(events))
        np.random.shuffle(indices)
        while not ok and i < len(events):
            fig, ax = plt.subplots()
            ax.plot(ch[int(events[indices[i]] - raw.info['sfreq']):
                       int(events[indices[i]] + raw.info['sfreq'])])
            plt.show()
            i += 1
            ok = input('Enter to keep testing, type anything to stop\n')
        done = input(
            '%i events found. Enter to reset threshold, type anything to '
            'finish\n' % (len(events)))
    #
    # make a channel: a synthetic stim channel with a 100-sample pulse
    # at each detected event.
    info = create_info(['DBS'], raw.info['sfreq'], ['stim'], verbose=False)
    arr = np.zeros((1, len(raw.times)))
    for i in events:
        arr[0, i:i + 100] = 1
    event_ch = RawArray(arr, info, verbose=False)
    return event_ch
# Half-width (Hz) of the narrow band kept around each stimulation frequency.
freq_band = 0.1
# Stack one band-passed copy of the signal per stimulation frequency;
# assumes `raw`, `frequencies`, `sfreq`, `events`, `event_id`, `n_seconds`
# and `_bandpass_filter` are defined earlier — confirm in the full script.
ext_signal = np.vstack([_bandpass_filter(raw,
                                         lowcut=f - freq_band,
                                         highcut=f + freq_band)
                        for f in frequencies])

###############################################################################
# Creating an MNE Raw object from the extended signal and plot it

# Channel names get a frequency suffix, e.g. 'Oz-13Hz' (8 channels x 3 bands
# = 24 'eeg' channels).
info = create_info(
    ch_names=sum(list(map(lambda s: [ch + s for ch in raw.ch_names],
                          ["-13Hz", "-17Hz", "-21Hz"])), []),
    ch_types=['eeg'] * 24,
    sfreq=sfreq)
raw_ext = RawArray(ext_signal, info)
raw_ext.plot(duration=n_seconds, start=14, n_channels=24,
             scalings={'eeg': 5e-4}, color={'eeg': 'steelblue'})

###############################################################################
# Building Epochs and plotting 3 s of the signal from electrode Oz for a trial

epochs = Epochs(raw_ext, events, event_id, tmin=2, tmax=5, baseline=None)

n_seconds = 3
time = np.linspace(0, n_seconds, n_seconds * sfreq,
                   endpoint=False)[np.newaxis, :]
# One channel per frequency band, stepping by the original channel count.
channels = range(0, len(raw_ext.ch_names), len(raw.ch_names))
plt.figure(figsize=(7, 5))
for f, c in zip(frequencies, channels):
    plt.plot(epochs.get_data()[5, c, :].T, label=str(int(f)) + ' Hz')
def test_plot_raw_psd():
    """Test plotting of raw psds."""
    raw = _get_raw()
    # normal mode
    raw.plot_psd(average=False)
    # specific mode
    picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
    raw.plot_psd(tmax=None, picks=picks, area_mode='range', average=False,
                 spatial_colors=True)
    raw.plot_psd(tmax=20., color='yellow', dB=False, line_alpha=0.4,
                 n_overlap=0.1, average=False)
    plt.close('all')
    ax = plt.axes()
    # if ax is supplied:
    # Without picks there are multiple channel types, so one axes fails.
    pytest.raises(ValueError, raw.plot_psd, ax=ax, average=True)
    raw.plot_psd(tmax=None, picks=picks, ax=ax, average=True)
    plt.close('all')
    ax = plt.axes()
    with pytest.raises(ValueError, match='2 axes must be supplied, got 1'):
        raw.plot_psd(ax=ax, average=True)
    plt.close('all')
    ax = plt.subplots(2)[1]
    raw.plot_psd(tmax=None, ax=ax, average=True)
    plt.close('all')
    # topo psd
    ax = plt.subplot()
    raw.plot_psd_topo(axes=ax)
    plt.close('all')
    # with channel information not available
    for idx in range(len(raw.info['chs'])):
        raw.info['chs'][idx]['loc'] = np.zeros(12)
    with pytest.warns(RuntimeWarning, match='locations not available'):
        raw.plot_psd(spatial_colors=True, average=False)
    # with a flat channel
    raw[5, :] = 0
    # Check ylabel units for every dB/estimate combination.
    for dB, estimate in itertools.product((True, False),
                                          ('power', 'amplitude')):
        with pytest.warns(UserWarning, match='[Infinite|Zero]'):
            fig = raw.plot_psd(average=True, dB=dB, estimate=estimate)
        ylabel = fig.axes[1].get_ylabel()
        ends_dB = ylabel.endswith('mathrm{(dB)}$')
        if dB:
            assert ends_dB, ylabel
        else:
            assert not ends_dB, ylabel
        if estimate == 'amplitude':
            assert r'fT/cm/\sqrt{Hz}' in ylabel, ylabel
        else:
            assert estimate == 'power'
            assert '(fT/cm)²/Hz' in ylabel, ylabel
        ylabel = fig.axes[0].get_ylabel()
        if estimate == 'amplitude':
            assert r'fT/\sqrt{Hz}' in ylabel
        else:
            assert 'fT²/Hz' in ylabel
    # test reject_by_annotation
    raw = _get_raw()
    raw.set_annotations(Annotations([1, 5], [3, 3], ['test', 'test']))
    raw.plot_psd(reject_by_annotation=True)
    raw.plot_psd(reject_by_annotation=False)
    plt.close('all')
    # test fmax value checking
    with pytest.raises(ValueError, match='not exceed one half the sampling'):
        raw.plot_psd(fmax=50000)
    # test xscale value checking
    with pytest.raises(ValueError, match="Invalid value for the 'xscale'"):
        raw.plot_psd(xscale='blah')
    # gh-5046
    raw = read_raw_fif(raw_fname, preload=True).crop(0, 1)
    picks = pick_types(raw.info)
    raw.plot_psd(picks=picks, average=False)
    raw.plot_psd(picks=picks, average=True)
    plt.close('all')
    # Mixed fNIRS/NIRS channel types should each get their own axes pair.
    raw.set_channel_types({'MEG 0113': 'hbo', 'MEG 0112': 'hbr',
                           'MEG 0122': 'fnirs_raw', 'MEG 0123': 'fnirs_od'},
                          verbose='error')
    fig = raw.plot_psd()
    assert len(fig.axes) == 10
    plt.close('all')
    # gh-7631
    data = 1e-3 * np.random.rand(2, 100)
    info = create_info(['CH1', 'CH2'], 100)  # default channel type is misc
    raw = RawArray(data, info)
    picks = pick_types(raw.info, misc=True)
    raw.plot_psd(picks=picks, spatial_colors=False)
    plt.close('all')
def test_basics():
    """Test annotation class."""
    raw = read_raw_fif(fif_fname)
    assert raw.annotations is not None  # XXX to be fixed in #5416
    assert len(raw.annotations.onset) == 0  # XXX to be fixed in #5416
    pytest.raises(IOError, read_annotations, fif_fname)
    onset = np.array(range(10))
    duration = np.ones(10)
    description = np.repeat('test', 10)
    dt = datetime.utcnow()
    meas_date = raw.info['meas_date']
    # Test time shifts.
    for orig_time in [None, dt, meas_date[0], meas_date]:
        annot = Annotations(onset, duration, description, orig_time)
    # Mismatched lengths / nested sequences must raise.
    pytest.raises(ValueError, Annotations, onset, duration, description[:9])
    pytest.raises(ValueError, Annotations, [onset, 1], duration, description)
    pytest.raises(ValueError, Annotations, onset, [duration, 1], description)
    # Test combining annotations with concatenate_raws
    raw2 = raw.copy()
    # delta = duration of raw in seconds (one sample past the last time).
    delta = raw.times[-1] + 1. / raw.info['sfreq']
    orig_time = (meas_date[0] + meas_date[1] * 1e-6 + raw2._first_time)
    # offset = how far orig_time sits past the recording's meas_date.
    offset = orig_time - _handle_meas_date(raw2.info['meas_date'])
    annot = Annotations(onset, duration, description, orig_time)
    assert ' segments' in repr(annot)
    raw2.set_annotations(annot)
    assert_array_equal(raw2.annotations.onset, onset + offset)
    # set_annotations must copy, not alias.
    assert id(raw2.annotations) != id(annot)
    concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotations
    raw.annotations.delete(-1)
    assert_allclose(onset + offset + delta, raw.annotations.onset,
                    rtol=1e-5)
    assert_array_equal(annot.duration, raw.annotations.duration)
    assert_array_equal(raw.annotations.description, np.repeat('test', 10))
    # Test combining with RawArray and orig_times
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = (np.pi, 0)
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        ants = Annotations([1., 2.], [.5, .5], 'x',
                           np.pi + first_samp / sfreq)
        raw.set_annotations(ants)
        raws.append(raw)
    # One raw with no orig_time at all.
    raw = RawArray(data.copy(), info)
    raw.set_annotations(Annotations([1.], [.5], 'x', None))
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    # Each of the three joins produces one BAD and one EDGE boundary marker.
    boundary_idx = np.where(raw.annotations.description == 'BAD boundary')[0]
    assert len(boundary_idx) == 3
    raw.annotations.delete(boundary_idx)
    boundary_idx = np.where(raw.annotations.description ==
                            'EDGE boundary')[0]
    assert len(boundary_idx) == 3
    raw.annotations.delete(boundary_idx)
    # Onsets are now relative to the first raw's first_samp (123 s).
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 134., 135., 144., 145., 154.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154., 5.])
    assert_array_equal(raw.annotations.duration,
                       [.5, .5, .5, .5, .5, .5, 1.5])
    assert_array_equal(raw.annotations.description,
                       ['x', 'x', 'x', 'x', 'x', 'x', 'y'])
def test_plot_raw_psd(raw, raw_orig):
    """Test plotting of raw psds."""
    raw_unchanged = raw.copy()  # kept pristine for the annotation test below
    # normal mode
    fig = raw.plot_psd(average=False)
    fig.canvas.resize_event()
    # specific mode
    picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
    raw.plot_psd(tmax=None, picks=picks, area_mode='range', average=False,
                 spatial_colors=True)
    raw.plot_psd(tmax=20., color='yellow', dB=False, line_alpha=0.4,
                 n_overlap=0.1, average=False)
    plt.close('all')
    # one axes supplied
    ax = plt.axes()
    raw.plot_psd(tmax=None, picks=picks, ax=ax, average=True)
    plt.close('all')
    # two axes supplied
    _, axs = plt.subplots(2)
    raw.plot_psd(tmax=None, ax=axs, average=True)
    plt.close('all')
    # need 2, got 1
    ax = plt.axes()
    with pytest.raises(ValueError,
                       match='of length 2, while the length is 1'):
        raw.plot_psd(ax=ax, average=True)
    plt.close('all')
    # topo psd
    ax = plt.subplot()
    raw.plot_psd_topo(axes=ax)
    plt.close('all')
    # with channel information not available
    for idx in range(len(raw.info['chs'])):
        raw.info['chs'][idx]['loc'] = np.zeros(12)
    with pytest.warns(RuntimeWarning, match='locations not available'):
        raw.plot_psd(spatial_colors=True, average=False)
    # with a flat channel
    raw[5, :] = 0
    # Check titles and ylabel units for every dB/estimate combination.
    for dB, estimate in itertools.product((True, False),
                                          ('power', 'amplitude')):
        with pytest.warns(UserWarning, match='[Infinite|Zero]'):
            fig = raw.plot_psd(average=True, dB=dB, estimate=estimate)
        # check grad axes
        title = fig.axes[0].get_title()
        ylabel = fig.axes[0].get_ylabel()
        ends_dB = ylabel.endswith('mathrm{(dB)}$')
        unit = '(fT/cm)²/Hz' if estimate == 'power' else r'fT/cm/\sqrt{Hz}'
        assert title == 'Gradiometers', title
        assert unit in ylabel, ylabel
        if dB:
            assert ends_dB, ylabel
        else:
            assert not ends_dB, ylabel
        # check mag axes
        title = fig.axes[1].get_title()
        ylabel = fig.axes[1].get_ylabel()
        unit = 'fT²/Hz' if estimate == 'power' else r'fT/\sqrt{Hz}'
        assert title == 'Magnetometers', title
        assert unit in ylabel, ylabel
    # test reject_by_annotation
    raw = raw_unchanged
    raw.set_annotations(Annotations([1, 5], [3, 3], ['test', 'test']))
    raw.plot_psd(reject_by_annotation=True)
    raw.plot_psd(reject_by_annotation=False)
    plt.close('all')
    # test fmax value checking
    with pytest.raises(ValueError, match='must not exceed ½ the sampling'):
        raw.plot_psd(fmax=50000)
    # test xscale value checking
    with pytest.raises(ValueError, match="Invalid value for the 'xscale'"):
        raw.plot_psd(xscale='blah')
    # gh-5046
    raw = raw_orig.crop(0, 1)
    picks = pick_types(raw.info, meg=True)
    raw.plot_psd(picks=picks, average=False)
    raw.plot_psd(picks=picks, average=True)
    plt.close('all')
    # Mixed fNIRS/NIRS channel types should each get their own axes pair.
    raw.set_channel_types({'MEG 0113': 'hbo', 'MEG 0112': 'hbr',
                           'MEG 0122': 'fnirs_cw_amplitude',
                           'MEG 0123': 'fnirs_od'},
                          verbose='error')
    fig = raw.plot_psd()
    assert len(fig.axes) == 10
    plt.close('all')
    # gh-7631
    data = 1e-3 * np.random.rand(2, 100)
    info = create_info(['CH1', 'CH2'], 100)  # default channel type is misc
    raw = RawArray(data, info)
    picks = pick_types(raw.info, misc=True)
    raw.plot_psd(picks=picks, spatial_colors=False)
    plt.close('all')
# Build an MNE Raw from the last 6 columns of a DataFrame (2 stim + 4 eeg),
# band-pass it, and cut fixed-length epochs.
# NOTE(review): assumes `data` (DataFrame), `ch_names` and `sfreq` are
# defined earlier in the file — confirm against full source.
ch_types = ['stim'] * 2 + ['eeg'] * 4
montage = read_montage('standard_1005')

# get data and exclude Aux channel
data = data.values[:, -6:].T
data  # NOTE(review): bare expression — a no-op outside a notebook cell

# convert in Volts (from uVolts)
#data[:-1] *= 1e-6

# create mne objects
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq,
                   montage=montage)
raw = (RawArray(data=data, info=info))

# Setting up band-pass filter from 2 - 50 Hz
raw.filter(2, 50, method='iir')

## Plot the PSD of the EEG data just to make sure it looks alright
#raw.plot_psd(picks=[2]);

from mne import make_fixed_length_events, Epochs

# Make an events array with epoch times every .5 seconds
event = make_fixed_length_events(raw, 1, duration=0.5)
# Make an epochs array object from the raw dataset with events array event,
# length of 2 seconds
# NOTE(review): the comment above appears stale — tmax=4 yields 4 s epochs.
epochs = Epochs(raw, event, tmin=0, tmax=4, preload=True)
def test_simulate_calculate_head_pos_chpi():
    """Test calculation of cHPI positions with simulated data."""
    # Read info dict from raw FIF file
    info = read_info(raw_fname)
    # Tune the info structure
    chpi_channel = u'STI201'
    ncoil = len(info['hpi_results'][0]['order'])
    # One distinct frequency per HPI coil: 10, 15, 20, ... Hz.
    coil_freq = 10 + np.arange(ncoil) * 5
    hpi_subsystem = {'event_channel': chpi_channel,
                     'hpi_coils': [{'event_bits':
                                    np.array([256, 0, 256, 256],
                                             dtype=np.int32)},
                                   {'event_bits':
                                    np.array([512, 0, 512, 512],
                                             dtype=np.int32)},
                                   {'event_bits':
                                    np.array([1024, 0, 1024, 1024],
                                             dtype=np.int32)},
                                   {'event_bits':
                                    np.array([2048, 0, 2048, 2048],
                                             dtype=np.int32)}],
                     'ncoil': ncoil}
    info['hpi_subsystem'] = hpi_subsystem
    for fi, freq in enumerate(coil_freq):
        info['hpi_meas'][0]['hpi_coils'][fi]['coil_freq'] = freq
    picks = pick_types(info, meg=True, stim=True, eeg=False, exclude=[])
    info['sfreq'] = 100.  # this will speed it up a lot
    info = pick_info(info, picks)
    # Rename an existing stim channel to act as the cHPI event channel.
    info['chs'][info['ch_names'].index('STI 001')]['ch_name'] = 'STI201'
    info._update_redundant()
    info['projs'] = []

    info_trans = info['dev_head_t']['trans'].copy()
    # Initial device->head position as [quaternion(3), translation(3)].
    dev_head_pos_ini = np.concatenate([rot_to_quat(info_trans[:3, :3]),
                                       info_trans[:3, 3]])
    ez = np.array([0, 0, 1])  # Unit vector in z-direction of head coordinates

    # Define some constants
    duration = 10  # Time / s

    # Quotient of head position sampling frequency
    # and raw sampling frequency
    head_pos_sfreq_quotient = 0.01

    # Round number of head positions to the next integer
    S = int(duration * info['sfreq'] * head_pos_sfreq_quotient)
    assert S == 10
    dz = 0.001  # Shift in z-direction is 0.1mm for each step

    # Columns: [time, quat(3), translation(3), goodness, err, velocity].
    dev_head_pos = np.zeros((S, 10))
    dev_head_pos[:, 0] = np.arange(S) * info['sfreq'] * \
        head_pos_sfreq_quotient
    dev_head_pos[:, 1:4] = dev_head_pos_ini[:3]
    # Translation drifts along +z by dz per head-position sample.
    dev_head_pos[:, 4:7] = dev_head_pos_ini[3:] + \
        np.outer(np.arange(S) * dz, ez)
    dev_head_pos[:, 7] = 1.0
    # m/s
    dev_head_pos[:, 9] = dz / (info['sfreq'] * head_pos_sfreq_quotient)

    # Round number of samples to the next integer
    raw_data = np.zeros((len(picks), int(duration * info['sfreq'] + 0.5)))
    raw = RawArray(raw_data, info)
    add_chpi(raw, dev_head_pos)
    quats = _calculate_chpi_positions(
        raw, t_step_min=raw.info['sfreq'] * head_pos_sfreq_quotient,
        t_step_max=raw.info['sfreq'] * head_pos_sfreq_quotient,
        t_window=1.0)
    _assert_quats(quats, dev_head_pos, dist_tol=0.001, angle_tol=1.,
                  vel_atol=4e-3)  # 4 mm/s
def test_simulate_calculate_chpi_positions():
    """Test calculation of cHPI positions with simulated data."""
    # Read info dict from raw FIF file
    info = read_info(raw_fname)
    # Tune the info structure: install a 4-coil HPI subsystem driven by a
    # single stim channel, with one event bit per coil.
    chpi_channel = u'STI201'
    ncoil = len(info['hpi_results'][0]['order'])
    coil_freq = 10 + np.arange(ncoil) * 5
    hpi_subsystem = {
        'event_channel': chpi_channel,
        'hpi_coils': [
            {'event_bits': np.array([256, 0, 256, 256], dtype=np.int32)},
            {'event_bits': np.array([512, 0, 512, 512], dtype=np.int32)},
            {'event_bits': np.array([1024, 0, 1024, 1024], dtype=np.int32)},
            {'event_bits': np.array([2048, 0, 2048, 2048], dtype=np.int32)}],
        'ncoil': ncoil}
    info['hpi_subsystem'] = hpi_subsystem
    # FIX: loop variable renamed from ambiguous `l` (flake8 E741) to `fi`,
    # matching the sibling test above.
    for fi, freq in enumerate(coil_freq):
        info['hpi_meas'][0]['hpi_coils'][fi]['coil_freq'] = freq
    picks = pick_types(info, meg=True, stim=True, eeg=False, exclude=[])
    info['sfreq'] = 100.  # this will speed it up a lot
    info = pick_info(info, picks)
    # Rename the first stim channel to match the HPI event channel above.
    info['chs'][info['ch_names'].index('STI 001')]['ch_name'] = 'STI201'
    info._update_redundant()
    info['projs'] = []

    info_trans = info['dev_head_t']['trans'].copy()
    # Initial device-to-head position as [quaternion (3), translation (3)].
    dev_head_pos_ini = np.concatenate(
        [rot_to_quat(info_trans[:3, :3]), info_trans[:3, 3]])
    ez = np.array([0, 0, 1])  # Unit vector in z-direction of head coordinates

    # Define some constants
    duration = 30  # Time / s

    # Quotient of head position sampling frequency
    # and raw sampling frequency
    head_pos_sfreq_quotient = 0.1

    # Round number of head positions to the next integer
    S = int(duration / (info['sfreq'] * head_pos_sfreq_quotient))
    dz = 0.001  # Shift in z-direction is 0.1mm for each step

    dev_head_pos = np.zeros((S, 10))
    dev_head_pos[:, 0] = np.arange(S) * info['sfreq'] * head_pos_sfreq_quotient
    dev_head_pos[:, 1:4] = dev_head_pos_ini[:3]
    dev_head_pos[:, 4:7] = dev_head_pos_ini[3:] + \
        np.outer(np.arange(S) * dz, ez)
    dev_head_pos[:, 7] = 1.0

    # cm/s
    dev_head_pos[:, 9] = 100 * dz / (info['sfreq'] * head_pos_sfreq_quotient)

    # Round number of samples to the next integer
    raw_data = np.zeros((len(picks), int(duration * info['sfreq'] + 0.5)))
    raw = RawArray(raw_data, info)

    # Three fixed unit dipoles used as the simulated brain signal sources.
    dip = Dipole(np.array([0.0, 0.1, 0.2]),
                 np.array([[0.0, 0.0, 0.0],
                           [0.0, 0.0, 0.0],
                           [0.0, 0.0, 0.0]]),
                 np.array([1e-9, 1e-9, 1e-9]),
                 np.array([[1.0, 0.0, 0.0],
                           [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
                 np.array([1.0, 1.0, 1.0]), 'dip')
    sphere = make_sphere_model('auto', 'auto', info=info,
                               relative_radii=(1.0, 0.9),
                               sigmas=(0.33, 0.3))
    fwd, stc = make_forward_dipole(dip, sphere, info)
    stc.resample(info['sfreq'])
    raw = simulate_raw(raw, stc, None, fwd['src'], sphere, cov=None,
                       blink=False, ecg=False, chpi=True,
                       head_pos=dev_head_pos, mindist=1.0, interp='zero',
                       verbose=None)

    quats = _calculate_chpi_positions(
        raw, t_step_min=raw.info['sfreq'] * head_pos_sfreq_quotient,
        t_step_max=raw.info['sfreq'] * head_pos_sfreq_quotient,
        t_window=1.0)
    # Recovered positions should match the simulated trajectory.
    _assert_quats(quats, dev_head_pos, dist_tol=0.001, angle_tol=1.)
def test_psd():
    """Tests the welch and multitaper PSD."""
    raw = read_raw_fif(raw_fname)
    picks_psd = [0, 1]

    # Populate raw with sinusoids so the PSD has known peaks (8 and 50 Hz).
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    freqs_sig = [8., 50.]
    for ix, freq in zip(picks_psd, freqs_sig):
        data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)

    tmin, tmax = 0, 20  # use a few seconds of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70Hz
    n_fft = 128

    # -- Raw --
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    kws_welch = dict(n_fft=n_fft)
    kws_mt = dict(low_bias=True)
    funcs = [(psd_welch, kws_welch), (psd_multitaper, kws_mt)]

    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)
        psds, freqs = func(raw, proj=False, **kws)
        psds_proj, freqs_proj = func(raw, proj=True, **kws)

        assert psds.shape == (len(kws['picks']), len(freqs))
        assert np.sum(freqs < 0) == 0
        assert np.sum(psds < 0) == 0

        # Is power found where it should be
        ixs_max = np.argmax(psds, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert (np.abs(ixmax - ixtrue) < 2)

        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj)
        # Array input shouldn't work
        pytest.raises(ValueError, func, raw[:3, :20][0])

    # test n_per_seg in psd_welch (and padding)
    psds1, freqs1 = psd_welch(raw, proj=False, n_fft=128, n_per_seg=128,
                              **kws_psd)
    psds2, freqs2 = psd_welch(raw, proj=False, n_fft=256, n_per_seg=128,
                              **kws_psd)
    # Doubling n_fft (zero-padding) doubles the number of frequency bins.
    assert (len(freqs1) == np.floor(len(freqs2) / 2.))
    assert (psds1.shape[-1] == np.floor(psds2.shape[-1] / 2.))

    # Invalid parameter combinations must raise.
    kws_psd.update(dict(n_fft=tmax * 1.1 * raw.info['sfreq']))
    with pytest.raises(ValueError,
                       match='n_fft is not allowed to be > n_tim'):
        psd_welch(raw, proj=False, n_per_seg=None, **kws_psd)
    kws_psd.update(dict(n_fft=128, n_per_seg=64, n_overlap=90))
    with pytest.raises(ValueError, match='n_overlap cannot be greater'):
        psd_welch(raw, proj=False, **kws_psd)
    with pytest.raises(ValueError, match='No frequencies found'):
        psd_array_welch(np.zeros((1, 1000)), 1000., fmin=10, fmax=1)

    # -- Epochs/Evoked --
    events = read_events(event_fname)
    events[:, 0] -= first_samp
    tmin, tmax, event_id = -0.5, 0.5, 1
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax,
                    picks=picks_psd, proj=False, preload=True,
                    baseline=None)
    evoked = epochs.average()

    tmin_full, tmax_full = -1, 1
    epochs_full = Epochs(raw, events[:10], event_id, tmin_full, tmax_full,
                         picks=picks_psd, proj=False, preload=True,
                         baseline=None)
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    funcs = [(psd_welch, kws_welch), (psd_multitaper, kws_mt)]

    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)

        psds, freqs = func(epochs[:1], proj=False, **kws)
        psds_proj, freqs_proj = func(epochs[:1], proj=True, **kws)
        psds_f, freqs_f = func(epochs_full[:1], proj=False, **kws)

        # this one will fail if you add for example 0.1 to tmin
        assert_array_almost_equal(psds, psds_f, 27)
        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj, 27)

        # Is power found where it should be
        ixs_max = np.argmax(psds.mean(0), axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert (np.abs(ixmax - ixtrue) < 2)

        assert (psds.shape == (1, len(kws['picks']), len(freqs)))
        assert (np.sum(freqs < 0) == 0)
        assert (np.sum(psds < 0) == 0)

        # Array input shouldn't work
        pytest.raises(ValueError, func, epochs.get_data())

        # Testing evoked (doesn't work w/ compute_epochs_psd)
        psds_ev, freqs_ev = func(evoked, proj=False, **kws)
        psds_ev_proj, freqs_ev_proj = func(evoked, proj=True, **kws)

        # Is power found where it should be
        ixs_max = np.argmax(psds_ev, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
            assert (np.abs(ixmax - ixtrue) < 2)

        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
        assert (psds_ev.shape == (len(kws['picks']), len(freqs)))
    # NOTE(review): these first statements are the tail of a function whose
    # `def` is not visible in this chunk (presumably `ica_function`, given
    # the call below) -- `icas`, `icas_names`, `icas_name`, and `ica` are
    # defined earlier in that function. Indentation reconstructed; confirm
    # against the original file.
    print('Creating ica files')
    icas_names.append(icas_name)
    icas.append(ica)
    return icas, icas_names


# Build (or load) the ICA decompositions and their file names.
icas, icas_names = ica_function(save=False, overwrite=False)

#icas[0]._pre_whiten(${1:raws_filt}, info, picks=64)
bm = icas[0].get_components()
#plot_psd(m)

#%%
# Plot the PSD of the ICA mixing matrix treated as raw data.
b = icas[0].get_components()
raw = RawArray(b, raws_filt[0].info)
raw.plot_psd(area_mode=None, show=True, average=False, fmin=1.0,
             fmax=80.0, dB=False)

#%% Load saved icas from icas_names = paths
#BUG ICAS_NAMES not defined, you should run
def loading_icas():
    """Read previously saved ICA solutions, one per subject."""
    icas = []
    for subj in range(len(subjects)):
        # Path of this subject's saved ICA file.
        icas_names = os.path.join(dir_icas,
                                  'S' + str(subjects[subj]) + '_ica.fif')
        # NOTE(review): `icas_names` is a single path string here, so
        # `icas_names[subj]` indexes a *character* of the path -- this looks
        # like a bug; `read_ica(icas_names)` was probably intended.
        loaded_ica = read_ica(icas_names[subj])
        icas.append(loaded_ica)
    return icas
def test_fnirs_channel_naming_and_order_custom_optical_density():
    """Ensure fNIRS channel checking on manually created data."""
    data = np.random.normal(size=(6, 10))

    # Start with a correctly named raw intensity dataset
    # These are the steps required to build an fNIRS Raw object from scratch
    ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760',
                'S2_D1 850', 'S3_D1 760', 'S3_D1 850']
    ch_types = np.repeat("fnirs_od", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)

    # Store each channel's wavelength in loc[9], alternating 760/850 nm.
    freqs = np.tile([760, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f

    freqs = np.unique(_channel_frequencies(raw.info))
    picks = _check_channels_ordered(raw.info, freqs)
    assert len(picks) == len(raw.ch_names)
    assert len(picks) == 6

    # Check block naming for optical density: all 760s first, then all
    # 850s, which the order checker must reject.
    ch_names = ['S1_D1 760', 'S2_D1 760', 'S3_D1 760',
                'S1_D1 850', 'S2_D1 850', 'S3_D1 850']
    ch_types = np.repeat("fnirs_od", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    freqs = np.repeat([760, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f
    with pytest.raises(ValueError, match='channels not ordered correctly'):
        _check_channels_ordered(raw.info, [760, 850])

    # and this is how you would fix the ordering, then it should pass
    raw.pick(picks=[0, 3, 1, 4, 2, 5])
    _check_channels_ordered(raw.info, [760, 850])

    # Check that if you mix types (optical density + haemoglobin) you get
    # an error
    ch_names = ['S1_D1 hbo', 'S1_D1 hbr', 'S2_D1 hbo',
                'S2_D1 hbr', 'S3_D1 hbo', 'S3_D1 hbr']
    ch_types = np.tile(["hbo", "hbr"], 3)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw2 = RawArray(data, info, verbose=True)
    raw.add_channels([raw2])
    with pytest.raises(ValueError, match='does not support a combination'):
        _check_channels_ordered(raw.info, [760, 850])
n_signal = eeg_signal.shape[0] # the number of signals n_sample = eeg_signal.shape[1] # dimension of each signal # Normalize the data to be run b = eeg_signal b = ((b.T - np.mean(b, axis=1)) / np.std(b, axis=1)).T # Define the parameters # 128 kernels with size of 201 size_kernel = [2, 61] # size_kernel = [2, 51] ch_names = ['EEG%03d' % i for i in range(n_signal)] info = create_info(ch_names, sfreq=sfreq, ch_types='eeg') raw = RawArray(eeg_signal * 1e-6, info) raw.plot(scalings=dict(eeg='auto'), duration=300) # Optim options max_it = 200 # the number of iterations tol = np.float64(1e-3) # the stop threshold for the algorithm # RUN THE ALGORITHM [d, z, Dz, list_obj_val, list_obj_val_filter, list_obj_val_z, reconstr_err] = \ CSC.learn_conv_sparse_coder(b, size_kernel, max_it, tol, random_state=42) plt.figure() plt.plot(d[0, :]) plt.plot(d[1, :]) plt.show()
def test_resample():
    """Test resample (with I/O and multiple files)."""
    tempdir = _TempDir()
    raw = Raw(fif_fname).crop(0, 3, False)
    raw.load_data()
    raw_resamp = raw.copy()
    sfreq = raw.info['sfreq']
    # test parallel on upsample
    raw_resamp.resample(sfreq * 2, n_jobs=2)
    assert_equal(raw_resamp.n_times, len(raw_resamp.times))
    raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
    raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
    assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
    assert_equal(raw.n_times, raw_resamp.n_times / 2)
    assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
    assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
    # test non-parallel on downsample
    raw_resamp.resample(sfreq, n_jobs=1)
    assert_equal(raw_resamp.info['sfreq'], sfreq)
    assert_equal(raw._data.shape, raw_resamp._data.shape)
    assert_equal(raw.first_samp, raw_resamp.first_samp)
    # BUGFIX: this previously compared raw.last_samp with itself (a
    # tautology); it must compare against the round-tripped instance.
    assert_equal(raw.last_samp, raw_resamp.last_samp)
    # upsampling then downsampling doubles resampling error, but this still
    # works (hooray). Note that the stim channels had to be sub-sampled
    # without filtering to be accurately preserved
    # note we have to treat MEG and EEG+STIM channels differently (tols)
    assert_allclose(raw._data[:306, 200:-200],
                    raw_resamp._data[:306, 200:-200],
                    rtol=1e-2, atol=1e-12)
    assert_allclose(raw._data[306:, 200:-200],
                    raw_resamp._data[306:, 200:-200],
                    rtol=1e-2, atol=1e-7)

    # now check multiple file support w/resampling, as order of operations
    # (concat, resample) should not affect our data
    raw1 = raw.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw4 = raw.copy()
    raw1 = concatenate_raws([raw1, raw2])
    raw1.resample(10.)
    raw3.resample(10.)
    raw4.resample(10.)
    raw3 = concatenate_raws([raw3, raw4])
    assert_array_equal(raw1._data, raw3._data)
    assert_array_equal(raw1._first_samps, raw3._first_samps)
    assert_array_equal(raw1._last_samps, raw3._last_samps)
    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
    assert_equal(raw1.first_samp, raw3.first_samp)
    assert_equal(raw1.last_samp, raw3.last_samp)
    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])

    # test resampling of stim channel

    # basic decimation
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(8.)._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0]])

    # decimation of multiple stim channels
    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
    assert_allclose(raw.resample(8.)._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0],
                     [1, 1, 0, 0, 1, 1, 0, 0]])

    # decimation that could potentially drop events if the decimation is
    # done naively
    stim = [0, 0, 0, 1, 1, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(4.)._data,
                    [[0, 1, 1, 0]])

    # two events are merged in this case (warning)
    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(8.)
    assert_true(len(w) == 1)

    # events are dropped in this case (warning)
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(4.)
    assert_true(len(w) == 1)

    # test resampling events: this should no longer give a warning
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    events = find_events(raw)
    raw, events = raw.resample(4., events=events)
    assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))

    # test copy flag
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    raw_resampled = raw.resample(4., copy=True)
    assert_true(raw_resampled is not raw)
    raw_resampled = raw.resample(4., copy=False)
    assert_true(raw_resampled is raw)

    # resample should still work even when no stim channel is present
    raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
    raw.resample(10)
    assert_true(len(raw) == 10)
def main():
    """Load BCI-IV text datasets, fit a CSP-based sliding-window
    classifier, and report its score on the held-out set.

    NOTE(review): legacy Python 2 code (print statements, ``time.clock``);
    everything after the first ``exit()`` call is dead code kept for
    reference.
    """
    print "Using MNE", mne.__version__

    opts = parse_args()
    verbose = opts.debug

    # constants
    sfreq = 100.0  # sampling rate assumed for the text-file datasets (Hz)
    class_labels = {'left': 2, 'right': 3}

    # files
    train_fname = "data/custom/bci4/train/ds1g.txt"
    test_fname = "data/custom/bci4/test/ds1g.txt"
    #train_fname = "data/custom/bci4/active_train/ds1g.txt"
    #test_fname = "data/custom/bci4/active_test/ds1g.txt"

    #################
    # LOAD DATA
    eval_start = time.clock()
    # load train data from training file
    [train_nparray, train_info] = file_to_nparray(
        train_fname, sfreq=sfreq, verbose=verbose)
    end = time.clock()
    print "train dataset", train_fname, "loaded in ", str(
        end - eval_start), "seconds"

    eval_start = time.clock()
    # load test data from test file
    [test_nparray, test_info] = file_to_nparray(
        test_fname, sfreq=sfreq, verbose=verbose)
    end = time.clock()
    print "test dataset", test_fname, "loaded in ", str(
        end - eval_start), "seconds"

    total_start = time.clock()

    ##################
    # CLASSIFY DATA
    # pick a subset of total electrodes, or else just get all of the
    # channels of type 'eeg'
    picks = getPicks('motor16') or pick_types(train_info, eeg=True)
    # hyperparam 1
    bandpass_filters = get_bandpass_ranges()
    # hyperparam 2
    epoch_bounds = get_window_ranges()

    # extract X,y from train data
    read_start = time.clock()
    train_raw = RawArray(train_nparray, train_info, verbose=verbose)
    [train_X, train_y] = X_y_from_sliding_windows(train_raw, picks)
    test_raw = RawArray(test_nparray, test_info, verbose=verbose)
    [test_X, test_y] = X_y_from_sliding_windows(test_raw, picks)
    read_end = time.clock()
    print "read sliding windows in ", str(read_end - read_start), "seconds"
    print "train X", train_X.shape
    print "test X", test_X.shape

    # custom grid search
    estimator = CSPEstimator(bandpass_filters=bandpass_filters,
                             epoch_bounds=[(0.0, 0.0)],
                             num_spatial_filters=6,
                             class_labels=class_labels,
                             sfreq=sfreq,
                             picks=picks,
                             num_votes=6,
                             consecutive=True)
    estimator.fit(train_X, train_y)

    print "-------------------------------------------"
    score = estimator.score(test_X, test_y)
    print "-------------------------------------------"
    print "average estimator score", score
    print

    exit()

    # just a pause here to allow visual inspection of top classifiers
    # picked by grid search
    time.sleep(15)

    # now we go into predict mode, in which we are going over the test data
    # using sliding windows
    # this is a simulation of what would happen if we were in "online" mode
    # with live data
    # for each window, a prediction is given by the ensemble of top
    # classifiers
    # next to this, we see the actual labels from the real data (i.e. the y
    # vector)
    print "-------------------------------------------"
    print "PREDICT"
    print
    exit()
def test_scalings_int():
    """Test that auto scalings access samples using integers."""
    n_channels, n_samples = 1, 500
    info = create_info(n_channels, 1000., 'eeg')
    silent_data = np.zeros((n_channels, n_samples))
    raw = RawArray(silent_data, info)
    raw.plot(scalings='auto')
def simulate_nirs_raw(sfreq=3., amplitude=1., annot_desc='A',
                      sig_dur=300., stim_dur=5.,
                      isi_min=15., isi_max=45., ch_name='Simulated'):
    """
    Create simulated data.

    .. warning:: Work in progress: I am trying to think on the best API.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number, Array of numbers
        The amplitude of the signal to simulate in uM.
    annot_desc : String, Array of strings
        The name of the annotations for simulated amplitudes.
    sig_dur : Number
        The length of the signal to generate in seconds.
    stim_dur : Number, Array of numbers
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.
    ch_name : String
        Channel name to be used in returned raw instance.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    # Imported lazily so nilearn/pandas are only required when simulating.
    from nilearn.glm.first_level import make_first_level_design_matrix
    from pandas import DataFrame

    # Normalize scalar arguments to parallel lists (one entry per
    # condition).
    if type(amplitude) is not list:
        amplitude = [amplitude]
    if type(annot_desc) is not list:
        annot_desc = [annot_desc]
    if type(stim_dur) is not list:
        stim_dur = [stim_dur]

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    assert len(amplitude) == len(annot_desc), "Same number of amplitudes " \
                                              "as annotations required."
    assert len(amplitude) == len(stim_dur), "Same number of amplitudes as " \
                                            "durations required."

    # Draw random stimulus onsets until within 60 s of the signal end.
    # NOTE: uses the global np.random state (not seedable via an argument).
    onset = 0.
    onsets = []
    conditions = []
    durations = []
    while onset < sig_dur - 60:
        c_idx = np.random.randint(0, len(amplitude))
        onset += np.random.uniform(isi_min, isi_max) + stim_dur[c_idx]
        onsets.append(onset)
        conditions.append(annot_desc[c_idx])
        durations.append(stim_dur[c_idx])

    events = DataFrame({'trial_type': conditions,
                        'onset': onsets,
                        'duration': durations})
    # Convolve events with the canonical HRF via a GLM design matrix; the
    # constant regressor is dropped so only event responses remain.
    dm = make_first_level_design_matrix(frame_times, events,
                                        drift_model='polynomial',
                                        drift_order=0)
    dm = dm.drop(columns='constant')

    annotations = Annotations(onsets, durations, conditions)

    info = create_info(ch_names=[ch_name], sfreq=sfreq, ch_types=['hbo'])

    # Scale each condition's regressor by its requested amplitude.
    for idx, annot in enumerate(annot_desc):
        if annot in dm.columns:
            dm[annot] *= amplitude[idx]

    # Sum regressors to one channel; 1e-6 converts uM to Raw's base units.
    a = np.sum(dm.to_numpy(), axis=1) * 1.e-6
    a = a.reshape(-1, 1).T

    raw = RawArray(a, info, verbose=False)
    raw.set_annotations(annotations)

    return raw
def test_filter_picks():
    """Test filter picking."""
    data = np.random.RandomState(0).randn(3, 1000)
    fs = 1000.
    kwargs = dict(l_freq=None, h_freq=40.)
    # Reference: all three channels low-passed.
    filt = filter_data(data, fs, **kwargs)
    # don't include seeg or stim in this list because they are in the one
    # below to ensure default cases are treated properly
    for kind in ('eeg', 'grad', 'emg', 'misc'):
        for picks in (None, [-2], kind, 'k'):
            # With always at least one data channel
            info = create_info(['s', 'k', 't'], fs, ['seeg', kind, 'stim'])
            raw = RawArray(data.copy(), info)
            raw.filter(picks=picks, **kwargs)
            if picks is None:
                if kind in _DATA_CH_TYPES_SPLIT:  # should be included
                    want = np.concatenate((filt[:2], data[2:]))
                else:  # shouldn't
                    want = np.concatenate((filt[:1], data[1:]))
            else:  # just the kind of interest ([-2], kind, 'k' should be
                # equivalent: all select the middle channel)
                want = np.concatenate((data[:1], filt[1:2], data[2:]))
            assert_allclose(raw.get_data(), want)
            # Now with sometimes no data channels
            info = create_info(['k', 't'], fs, [kind, 'stim'])
            raw = RawArray(data[1:].copy(), info.copy())
            if picks is None and kind not in _DATA_CH_TYPES_SPLIT:
                # Default picks with no data channels must raise.
                with pytest.raises(ValueError, match='yielded no channels'):
                    raw.filter(picks=picks, **kwargs)
            else:
                raw.filter(picks=picks, **kwargs)
                # Same expectation as above minus the dropped 'seeg' row.
                want = want[1:]
                assert_allclose(raw.get_data(), want)
def test_raw_reject():
    """Test raw data getter with annotation reject."""
    sfreq = 100.
    info = create_info(['a', 'b', 'c', 'd', 'e'], sfreq, ch_types='eeg')
    raw = RawArray(np.ones((5, 15000)), info)
    with pytest.warns(RuntimeWarning, match='outside the data range'):
        raw.set_annotations(Annotations([2, 100, 105, 148],
                                        [2, 8, 5, 8], 'BAD'))
    data, times = raw.get_data([0, 1, 3, 4], 100, 11200,  # 1-112 sec
                               'omit', return_times=True)
    # Sample indices covered by the BAD annotations within [100, 11200).
    bad_times = np.concatenate([np.arange(200, 400),
                                np.arange(10000, 10800),
                                np.arange(10500, 11000)])
    expected_times = np.setdiff1d(np.arange(100, 11200), bad_times) / sfreq
    assert_allclose(times, expected_times)

    # with orig_time and complete overlap
    raw = read_raw_fif(fif_fname)
    raw.set_annotations(Annotations(onset=[1, 4, 5] + raw._first_time,
                                    duration=[1, 3, 1], description='BAD',
                                    orig_time=raw.info['meas_date']))
    t_stop = 18.
    assert raw.times[-1] > t_stop
    n_stop = int(round(t_stop * raw.info['sfreq']))
    n_drop = int(round(4 * raw.info['sfreq']))
    assert len(raw.times) >= n_stop
    data, times = raw.get_data(range(10), 0, n_stop, 'omit', True)
    assert data.shape == (10, n_stop - n_drop)
    assert times[-1] == raw.times[n_stop - 1]
    assert_array_equal(data[:, -100:], raw[:10, n_stop - 100:n_stop][0])

    data, times = raw.get_data(range(10), 0, n_stop, 'NaN', True)
    assert_array_equal(data.shape, (10, n_stop))
    assert times[-1] == raw.times[n_stop - 1]
    t_1, t_2 = raw.time_as_index([1, 2], use_rounding=True)
    assert np.isnan(data[:, t_1:t_2]).all()  # 1s -2s
    # BUGFIX: the closing parenthesis was misplaced before --
    # `np.isnan(arr.any())` applied isnan to a bool and the assertions
    # always passed. The intent is that data outside the annotated span
    # contains no NaN.
    assert not np.isnan(data[:, :t_1]).any()
    assert not np.isnan(data[:, t_2:]).any()
    assert_array_equal(data[:, -100:], raw[:10, n_stop - 100:n_stop][0])
    assert_array_equal(raw.get_data(), raw[:][0])

    # Test _sync_onset
    times = [10, -88, 190]
    onsets = _sync_onset(raw, times)
    assert_array_almost_equal(onsets, times - raw.first_samp /
                              raw.info['sfreq'])
    assert_array_almost_equal(times, _sync_onset(raw, onsets, True))
def main():
    """Load motor-imagery CSV datasets, train two CSP-based classifiers
    (default/LDA and logistic-regression variants), and compare scores.

    NOTE(review): legacy Python 2 code (print statements, ``time.clock``,
    ``xrange``); everything after the first un-commented ``exit()`` is dead
    code kept for reference.
    """
    print "Using MNE", mne.__version__

    opts = parse_args()
    verbose = opts.debug

    # constants
    sfreq = 125.0  # sampling rate assumed for the CSV datasets (Hz)
    class_labels = {'left': 2, 'right': 3}

    # files
    train_fname = "data/custom/trials/motor-imagery-subject-A-train-1.csv"
    test_fname = "data/custom/trials/motor-imagery-subject-A-test-1.csv"
    #train_fname = "data/custom/trials/motor-imagery-trial-subject-A-10-26-2016_01-54-59.csv"
    #train_fname = "data/custom/bci4/train/ds1g.txt"
    #test_fname = "data/custom/bci4/test/ds1g.txt"
    #train_fname = "data/custom/bci4/active_train/ds1b.txt"
    #test_fname = "data/custom/bci4/active_test/ds1b.txt"

    #################
    # LOAD DATA
    eval_start = time.clock()
    # load train data from training file
    [train_nparray, train_info] = file_to_nparray(
        train_fname, sfreq=sfreq, verbose=verbose)
    end = time.clock()
    print "train dataset", train_fname, "loaded in ", str(
        end - eval_start), "seconds"

    eval_start = time.clock()
    # load test data from test file
    [test_nparray, test_info] = file_to_nparray(
        test_fname, sfreq=sfreq, verbose=verbose)
    end = time.clock()
    print "test dataset", test_fname, "loaded in ", str(
        end - eval_start), "seconds"

    total_start = time.clock()

    ##################
    # CLASSIFY DATA
    # pick a subset of total electrodes, or else just get all of the
    # channels of type 'eeg'
    picks = getPicks('openbci16') or pick_types(train_info, eeg=True)
    # hyperparam 1
    bandpass_filters = get_bandpass_ranges()
    # hyperparam 2
    epoch_bounds = get_window_ranges()

    # extract X,y from train data
    train_raw = RawArray(train_nparray, train_info, verbose=verbose)
    train_events = mne.find_events(train_raw, shortest_event=0,
                                   consecutive=True, verbose=verbose)
    train_epochs = Epochs(raw=train_raw, events=train_events,
                          event_id=class_labels, tmin=-0.5, tmax=3.5,
                          proj=False, picks=picks, baseline=None,
                          preload=True, add_eeg_ref=False, verbose=verbose)
    train_X = train_epochs.get_data()
    train_y = train_epochs.events[:, -1] - 2  # convert classes [2,3] to [0,1]

    # extract X,y from test data
    test_raw = RawArray(test_nparray, test_info, verbose=verbose)
    test_events = mne.find_events(test_raw, shortest_event=0,
                                  consecutive=True, verbose=verbose)
    test_epochs = Epochs(raw=test_raw, events=test_events,
                         event_id=class_labels, tmin=-0.5, tmax=3.5,
                         proj=False, picks=picks, baseline=None,
                         preload=True, add_eeg_ref=False, verbose=verbose)
    test_X = test_epochs.get_data()
    test_y = test_epochs.events[:, -1] - 2  # convert classes [2,3] to [0,1]

    # custom grid search
    estimator1 = CSPEstimator(bandpass_filters=bandpass_filters,
                              epoch_bounds=epoch_bounds,
                              num_spatial_filters=6,
                              class_labels=class_labels,
                              sfreq=sfreq,
                              picks=picks,
                              num_votes=6,
                              consecutive=True)
    estimator1.fit(train_X, train_y)
    # exit()
    print
    # print
    print "-------------------------------------------"
    print "-------------------------------------------"
    print "-------------------------------------------"
    print "-------------------------------------------"
    print
    time.sleep(10)

    # custom grid search
    estimator2 = CSPEstimator(bandpass_filters=bandpass_filters,
                              epoch_bounds=epoch_bounds,
                              num_spatial_filters=6,
                              class_labels=class_labels,
                              sfreq=sfreq,
                              picks=picks,
                              num_votes=6,
                              consecutive=True)
    estimator2.fit(train_X, train_y, type="lr")

    print "-------------------------------------------"
    print "LDA"
    score = estimator1.score(test_X, test_y)
    print "average estimator score", score
    print "-------------------------------------------"
    print "LOGISTIC REGRESSION"
    score = estimator2.score(test_X, test_y)
    print "average estimator score", score
    print "training run time", round(time.clock() - total_start, 1), "sec"

    exit()

    # just a pause here to allow visual inspection of top classifiers
    # picked by grid search
    time.sleep(15)

    # now we go into predict mode, in which we are going over the test data
    # using sliding windows
    # this is a simulation of what would happen if we were in "online" mode
    # with live data
    # for each window, a prediction is given by the ensemble of top
    # classifiers
    # next to this, we see the actual labels from the real data (i.e. the y
    # vector)
    print "-------------------------------------------"
    print "PREDICT"
    print

    ####################################################
    # looping over test data in windows
    online_data = test_raw._data[picks]
    online_labels = test_raw.pick_types(stim=True)._data
    print "test_X", test_X.shape
    print "test RAW data", online_data.shape
    print "test RAW labels", online_labels.shape

    window_size = 150  # 50 sample = 0.5 s
    window_overlap = 150
    np.set_printoptions(suppress=True)
    for i in xrange(0, online_data.shape[1] - window_size, window_overlap):
        start = i
        end = i + window_size
        window = online_data[:, start:end]
        class_labels = online_labels[:, start:end]
        #print window.shape
        #print class_labels
        # NOTE(review): `estimator` is never defined in this function (only
        # estimator1/estimator2 exist) -- this dead code would raise
        # NameError if the exit() above were removed.
        estimator.predict(window, class_labels)
        #print i,":",i+window_size

    exit()

    estimator.predict(test_X[0:10], test_y[0:10])
    print
    print "total run time", round(time.clock() - total_start, 1), "sec"
    exit()
def test_annotation_filtering():
    """Test that annotations work properly with filtering."""
    # Create data with just a DC component
    data = np.ones((1, 1000))
    info = create_info(1, 1000., 'eeg')
    raws = [RawArray(data * (ii + 1), info) for ii in range(4)]
    kwargs_pass = dict(l_freq=None, h_freq=50., fir_design='firwin')
    kwargs_stop = dict(l_freq=50., h_freq=None, fir_design='firwin')
    # lowpass filter, which should not modify the data
    raws_pass = [raw.copy().filter(**kwargs_pass) for raw in raws]
    # highpass filter, which should zero it out
    raws_stop = [raw.copy().filter(**kwargs_stop) for raw in raws]
    # concat the original and the filtered segments
    raws_concat = concatenate_raws([raw.copy() for raw in raws])
    raws_zero = raws_concat.copy().apply_function(lambda x: x * 0)
    raws_pass_concat = concatenate_raws(raws_pass)
    raws_stop_concat = concatenate_raws(raws_stop)
    # make sure we did something reasonable with our individual-file
    # filtering
    assert_allclose(raws_concat[0][0], raws_pass_concat[0][0], atol=1e-14)
    assert_allclose(raws_zero[0][0], raws_stop_concat[0][0], atol=1e-14)
    # ensure that our Annotations cut up the filtering properly: filtering
    # each segment separately (skipping 'edge' annotations) must equal
    # filtering before concatenation.
    raws_concat_pass = raws_concat.copy().filter(
        skip_by_annotation='edge', **kwargs_pass)
    assert_allclose(raws_concat[0][0], raws_concat_pass[0][0], atol=1e-14)
    raws_concat_stop = raws_concat.copy().filter(
        skip_by_annotation='edge', **kwargs_stop)
    assert_allclose(raws_zero[0][0], raws_concat_stop[0][0], atol=1e-14)
    # one last test: let's cut out a section entirely:
    # here the 1-3 second window should be skipped
    raw = raws_concat.copy()
    raw.annotations.append(1., 2., 'foo')
    with catch_logging() as log:
        raw.filter(l_freq=50., h_freq=None, fir_design='firwin',
                   skip_by_annotation='foo', verbose='info')
    log = log.getvalue()
    assert '2 contiguous segments' in log
    raw.annotations.append(2., 1., 'foo')  # shouldn't change anything
    with catch_logging() as log:
        raw.filter(l_freq=50., h_freq=None, fir_design='firwin',
                   skip_by_annotation='foo', verbose='info')
    log = log.getvalue()
    assert '2 contiguous segments' in log
    # our filter will zero out anything not skipped:
    mask = np.concatenate((np.zeros(1000), np.ones(2000), np.zeros(1000)))
    expected_data = raws_concat[0][0][0] * mask
    assert_allclose(raw[0][0][0], expected_data, atol=1e-14)

    # Let's try another one: BAD_ACQ_SKIP over the first half
    raw = raws[0].copy()
    raw.set_annotations(Annotations([0.], [0.5], ['BAD_ACQ_SKIP']))
    my_data, times = raw.get_data(reject_by_annotation='omit',
                                  return_times=True)
    assert_allclose(times, raw.times[500:])
    assert my_data.shape == (1, 500)
    raw_filt = raw.copy().filter(skip_by_annotation='bad_acq_skip',
                                 **kwargs_stop)
    expected = data.copy()
    expected[:, 500:] = 0
    assert_allclose(raw_filt[:][0], expected, atol=1e-14)
    # ... and over the second half
    raw = raws[0].copy()
    raw.set_annotations(Annotations([0.5], [0.5], ['BAD_ACQ_SKIP']))
    my_data, times = raw.get_data(reject_by_annotation='omit',
                                  return_times=True)
    assert_allclose(times, raw.times[:500])
    assert my_data.shape == (1, 500)
    raw_filt = raw.copy().filter(skip_by_annotation='bad_acq_skip',
                                 **kwargs_stop)
    expected = data.copy()
    expected[:, :500] = 0
    assert_allclose(raw_filt[:][0], expected, atol=1e-14)
def test_basics():
    """Test annotation class."""
    raw = read_raw_fif(fif_fname)
    assert raw.annotations is not None  # XXX to be fixed in #5416
    assert len(raw.annotations.onset) == 0  # XXX to be fixed in #5416
    pytest.raises(IOError, read_annotations, fif_fname)
    onset = np.array(range(10))
    duration = np.ones(10)
    description = np.repeat('test', 10)
    now = datetime.utcnow()
    meas_date = raw.info['meas_date']
    # Every accepted flavor of orig_time should construct cleanly.
    for orig_time in [None, now, meas_date[0], meas_date]:
        annot = Annotations(onset, duration, description, orig_time)
    # Mismatched argument lengths must be rejected.
    pytest.raises(ValueError, Annotations, onset, duration, description[:9])
    pytest.raises(ValueError, Annotations, [onset, 1], duration, description)
    pytest.raises(ValueError, Annotations, onset, [duration, 1], description)
    # Combining annotations via concatenate_raws.
    raw2 = raw.copy()
    delta = raw.times[-1] + 1. / raw.info['sfreq']
    orig_time = meas_date[0] + meas_date[1] * 1e-6 + raw2._first_time
    offset = orig_time - _handle_meas_date(raw2.info['meas_date'])
    annot = Annotations(onset, duration, description, orig_time)
    assert ' segments' in repr(annot)
    raw2.set_annotations(annot)
    assert_array_equal(raw2.annotations.onset, onset + offset)
    # set_annotations must store a copy, not the object we handed in.
    assert id(raw2.annotations) != id(annot)
    concatenate_raws([raw, raw2])
    assert_and_remove_boundary_annot(raw)
    assert_allclose(onset + offset + delta, raw.annotations.onset, rtol=1e-5)
    assert_array_equal(annot.duration, raw.annotations.duration)
    assert_array_equal(raw.annotations.description, np.repeat('test', 10))
    # Combining RawArrays that carry different first_samps and orig_times.
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = (np.pi, 0)
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        ann = Annotations([1., 2.], [.5, .5], 'x',
                          np.pi + first_samp / sfreq)
        raw.set_annotations(ann)
        raws.append(raw)
    raw = RawArray(data.copy(), info)
    raw.set_annotations(Annotations([1.], [.5], 'x', None))
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    assert_and_remove_boundary_annot(raw, 3)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 134., 135., 144., 145., 154.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset,
                       [5., 124., 125., 135., 144., 145., 154.])
    assert_array_equal(raw.annotations.duration,
                       [1.5, .5, .5, .5, .5, .5, .5])
    assert_array_equal(raw.annotations.description,
                       ['y', 'x', 'x', 'x', 'x', 'x', 'x'])
def test_psd_welch_average_kwarg(kind):
    """Test `average` kwarg of psd_welch()."""
    raw = read_raw_fif(raw_fname)
    picks_psd = [0, 1]
    # Inject sinusoids at known frequencies into the first two channels.
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    for ch_idx, sig_freq in zip(picks_psd, [8., 50.]):
        data[ch_idx, :] += 2 * np.sin(np.pi * 2. * sig_freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)
    tmin, tmax = -0.5, 0.5
    fmin, fmax = 0, np.inf
    events = read_events(event_fname)
    events[:, 0] -= first_samp
    kws = dict(fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, n_fft=256,
               n_per_seg=128, n_overlap=0, picks=picks_psd)
    if kind == 'raw':
        inst = raw
    elif kind in ('epochs', 'evoked'):
        inst = Epochs(raw, events[:10], 2, tmin, tmax, picks=picks_psd,
                      proj=False, preload=True, baseline=None)
        if kind == 'evoked':
            inst = inst.average()
    else:
        raise ValueError('Unknown parametrization passed to test, check test '
                         'for typos.')
    psds_mean, freqs_mean = psd_welch(inst=inst, average='mean', **kws)
    psds_median, freqs_median = psd_welch(inst=inst, average='median', **kws)
    psds_unagg, freqs_unagg = psd_welch(inst=inst, average=None, **kws)
    # Frequencies must agree across all "average" types, as we feed in
    # the exact same data.
    assert_allclose(freqs_mean, freqs_median)
    assert_allclose(freqs_mean, freqs_unagg)
    # With `average=None` the last axis holds the un-aggregated segments.
    assert psds_mean.shape == psds_median.shape
    assert psds_mean.shape == psds_unagg.shape[:-1]
    assert_allclose(psds_mean, psds_unagg.mean(axis=-1))
    # SciPy's welch() corrects the median PSD for its bias relative to
    # the mean; mirror that correction here.
    from scipy.signal.spectral import _median_bias
    median_bias = _median_bias(psds_unagg.shape[-1])
    assert_allclose(psds_median, np.median(psds_unagg, axis=-1) / median_bias)
def preprocess_ts(ts_file, orig_channel_names_file, orig_channel_coords_file,
                  h_freq, orig_sfreq, down_sfreq, prefiltered=False):
    """Optionally low-pass filter and downsample a saved time series.

    Re-saves the channel names/coordinates under canonical filenames in the
    current working directory, then — only if ``orig_sfreq != down_sfreq`` —
    loads the data, anti-alias filters it (unless ``prefiltered``) and
    resamples it to ``down_sfreq``.

    Parameters
    ----------
    ts_file : str
        Path to a ``.npy`` file holding the (n_channels, n_times) array.
    orig_channel_names_file : str
        Text file with one channel name per line.
    orig_channel_coords_file : str
        Text file with channel coordinates, loadable by ``np.loadtxt``.
    h_freq : float
        NOTE(review): currently unused — the low-pass cutoff below is
        ``down_sfreq``, not ``h_freq``; confirm which was intended.
    orig_sfreq : float
        Sampling frequency of the data in ``ts_file``.
    down_sfreq : float
        Target sampling frequency.
    prefiltered : bool
        If True, skip the low-pass filtering step before resampling.

    Returns
    -------
    tuple of (str, str, str, float)
        (time-series file, coords file, names file, effective sfreq).
    """
    import os

    import numpy as np

    from mne import create_info
    from mne.io import RawArray

    # Load electrode names; use a context manager so the file is closed
    # (the original left the handle dangling).
    with open(orig_channel_names_file) as fid:
        elec_names = [line.strip() for line in fid]
    elec_loc = np.loadtxt(orig_channel_coords_file)

    # No modification is applied to names/locations; they are only
    # re-saved under canonical filenames.
    correct_elec_loc = elec_loc
    correct_elec_names = elec_names
    print(len(correct_elec_names))
    print(len(correct_elec_loc))

    channel_coords_file = os.path.abspath("correct_channel_coords.txt")
    np.savetxt(channel_coords_file, correct_elec_loc, fmt='%s')
    channel_names_file = os.path.abspath("correct_channel_names.txt")
    np.savetxt(channel_names_file, correct_elec_names, fmt='%s')

    if orig_sfreq != down_sfreq:
        ts = np.load(ts_file)
        print(ts.shape)
        raw = RawArray(ts, info=create_info(ch_names=elec_names,
                                            sfreq=orig_sfreq))
        indexes_good_elec = np.arange(len(elec_names))
        print(indexes_good_elec)
        if not prefiltered:
            # NOTE(review): cutoff is down_sfreq, not down_sfreq / 2 (the
            # Nyquist of the target rate) nor the h_freq argument — kept
            # as-is to preserve behavior; confirm intent.
            raw.filter(l_freq=None, h_freq=down_sfreq,
                       picks=indexes_good_elec)
        raw.resample(sfreq=down_sfreq, npad=100)
        downsampled_ts, times = raw[:, :]
        print(downsampled_ts.shape)
        downsampled_ts_file = os.path.abspath("downsampled_ts.npy")
        np.save(downsampled_ts_file, downsampled_ts)
        print(raw.info['sfreq'])
        return (downsampled_ts_file, channel_coords_file,
                channel_names_file, raw.info['sfreq'])
    else:
        print("No downsampling was applied as orig_sfreq and down_sfreq "
              "are identical")
        return ts_file, channel_coords_file, channel_names_file, orig_sfreq