def test_io_events():
    """Test round-trip IO for events.

    Reads the reference events from ``fname``, writes them to a FIF file
    and checks that the re-read copy matches the original.
    """
    # Local stdlib imports so this fix is self-contained.
    import os.path as op
    import tempfile

    events = mne.read_events(fname)
    # Write into a temporary directory instead of the CWD, so the test
    # leaves no artifacts behind and also works from a read-only directory.
    with tempfile.TemporaryDirectory() as tempdir:
        out_fname = op.join(tempdir, 'events.fif')
        mne.write_events(out_fname, events)
        events2 = mne.read_events(out_fname)
    assert_array_almost_equal(events, events2)
def test_io_cov():
    """Test IO round-trip.

    NOTE(review): despite the name and the original docstring ("noise
    covariance matrices"), this test only exercises event IO — it looks
    like a copy-paste of ``test_io_events``; confirm the intended target.
    """
    events = mne.read_events(fname)
    mne.write_events('events.fif', events)
    # Read back the file we just wrote. The original re-read ``fname``,
    # which left the output of write_events completely unverified.
    events2 = mne.read_events('events.fif')
    assert_array_almost_equal(events, events2)
def test_ems():
    """Test event-matched spatial filters"""
    raw = io.Raw(raw_fname, preload=False)

    # create unequal number of events
    events = read_events(event_name)
    events[-2, 2] = 3  # relabel one event so the two classes are unbalanced
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # subsample channels for speed
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # unbalanced condition counts must raise (presumably until equalized)
    assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
    epochs.equalize_event_counts(epochs.event_id, copy=False)

    # unknown condition names must raise KeyError
    assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
    surrogates, filters, conditions = compute_ems(epochs)
    assert_equal(list(set(conditions)), [1, 3])

    # with three conditions, the pair of interest must be given explicitly
    events = read_events(event_name)
    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs.equalize_event_counts(epochs.event_id, copy=False)

    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])

    assert_raises(ValueError, compute_ems, epochs)
    surrogates, filters, conditions = compute_ems(epochs,
                                                  ['aud_r', 'vis_l'])
    # one surrogate / condition label per epoch of the selected pair
    assert_equal(n_expected, len(surrogates))
    assert_equal(n_expected, len(conditions))
    assert_equal(list(set(conditions)), [2, 3])
    raw.close()
def test_info():
    """Test info object"""
    raw = io.Raw(raw_fname)
    event_id, tmin, tmax = 1, -0.2, 0.5
    events = read_events(event_name)
    event_id = int(events[0, 2])
    epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None,
                    baseline=(None, 0))
    evoked = epochs.average()
    events = read_events(event_name)

    # Info must behave like the dict it subclasses.
    info = Info(a=7, b='aaaaa')
    for key in ('a', 'b'):
        assert_true(key in info)
    info[42] = 'foo'
    assert_true(info[42] == 'foo')

    # Each API container should expose a printable Info instance whose
    # repr has one line per key plus two extra lines.
    for inst in (raw, epochs, evoked):
        assert_true(isinstance(inst.info, Info))
        rendered = '%s' % inst.info
        n_lines = len(rendered.split('\n'))
        assert_equal(n_lines, len(inst.info.keys()) + 2)
        assert_true(all(key in rendered for key in inst.info.keys()))
def test_info():
    """Test info object"""
    raw = Raw(raw_fname)
    event_id, tmin, tmax = 1, -0.2, 0.5
    events = read_events(event_name)
    event_id = int(events[0, 2])
    epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None,
                    baseline=(None, 0))

    evoked = epochs.average()
    events = read_events(event_name)

    # Test subclassing was successful.
    info = Info(a=7, b='aaaaa')
    assert_true('a' in info)
    assert_true('b' in info)
    info[42] = 'foo'
    assert_true(info[42] == 'foo')

    # Test info attribute in API objects
    for obj in [raw, epochs, evoked]:
        assert_true(isinstance(obj.info, Info))
        info_str = '%s' % obj.info
        # +2: the repr presumably wraps one line per key with an opening
        # and a closing line — confirm against Info.__repr__
        assert_equal(len(info_str.split('\n')), len(obj.info.keys()) + 2)
        assert_true(all(k in info_str for k in obj.info.keys()))

    # Test read-only fields
    info = raw.info.copy()
    nchan = len(info['chs'])
    ch_names = [ch['ch_name'] for ch in info['chs']]
    assert_equal(info['nchan'], nchan)
    assert_equal(list(info['ch_names']), ch_names)

    # Deleting of regular fields should work
    info['foo'] = 'bar'
    del info['foo']

    # Test updating of fields: derived entries (nchan, ch_names) must be
    # recomputed from 'chs' by _update_redundant
    del info['chs'][-1]
    info._update_redundant()
    assert_equal(info['nchan'], nchan - 1)
    assert_equal(list(info['ch_names']), ch_names[:-1])

    info['chs'][0]['ch_name'] = 'foo'
    info._update_redundant()
    assert_equal(info['ch_names'][0], 'foo')

    # Test casting to and from a dict
    info_dict = dict(info)
    info2 = Info(info_dict)
    assert_equal(info, info2)
def test_info():
    """Test info object."""
    raw = read_raw_fif(raw_fname)
    event_id, tmin, tmax = 1, -0.2, 0.5
    events = read_events(event_name)
    event_id = int(events[0, 2])
    epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None,
                    baseline=(None, 0))

    evoked = epochs.average()
    events = read_events(event_name)

    # Test subclassing was successful.
    info = Info(a=7, b="aaaaa")
    assert_true("a" in info)
    assert_true("b" in info)
    info[42] = "foo"
    assert_true(info[42] == "foo")

    # Test info attribute in API objects
    for obj in [raw, epochs, evoked]:
        assert_true(isinstance(obj.info, Info))
        info_str = "%s" % obj.info
        # +2: the repr presumably wraps one line per key with an opening
        # and a closing line — confirm against Info.__repr__
        assert_equal(len(info_str.split("\n")), len(obj.info.keys()) + 2)
        assert_true(all(k in info_str for k in obj.info.keys()))

    # Test read-only fields
    info = raw.info.copy()
    nchan = len(info["chs"])
    ch_names = [ch["ch_name"] for ch in info["chs"]]
    assert_equal(info["nchan"], nchan)
    assert_equal(list(info["ch_names"]), ch_names)

    # Deleting of regular fields should work
    info["foo"] = "bar"
    del info["foo"]

    # Test updating of fields: derived entries (nchan, ch_names) must be
    # recomputed from "chs" by _update_redundant
    del info["chs"][-1]
    info._update_redundant()
    assert_equal(info["nchan"], nchan - 1)
    assert_equal(list(info["ch_names"]), ch_names[:-1])

    info["chs"][0]["ch_name"] = "foo"
    info._update_redundant()
    assert_equal(info["ch_names"][0], "foo")

    # Test casting to and from a dict
    info_dict = dict(info)
    info2 = Info(info_dict)
    assert_equal(info, info2)
def _get_data(mode='real'):
    """Get data."""
    raw = read_raw_fif(raw_fname)
    events = mne.read_events(event_fname)[0:100]
    if mode == 'real':
        # Real data: mark bads, pick MEG channels, reject by amplitude.
        raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels
        picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
                               stim=False, exclude='bads')
        event_id, tmin, tmax = 1, -0.2, 0.5
        epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                            picks=picks, preload=True,
                            reject=dict(grad=4000e-13, mag=4e-12))
    elif mode == 'sin':
        # Artificial data: overwrite a single-channel epochs object with a
        # pure sine wave.
        event_id, tmin, tmax = 1, 0.0, 1.0
        epochs = mne.Epochs(raw, events[0:5], event_id, tmin, tmax,
                            picks=[0], preload=True,
                            reject=dict(grad=4000e-13))
        sine_freq = 10
        sine = np.sin(2 * np.pi * sine_freq * epochs.times)
        epochs._data = sine[None, None, :]
    return epochs
def test_n_components_none():
    """Test n_components=None."""
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    events = read_events(event_name)
    eeg_picks = pick_types(raw.info, eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=eeg_picks,
                    baseline=(None, 0), preload=True)

    tempdir = _TempDir()
    output_fname = op.join(tempdir, 'test_ica-ica.fif')

    # n_components left unset, max_pca_components fixed explicitly.
    ica = ICA(max_pca_components=10, n_components=None,
              random_state=12345)
    with warnings.catch_warnings(record=True):  # convergence
        ica.fit(epochs)
    ica.save(output_fname)

    # The explicit max_pca_components and the unset n_components must both
    # survive a save/load round-trip.
    ica = read_ica(output_fname)
    assert_equal(ica.max_pca_components, 10)
    assert_is_none(ica.n_components)
def test_psdestimator():
    """Test methods of PSDEstimator """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    sel = pick_types(raw.info, meg=True, stim=False, ecg=False,
                     eog=False, exclude='bads')
    sel = sel[1:13:3]  # subsample channels for speed
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=sel,
                    baseline=(None, 0), preload=True)
    data = epochs.get_data()
    labels = epochs.events[:, -1]

    psd = PSDEstimator(2 * np.pi, 0, np.inf)
    transformed = psd.fit_transform(data, labels)

    # one spectrum per epoch
    assert_true(transformed.shape[0] == data.shape[0])
    # fit().transform() must agree with fit_transform()
    assert_array_equal(psd.fit(data, labels).transform(data), transformed)

    # Test init exception: Epochs objects (not arrays) are rejected
    assert_raises(ValueError, psd.fit, epochs, labels)
    assert_raises(ValueError, psd.transform, epochs, labels)
def test_unsupervised_spatial_filter():
    """Test unsupervised spatial filter."""
    from sklearn.decomposition import PCA
    from sklearn.kernel_ridge import KernelRidge
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    sel = pick_types(raw.info, meg=True, stim=False, ecg=False,
                     eog=False, exclude='bads')[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=sel,
                    preload=True, baseline=None, verbose=False)

    # an incompatible estimator must be rejected at construction time
    assert_raises(ValueError, UnsupervisedSpatialFilter, KernelRidge(2))

    X = epochs.get_data()
    n_components = 4
    usf = UnsupervisedSpatialFilter(PCA(n_components))
    usf.fit(X)
    usf1 = UnsupervisedSpatialFilter(PCA(n_components))

    # transform keeps the 3D (epochs, components, times) layout ...
    assert_equal(usf.transform(X).ndim, 3)
    # ... fit_transform is equivalent to fit followed by transform ...
    assert_array_almost_equal(usf.transform(X), usf1.fit_transform(X))
    # ... and the channel axis is reduced to n_components
    assert_equal(usf.transform(X).shape[1], n_components)

    # Test with average param
    usf = UnsupervisedSpatialFilter(PCA(4), average=True)
    usf.fit_transform(X)
    assert_raises(ValueError, UnsupervisedSpatialFilter, PCA(4), 2)
def test_define_events():
    """Test defining response events."""
    events = read_events(fname)
    raw = read_raw_fif(raw_fname)
    new_events, _ = define_target_events(events, 5, 32, raw.info['sfreq'],
                                         .2, 0.7, 42, 99)

    n_target = events[events[:, 2] == 5].shape[0]
    n_miss = new_events[new_events[:, 2] == 99].shape[0]
    n_hit = new_events[new_events[:, 2] == 42].shape[0]

    # each original target event becomes either a hit (42) or a fill (99)
    assert_true(n_hit == (n_target - n_miss))

    events = np.array([[0, 0, 1], [375, 0, 2], [500, 0, 1], [875, 0, 3],
                       [1000, 0, 1], [1375, 0, 3], [1100, 0, 1],
                       [1475, 0, 2], [1500, 0, 1], [1875, 0, 2]])

    true_lag_nofill = [1500., 1500., 1500.]
    true_lag_fill = [1500., np.nan, np.nan, 1500., 1500.]

    n, lag_nofill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5)
    n, lag_fill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5, 99)

    assert_array_equal(true_lag_fill, lag_fill)
    assert_array_equal(true_lag_nofill, lag_nofill)
def _get_data():
    """Return (epochs, epochs_sin) built from the test raw file."""
    raw = Raw(raw_fname)
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels
    picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
                           stim=False, exclude='bads')

    # Real epochs with amplitude-based rejection.
    event_id, tmin, tmax = 1, -0.2, 0.5
    events = mne.read_events(event_fname)[0:100]
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        picks=picks, baseline=(None, 0), preload=True,
                        reject=dict(grad=4000e-13, mag=4e-12))

    # Single-channel epochs whose data are overwritten with a pure sine.
    event_id, tmin, tmax = 1, 0.0, 1.0
    epochs_sin = mne.Epochs(raw, events[0:5], event_id, tmin, tmax,
                            proj=True, picks=[0], baseline=(None, 0),
                            preload=True, reject=dict(grad=4000e-13))
    freq = 10
    sine = np.sin(2 * np.pi * freq * epochs_sin.times)
    epochs_sin._data = sine[None, None, :]

    return epochs, epochs_sin
def test_eog_channel(method):
    """Test that EOG channel is included when performing ICA.

    Parameters
    ----------
    method : str
        ICA method, forwarded to the ICA constructor (skipped for picard
        when unavailable).
    """
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname, preload=True)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=True, ecg=False,
                       eog=True, exclude='bads')
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    n_components = 0.9
    ica = ICA(n_components=n_components, method=method)
    # Test case for MEG and EOG data. Should have EOG channel
    for inst in [raw, epochs]:
        # first 4 MEG channels plus every EOG channel
        picks1a = pick_types(inst.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')[:4]
        picks1b = pick_types(inst.info, meg=False, stim=False, ecg=False,
                             eog=True, exclude='bads')
        picks1 = np.append(picks1a, picks1b)
        ica.fit(inst, picks=picks1)
        assert (any('EOG' in ch for ch in ica.ch_names))
    # Test case for MEG data. Should have no EOG channel
    for inst in [raw, epochs]:
        picks1 = pick_types(inst.info, meg=True, stim=False, ecg=False,
                            eog=False, exclude='bads')[:5]
        ica.fit(inst, picks=picks1)
        assert not any('EOG' in ch for ch in ica.ch_names)
def test_n_components_and_max_pca_components_none(method):
    """Test n_components and max_pca_components=None."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    events = read_events(event_name)
    eeg_picks = pick_types(raw.info, eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=eeg_picks,
                    baseline=(None, 0), preload=True)

    tempdir = _TempDir()
    output_fname = op.join(tempdir, 'test_ica-ica.fif')

    # Leave both component counts unset.
    ica = ICA(max_pca_components=None, method=method, n_components=None,
              random_state=12345)
    with pytest.warns(None):  # convergence
        ica.fit(epochs)
    ica.save(output_fname)

    ica = read_ica(output_fname)

    # ICA.fit() replaced max_pca_components, which was previously None,
    # with the appropriate integer value (all available channels).
    assert_equal(ica.max_pca_components, epochs.info['nchan'])
    assert ica.n_components is None
def test_scaler():
    """Test methods of Scaler """
    raw = fiff.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    sel = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
                          eog=False, exclude='bads')[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=sel,
                    baseline=(None, 0), preload=True)
    data = epochs.get_data()
    labels = epochs.events[:, -1]

    scaler = Scaler(epochs.info)

    # np invalid divide value warnings
    with warnings.catch_warnings(record=True):
        X = scaler.fit_transform(data, labels)
        assert_true(X.shape == data.shape)
        # fit().transform() must agree with fit_transform()
        X2 = scaler.fit(data, labels).transform(data)
    assert_array_equal(X2, X)

    # Test init exception: Epochs objects (not arrays) are rejected
    assert_raises(ValueError, scaler.fit, epochs, labels)
    assert_raises(ValueError, scaler.transform, epochs, labels)
def test_continuous_regression_no_overlap():
    """Test regression without overlap correction, on real data."""
    tmin, tmax = -.1, .5

    raw = mne.io.read_raw_fif(raw_fname, preload=True)
    raw.apply_proj()
    events = mne.read_events(event_fname)
    event_id = dict(audio_l=1, audio_r=2)

    # keep only two channels for speed
    raw = raw.pick_channels(raw.ch_names[:2])

    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        baseline=None, reject=None)

    revokeds = linear_regression_raw(raw, events, event_id,
                                     tmin=tmin, tmax=tmax, reject=None)

    # Without overlap the regression estimates must match plain averaging.
    for condition in event_id.keys():
        assert_allclose(revokeds[condition].data,
                        epochs[condition].average().data, rtol=1e-15)

    # Two events at the same sample must trigger a "duplicate" error.
    saved_latency = events[1, 0]
    events[1, 0] = events[0, 0]
    assert_raises(ValueError, linear_regression_raw, raw, events,
                  event_id, tmin, tmax)
    events[1, 0] = saved_latency

    # Events incompatible with decimation must also raise.
    events[:, 0] = range(len(events))
    assert_raises(ValueError, linear_regression_raw, raw, events,
                  event_id, tmin, tmax, decim=2)
def test_regularized_csp():
    """Test Common Spatial Patterns algorithm using regularized covariance."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # subsample channels for speed
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    n_components = 3
    # exercise every supported covariance regularization flavor
    reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
    for reg in reg_cov:
        csp = CSP(n_components=n_components, reg=reg, norm_trace=False)
        csp.fit(epochs_data, epochs.events[:, -1])
        y = epochs.events[:, -1]
        X = csp.fit_transform(epochs_data, y)
        # filters/patterns always span the full channel space
        assert_true(csp.filters_.shape == (n_channels, n_channels))
        assert_true(csp.patterns_.shape == (n_channels, n_channels))
        assert_array_almost_equal(csp.fit(epochs_data, y).
                                  transform(epochs_data), X)
        # test init exception
        assert_raises(ValueError, csp.fit, epochs_data,
                      np.zeros_like(epochs.events))
        assert_raises(ValueError, csp.fit, epochs, y)
        assert_raises(ValueError, csp.transform, epochs)

        csp.n_components = n_components
        sources = csp.transform(epochs_data)
        # transform output is reduced to n_components
        assert_true(sources.shape[1] == n_components)
def test_concatenatechannels():
    """Test methods of ConcatenateChannels """
    raw = fiff.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    sel = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
                          eog=False, exclude='bads')[1:13:3]
    with warnings.catch_warnings(record=True) as w:
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=sel,
                        baseline=(None, 0), preload=True)
    data = epochs.get_data()
    labels = epochs.events[:, -1]

    concat = ConcatenateChannels(epochs.info)
    X = concat.fit_transform(data, labels)

    # Each epoch is flattened into one vector of channels * times.
    assert_true(X.shape[0] == data.shape[0])
    assert_true(X.shape[1] == data.shape[1] * data.shape[2])
    # fit().transform() must agree with fit_transform()
    assert_array_equal(concat.fit(data, labels).transform(data), X)

    # The leading samples must be carried over unchanged.
    n_times = data.shape[2]
    assert_array_equal(data[0, 0, 0:n_times], X[0, 0:n_times])

    # Test init exception: Epochs objects (not arrays) are rejected
    assert_raises(ValueError, concat.fit, epochs, labels)
    assert_raises(ValueError, concat.transform, epochs, labels)
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs """
    raw = fiff.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    sel = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
                          eog=False, exclude='bads')[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=sel,
                    baseline=(None, 0), preload=True)
    data = epochs.get_data()
    labels = epochs.events[:, -1]
    n_channels = data.shape[1]
    n_components = 3

    csp = CSP(n_components=n_components)
    csp.fit(data, labels)
    X = csp.fit_transform(data, labels)

    # filters/patterns always span the full channel space
    assert_true(csp.filters_.shape == (n_channels, n_channels))
    assert_true(csp.patterns_.shape == (n_channels, n_channels))
    # fit().transform() must agree with fit_transform()
    assert_array_almost_equal(csp.fit(data, labels).transform(data), X)

    # test init exception
    assert_raises(ValueError, csp.fit, data, np.zeros_like(epochs.events))
    assert_raises(ValueError, csp.fit, epochs, labels)
    assert_raises(ValueError, csp.transform, epochs, labels)

    csp.n_components = n_components
    sources = csp.transform(data)
    assert_true(sources.shape[1] == n_components)
def _get_data():
    """Read in data used in tests."""
    # forward model
    forward = mne.read_forward_solution(fname_fwd)

    # raw data and events
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    events = mne.read_events(fname_event)
    event_id, tmin, tmax = 1, -0.1, 0.15

    # decimate for speed
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, selection=left_temporal_channels)
    picks = picks[::2]
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    del picks

    raw.info.normalize_proj()  # avoid projection warnings

    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0.), preload=True, reject=reject)

    # noise covariance from the baseline, data covariance after onset
    noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
    data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15)

    return epochs, data_cov, noise_cov, forward
def test_filterestimator():
    """Test methods of FilterEstimator """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude="bads")
    picks = picks[1:13:3]  # subsample channels for speed
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()

    # Add tests for different combinations of l_freq and h_freq

    # band-pass: shape must be preserved and fit+transform must agree
    # with fit_transform
    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=40)
    y = epochs.events[:, -1]
    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
        X = filt.fit_transform(epochs_data, y)
        assert_true(X.shape == epochs_data.shape)
        assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data),
                           X)

    # low-pass (l_freq=0)
    filt = FilterEstimator(epochs.info, l_freq=0, h_freq=40)
    y = epochs.events[:, -1]
    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
        X = filt.fit_transform(epochs_data, y)

    # degenerate band (l_freq == h_freq) must raise
    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)
    y = epochs.events[:, -1]
    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
        assert_raises(ValueError, filt.fit_transform, epochs_data, y)

    # high-pass (h_freq=None)
    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=None)
    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
        X = filt.fit_transform(epochs_data, y)

    # Test init exception
    assert_raises(ValueError, filt.fit, epochs, y)
    assert_raises(ValueError, filt.transform, epochs, y)
def test_compute_proj_epochs():
    """Test SSP computation on epochs.

    Computes SSP projectors from epochs, round-trips them through disk,
    checks sign-invariant equality against reference files, applies them
    and verifies the parallel (n_jobs=2) path matches the serial one.
    Uses module-level fixtures (``tempdir``, ``raw_fname`` etc.) —
    presumably set up elsewhere in this test module.
    """
    event_id, tmin, tmax = 1, -0.2, 0.3

    raw = Raw(raw_fname, preload=True)
    events = read_events(event_fname)
    bad_ch = 'MEG 2443'
    picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
                       eog=False, exclude=[])
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=None, proj=False)

    evoked = epochs.average()
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0,
                                n_jobs=1)
    write_proj(op.join(tempdir, 'proj.fif.gz'), projs)
    for p_fname in [proj_fname, proj_gz_fname,
                    op.join(tempdir, 'proj.fif.gz')]:
        projs2 = read_proj(p_fname)

        assert_true(len(projs) == len(projs2))

        for p1, p2 in zip(projs, projs2):
            assert_true(p1['desc'] == p2['desc'])
            assert_true(p1['data']['col_names'] == p2['data']['col_names'])
            assert_true(p1['active'] == p2['active'])
            # compare with sign invariance
            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
            if bad_ch in p1['data']['col_names']:
                bad = p1['data']['col_names'].index('MEG 2443')
                # dtype=bool: the np.bool alias was deprecated in NumPy
                # 1.20 and removed in 1.24; the builtin is equivalent.
                mask = np.ones(p1_data.size, dtype=bool)
                mask[bad] = False
                p1_data = p1_data[:, mask]
                p2_data = p2_data[:, mask]
            corr = np.corrcoef(p1_data, p2_data)[0, 1]
            assert_array_almost_equal(corr, 1.0, 5)

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])

    assert_true(nproj == 2)
    assert_true(U.shape[1] == 2)

    # test that you can save them
    epochs.info['projs'] += projs
    evoked = epochs.average()
    evoked.save(op.join(tempdir, 'foo.fif'))

    projs = read_proj(proj_fname)

    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
    assert_true(len(projs_evoked) == 2)
    # XXX : test something

    # test parallelization
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0,
                                n_jobs=2)
    projs = activate_proj(projs)
    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)
def _get_data():
    """Return raw data, events and a channel selection for testing."""
    raw = io.Raw(raw_fname, add_eeg_ref=False)
    events = read_events(event_name)
    # all channel types, keeping the trigger channel, excluding bads
    sel = pick_types(raw.info, meg=True, eeg=True, stim=True, ecg=True,
                     eog=True, include=['STI 014'], exclude='bads')
    return raw, events, sel
def test_epochs_vectorizer():
    """Test methods of EpochsVectorizer """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    sel = pick_types(raw.info, meg=True, stim=False, ecg=False,
                     eog=False, exclude="bads")[1:13:3]
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=sel,
                        baseline=(None, 0), preload=True)
    data = epochs.get_data()
    labels = epochs.events[:, -1]

    vector = EpochsVectorizer(epochs.info)
    X = vector.fit_transform(data, labels)

    # Each epoch is flattened into one vector of channels * times.
    assert_true(X.shape[0] == data.shape[0])
    assert_true(X.shape[1] == data.shape[1] * data.shape[2])
    # fit().transform() must agree with fit_transform()
    assert_array_equal(vector.fit(data, labels).transform(data), X)

    # The leading samples must be carried over unchanged.
    n_times = data.shape[2]
    assert_array_equal(data[0, 0, 0:n_times], X[0, 0:n_times])

    # inverse_transform restores the 3D epochs layout.
    Xi = vector.inverse_transform(X, labels)
    assert_true(Xi.shape[0] == data.shape[0])
    assert_true(Xi.shape[1] == data.shape[1])
    assert_array_equal(data[0, 0, 0:n_times], Xi[0, 0, 0:n_times])

    # Test init exception: Epochs objects (not arrays) are rejected
    assert_raises(ValueError, vector.fit, epochs, labels)
    assert_raises(ValueError, vector.transform, epochs, labels)
def test_stockwell_api():
    """Test stockwell functions.

    Checks epochs vs. evoked consistency of tfr_stockwell over several
    (fmin, fmax) bands and basic sanity of the returned power/ITC.
    """
    raw = read_raw_fif(raw_fname)
    event_id, tmin, tmax = 1, -0.2, 0.5
    event_name = op.join(base_dir, 'test-eve.fif')
    events = read_events(event_name)
    epochs = Epochs(raw, events,  # XXX pick 2 has epochs of zeros.
                    event_id, tmin, tmax, picks=[0, 1, 3])
    for fmin, fmax in [(None, 50), (5, 50), (5, None)]:
        with warnings.catch_warnings(record=True):  # zero padding
            power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax,
                                       return_itc=True)
        if fmax is not None:
            assert_true(power.freqs.max() <= fmax)
        with warnings.catch_warnings(record=True):  # padding
            power_evoked = tfr_stockwell(epochs.average(), fmin=fmin,
                                         fmax=fmax, return_itc=False)
        # for multitaper these don't necessarily match, but they seem to
        # for stockwell... if this fails, this maybe could be changed
        # just to check the shape
        assert_array_almost_equal(power_evoked.data, power.data)
    assert_true(isinstance(power, AverageTFR))
    assert_true(isinstance(itc, AverageTFR))
    assert_equal(power.data.shape, itc.data.shape)
    # inter-trial coherence is bounded in [0, 1]
    assert_true(itc.data.min() >= 0.0)
    assert_true(itc.data.max() <= 1.0)
    # (the original repeated this assertion twice; duplicate removed)
    assert_true(np.log(power.data.max()) * 20 <= 0.0)
def _get_data():
    """Load raw data, events and an EEG channel subset for testing."""
    raw = io.read_raw_fif(raw_fname, add_eeg_ref=False, verbose=False,
                          preload=True)
    events = read_events(event_name)
    # every 8th EEG channel, for speed
    eeg_sel = pick_types(raw.info, meg=False, eeg=True, stim=False,
                         ecg=False, eog=False, exclude='bads')[::8]
    return raw, events, eeg_sel
def test_reject_epochs():
    """Test of epochs rejection """
    event_id, tmin, tmax = 1, -0.2, 0.5

    # Setup for reading the raw data
    raw = fiff.Raw(raw_fname)
    events = mne.read_events(event_name)
    picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=True,
                            eog=True, include=['STI 014'])
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0),
                        reject=dict(grad=1000e-12, mag=4e-12, eeg=80e-6,
                                    eog=150e-6),
                        flat=dict(grad=1e-15, mag=1e-15))
    data = epochs.get_data()

    n_events = len(epochs.events)
    n_clean_epochs = len(data)

    # Rejection must drop some epochs, leaving exactly 3.
    # Should match
    # mne_process_raw --raw test_raw.fif --projoff \
    #   --saveavetag -ave --ave test.ave --filteroff
    assert n_events > n_clean_epochs
    assert n_clean_epochs == 3
def _get_data(tmin=-0.2, tmax=0.5, event_id=None, event_id_gen=None,
              test_times=None):
    """Aux function for testing GAT viz.

    Parameters
    ----------
    tmin, tmax : float
        Epoch time limits in seconds.
    event_id : dict | None
        Conditions used to build the epochs; defaults to
        ``dict(aud_l=1, vis_l=3)``.
    event_id_gen : dict | None
        Defaults to ``dict(aud_l=2, vis_l=4)``. Accepted for API
        compatibility; not used inside this helper.
    test_times : object | None
        Forwarded to ``GeneralizationAcrossTime``.

    Returns
    -------
    gat : GeneralizationAcrossTime
        The fitted and scored estimator.
    """
    # None sentinels instead of mutable dict defaults (shared-instance
    # pitfall); the effective defaults are unchanged.
    if event_id is None:
        event_id = dict(aud_l=1, vis_l=3)
    if event_id_gen is None:
        event_id_gen = dict(aud_l=2, vis_l=4)
    with warnings.catch_warnings(record=True):  # deprecated
        gat = GeneralizationAcrossTime()
    raw = read_raw_fif(raw_fname)
    raw.add_proj([], remove_existing=True)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # subsample channels for speed
    decim = 30

    # Test on time generalization within one condition
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        preload=True, decim=decim)
    epochs_list = [epochs[k] for k in event_id]
    equalize_epoch_counts(epochs_list)
    epochs = concatenate_epochs(epochs_list)

    # Test default running (re-binds gat; the instance above is unused)
    with warnings.catch_warnings(record=True):  # deprecated
        gat = GeneralizationAcrossTime(test_times=test_times)
    gat.fit(epochs)
    gat.score(epochs)
    return gat
def test_find_events():
    """Test find events in raw file """
    # Events read from the reference file are the gold standard.
    expected = mne.read_events(fname)
    raw = mne.fiff.Raw(raw_fname)
    found = mne.find_events(raw)
    assert_array_almost_equal(expected, found)
def _load_data(): """Helper function to load data.""" # It is more memory efficient to load data in a separate # function so it's loaded on-demand raw = io.read_raw_fif(raw_fname, add_eeg_ref=False) events = read_events(event_name) picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude=[]) # select every second channel for faster speed but compensate by using # mode='accurate'. picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude=[])[1::2] picks = pick_types(raw.info, meg=True, eeg=True, exclude=[]) with warnings.catch_warnings(record=True): # proj epochs_eeg = Epochs(raw, events, event_id, tmin, tmax, picks=picks_eeg, preload=True, reject=dict(eeg=80e-6)) epochs_meg = Epochs( raw, events, event_id, tmin, tmax, picks=picks_meg, preload=True, reject=dict(grad=1000e-12, mag=4e-12) ) epochs = Epochs( raw, events, event_id, tmin, tmax, picks=picks, preload=True, reject=dict(eeg=80e-6, grad=1000e-12, mag=4e-12), ) return raw, epochs, epochs_eeg, epochs_meg
def compute_lcmv_example_data(data_path, fname=None):
    """Compute LCMV beamformer output on the MNE sample dataset.

    Parameters
    ----------
    data_path : str
        Path to the MNE sample data directory.
    fname : str | None
        Output filename passed to ``mne.save_stc_as_volume``. When not
        None, the source estimate is additionally saved as stc files
        (note: under the hard-coded name 'lcmv-vol').

    Returns
    -------
    img : the 4D nifti image produced by ``mne.save_stc_as_volume``.
    """
    raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
    event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
    fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
    # Get epochs
    event_id, tmin, tmax = [1, 2], -0.2, 0.5

    # Setup for reading the raw data
    raw = mne.io.read_raw_fif(raw_fname, preload=True)
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
    events = mne.read_events(event_fname)

    # Set up pick list: gradiometers and magnetometers, excluding bad channels
    picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
                           eog=True, exclude='bads')

    # Pick the channels of interest
    raw.pick_channels([raw.ch_names[pick] for pick in picks])

    # Re-normalize our empty-room projectors, so they are fine after
    # subselection
    raw.info.normalize_proj()

    # Read epochs
    proj = False  # already applied
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        baseline=(None, 0), preload=True, proj=proj,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    evoked = epochs.average()

    # Read regularized noise covariance and compute regularized data
    # covariance
    noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
                                       method='shrunk')
    data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
                                      method='shrunk')

    # Read forward model
    forward = mne.read_forward_solution(fname_fwd)

    # Compute weights of free orientation (vector) beamformer with weight
    # normalization (neural activity index, NAI). Providing a noise covariance
    # matrix enables whitening of the data and forward solution. Source
    # orientation is optimized by setting pick_ori to 'max-power'.
    # weight_norm can also be set to 'unit-noise-gain'. Source orientation can
    # also be 'normal' (but only when using a surface-based source space) or
    # None, which computes a vector beamformer. Note, however, that not all
    # combinations of orientation selection and weight normalization are
    # implemented yet.
    filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
                        noise_cov=noise_cov, pick_ori='max-power',
                        weight_norm='nai')

    # Apply this spatial filter to the evoked data.
    stc = apply_lcmv(evoked, filters, max_ori_out='signed')

    # take absolute values for plotting
    stc.data[:, :] = np.abs(stc.data)

    # Save result in stc files if desired
    if fname is not None:
        stc.save('lcmv-vol')

    # select time window (tmin, tmax) in ms - consider changing for real data
    # scenario, since those values were chosen to optimize computation time
    stc.crop(0.087, 0.087)

    # Save result in a 4D nifti file
    img = mne.save_stc_as_volume(fname, stc, forward['src'],
                                 mri_resolution=True)

    return img
# Load our data # ------------- # # First we'll load the data we'll use in connectivity estimation. We'll use # the sample MEG data provided with MNE. data_path = sample.data_path() subjects_dir = data_path + '/subjects' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' # Load data inverse_operator = read_inverse_operator(fname_inv) raw = mne.io.read_raw_fif(fname_raw) events = mne.read_events(fname_event) # Add a bad channel raw.info['bads'] += ['MEG 2443'] # Pick MEG channels picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, exclude='bads') # Define epochs for left-auditory condition event_id, tmin, tmax = 1, -0.2, 0.5 epochs = mne.Epochs(raw,
def test_io_events():
    """Test IO for events.

    Round-trips events through fif, fif.gz, new- and old-format text files,
    then checks ``include``/``exclude`` selection and bad-filename warnings.
    """
    tempdir = _TempDir()
    # Test binary fif IO
    events = read_events(fname)  # Use as the gold standard
    write_events(op.join(tempdir, 'events-eve.fif'), events)
    events2 = read_events(op.join(tempdir, 'events-eve.fif'))
    assert_array_almost_equal(events, events2)

    # Test binary fif.gz IO
    events2 = read_events(fname_gz)  # Use as the gold standard
    assert_array_almost_equal(events, events2)
    write_events(op.join(tempdir, 'events-eve.fif.gz'), events2)
    events2 = read_events(op.join(tempdir, 'events-eve.fif.gz'))
    assert_array_almost_equal(events, events2)

    # Test new format text file IO
    write_events(op.join(tempdir, 'events.eve'), events)
    events2 = read_events(op.join(tempdir, 'events.eve'))
    assert_array_almost_equal(events, events2)
    events2 = read_events(fname_txt_mpr)
    assert_array_almost_equal(events, events2)

    # Test old format text file IO
    events2 = read_events(fname_old_txt)
    assert_array_almost_equal(events, events2)
    write_events(op.join(tempdir, 'events.eve'), events)
    events2 = read_events(op.join(tempdir, 'events.eve'))
    assert_array_almost_equal(events, events2)

    # Test event selection: scalar include, list include, exclude-only, and
    # include combined with exclude must all yield the same selection here
    a = read_events(op.join(tempdir, 'events-eve.fif'), include=1)
    b = read_events(op.join(tempdir, 'events-eve.fif'), include=[1])
    c = read_events(op.join(tempdir, 'events-eve.fif'),
                    exclude=[2, 3, 4, 5, 32])
    d = read_events(op.join(tempdir, 'events-eve.fif'), include=1,
                    exclude=[2, 3])
    assert_array_equal(a, b)
    assert_array_equal(a, c)
    assert_array_equal(a, d)

    # Test binary file IO for 1 event
    events = read_events(fname_1)  # Use as the new gold standard
    write_events(op.join(tempdir, 'events-eve.fif'), events)
    events2 = read_events(op.join(tempdir, 'events-eve.fif'))
    assert_array_almost_equal(events, events2)

    # Test text file IO for 1 event
    write_events(op.join(tempdir, 'events.eve'), events)
    events2 = read_events(op.join(tempdir, 'events.eve'))
    assert_array_almost_equal(events, events2)

    # test warnings on bad filenames (one warning each for write and read)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fname2 = op.join(tempdir, 'test-bad-name.fif')
        write_events(fname2, events)
        read_events(fname2)
    assert_true(len(w) == 2)
We begin as always by importing the necessary Python modules and loading some :ref:`example data <sample-dataset>`; to save memory we'll use a pre-filtered and downsampled version of the example data, and we'll also load an events array to use when converting the continuous data to epochs: """ import os import mne sample_data_folder = mne.datasets.sample.data_path() sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False) events_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_filt-0-40_raw-eve.fif') events = mne.read_events(events_file) ############################################################################### # Annotating bad spans of data # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # The tutorial :ref:`tut-events-vs-annotations` describes how # :class:`~mne.Annotations` can be read from embedded events in the raw # recording file, and :ref:`tut-annotate-raw` describes in detail how to # interactively annotate a :class:`~mne.io.Raw` data object. Here, we focus on # best practices for annotating *bad* data spans so that they will be excluded # from your analysis pipeline. # # # The ``reject_by_annotation`` parameter # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def test_ica_additional():
    """Test additional ICA functionality.

    Covers fitting with ``n_components=None``, noise-covariance whitening,
    save/read round-trips, decimation, score functions, artifact detection,
    and fif export of ICA sources.
    """
    tempdir = _TempDir()
    stop2 = 500
    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)

    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None, max_pca_components=None,
                  n_pca_components=None, random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=True, exclude='bads')
    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                        baseline=(None, 0), preload=True)

    test_cov2 = deepcopy(test_cov)
    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, start=start, stop=stop2)

    # test warnings on bad filenames (one each for save and read)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_true(len(w) == 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    # NOTE(review): fitting on ``raw`` rather than ``raw_`` here looks
    # suspicious for a decimation test — confirm the intent upstream.
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    # BUGFIX: assert_true(x, y) treats y as the failure *message* and always
    # passes for truthy x; assert_equal actually verifies the data were not
    # modified by fitting with decimation.
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4, n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        # exclude lists must survive a save/read round-trip
        for exclude in [[], [0]]:
            ica.exclude = [0]
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)

        # apply() must not modify ica.exclude itself
        ica.exclude = []
        ica.apply(raw, exclude=[1])
        assert_true(ica.exclude == [])

        ica.exclude = [0, 1]
        ica.apply(raw, exclude=[1])
        assert_true(ica.exclude == [0, 1])

        ica_raw = ica.get_sources(raw)
        assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
                                    ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.filter(4, 20)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.notch_filter([10])
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

    ica.n_pca_components = 4
    ica_read.n_pca_components = 4

    ica.exclude = []
    ica.save(test_ica_fname)
    ica_read = read_ica(test_ica_fname)
    for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                 'pca_mean_', 'pca_explained_variance_',
                 '_pre_whitener']:
        assert_array_almost_equal(getattr(ica, attr),
                                  getattr(ica_read, attr))

    assert_true(ica.ch_names == ica_read.ch_names)
    assert_true(isinstance(ica_read.info, Info))

    sources = ica.get_sources(raw)[:, :][0]
    sources2 = ica_read.get_sources(raw)[:, :][0]
    assert_array_almost_equal(sources, sources2)

    _raw1 = ica.apply(raw, exclude=[1])
    _raw2 = ica_read.apply(raw, exclude=[1])
    assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)

    # check score funcs
    for name, func in score_funcs.items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
                                   start=0, stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw,
                  target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                             eog_ch=ch_name, skew_criterion=idx,
                             var_criterion=idx, kurt_criterion=idx)
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
                      method='ctps')
        assert_raises(ValueError, ica.find_bads_ecg, raw,
                      method='crazy-coupling')

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        # retag a channel as EOG (kind 202) and check per-channel score lists
        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

    # check score funcs
    for name, func in score_funcs.items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs,
                  target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])
    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])
    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_true(len(ica_raw._filenames) == 0)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = io.Raw(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs.raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = _check_n_pca_components(ica, ncomps)
        assert_true(ncomps_ == expected)
assert_array_equal) from nose.tools import assert_raises, assert_equal from mne import io, pick_types, read_events, Epochs from mne.channels.interpolation import _make_interpolation_matrix base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(base_dir, 'test_raw.fif') event_name = op.join(base_dir, 'test-eve.fif') evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif') event_id, tmin, tmax = 1, -0.2, 0.5 event_id_2 = 2 raw = io.Raw(raw_fname, add_eeg_ref=False) events = read_events(event_name) picks = pick_types(raw.info, meg=False, eeg=True, exclude=[]) reject = dict(eeg=80e-6) epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, preload=True, reject=reject) # make sure it works if MEG channels are present: picks2 = np.concatenate([[0, 1, 2], picks]) n_meg = 3
def _get_events(): """Get events.""" return read_events(event_name)
from mne.datasets import sample from mne.decoding import Vectorizer from mne.preprocessing import XdawnTransformer data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' tmin, tmax = -0.1, 0.3 event_id = dict(aud_l=1, vis_l=3) # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, preload=True) raw.filter(1, 20, method='iir') events = read_events(event_fname) picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, exclude='bads') epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False, picks=picks, baseline=None, preload=True, add_eeg_ref=False, verbose=False) X = epochs.get_data() y = label_binarize(epochs.events[:, 2], classes=[1, 3]).ravel() clf = make_pipeline(XdawnTransformer(n_components=3), Vectorizer(), LogisticRegression()) score = cross_val_score(clf, X, y, cv=5)
def test_apply_reference():
    """Test base function for rereferencing.

    Exercises ``_apply_reference`` on Raw, Epochs, and Evoked objects,
    including in-place operation, the empty-reference no-op, preload
    requirements, and interaction with inactive SSP projections.
    """
    raw = read_raw_fif(fif_fname, preload=True)

    # Rereference raw data by creating a copy of original data
    reref, ref_data = _apply_reference(
        raw.copy(), ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # The CAR reference projection should have been removed by the function
    assert (not _has_eeg_average_ref_proj(reref.info['projs']))

    # Test that data is modified in place when copy=False
    reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'])
    assert (raw is reref)

    # Test that disabling the reference does not change anything
    reref, ref_data = _apply_reference(raw.copy(), [])
    assert_array_equal(raw._data, reref._data)

    # Test re-referencing Epochs object
    raw = read_raw_fif(fif_fname, preload=False)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True)
    reref, ref_data = _apply_reference(
        epochs.copy(), ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test re-referencing Evoked object
    evoked = epochs.average()
    reref, ref_data = _apply_reference(
        evoked.copy(), ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Referencing needs data to be preloaded
    raw_np = read_raw_fif(fif_fname, preload=False)
    pytest.raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])

    # Test having inactive SSP projections that deal with channels involved
    # during re-referencing
    raw = read_raw_fif(fif_fname, preload=True)
    raw.add_proj(
        Projection(
            active=False,
            data=dict(
                col_names=['EEG 001', 'EEG 002'],
                row_names=None,
                data=np.array([[1, 1]]),
                ncol=2,
                nrow=1
            ),
            desc='test',
            kind=1,
        )
    )
    # Projection concerns channels mentioned in projector
    pytest.raises(RuntimeError, _apply_reference, raw, ['EEG 001'])

    # Projection does not concern channels mentioned in projector, no error
    _apply_reference(raw, ['EEG 003'], ['EEG 004'])
def test_add_reference():
    """Test adding a reference.

    Checks ``add_reference_channels`` on Raw (with and without digitization
    points), Epochs (default vs. delayed projection), and Evoked, for both
    single and paired reference channels, plus invalid-input handling.
    """
    raw = read_raw_fif(fif_fname, preload=True)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # check if channel already exists
    pytest.raises(ValueError, add_reference_channels,
                  raw, raw.info['ch_names'][0])
    # add reference channel to Raw
    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # for Neuromag fif's, the reference electrode location is placed in
    # elements [3:6] of each "data" electrode location
    assert_allclose(raw.info['chs'][-1]['loc'][:3],
                    raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)

    ref_idx = raw.ch_names.index('Ref')
    ref_data, _ = raw[ref_idx]
    assert_array_equal(ref_data, 0)

    # add reference channel to Raw when no digitization points exist
    raw = read_raw_fif(fif_fname).crop(0, 1).load_data()
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    del raw.info['dig']

    raw_ref = add_reference_channels(raw, 'Ref', copy=True)

    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # Test adding an existing channel as reference channel
    pytest.raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])

    # add two reference channels to Raw
    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
    _check_channel_names(raw_ref, ['M1', 'M2'])
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    assert_array_equal(raw_ref._data[-2:, :], 0)

    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
    _check_channel_names(raw, ['M1', 'M2'])
    ref_idx = raw.ch_names.index('M1')
    ref_idy = raw.ch_names.index('M2')
    ref_data, _ = raw[[ref_idx, ref_idy]]
    assert_array_equal(ref_data, 0)

    # add reference channel to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True)
    # default: proj=True, after which adding a Ref channel is prohibited
    pytest.raises(RuntimeError, add_reference_channels, epochs, 'Ref')

    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True, proj='delayed')
    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)

    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
    _check_channel_names(epochs_ref, 'Ref')
    ref_idx = epochs_ref.ch_names.index('Ref')
    ref_data = epochs_ref.get_data()[:, ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add two reference channels to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True, proj='delayed')
    with pytest.warns(RuntimeWarning, match='ignored .set to zero.'):
        epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
    _check_channel_names(epochs_ref, ['M1', 'M2'])
    ref_idx = epochs_ref.ch_names.index('M1')
    ref_idy = epochs_ref.ch_names.index('M2')
    assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
    assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add reference channel to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True, proj='delayed')
    evoked = epochs.average()
    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
    _check_channel_names(evoked_ref, 'Ref')
    ref_idx = evoked_ref.ch_names.index('Ref')
    ref_data = evoked_ref.data[ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # add two reference channels to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True, proj='delayed')
    evoked = epochs.average()
    with pytest.warns(RuntimeWarning, match='ignored .set to zero.'):
        evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
    _check_channel_names(evoked_ref, ['M1', 'M2'])
    ref_idx = evoked_ref.ch_names.index('M1')
    ref_idy = evoked_ref.ch_names.index('M2')
    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # Test invalid inputs
    raw_np = read_raw_fif(fif_fname, preload=False)
    pytest.raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
    pytest.raises(ValueError, add_reference_channels, raw, 1)
def test_scaler():
    """Test methods of Scaler.

    Verifies that each scaling method (info-based defaults, explicit dict,
    'mean', 'median') transforms epochs data as expected and round-trips
    through ``inverse_transform``.
    """
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]

    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    y = epochs.events[:, -1]

    methods = (None, dict(mag=5, grad=10, eeg=20), 'mean', 'median')
    infos = (epochs.info, epochs.info, None, None)
    # channels-first view used to compute per-channel reference statistics
    epochs_data_t = epochs_data.transpose([1, 0, 2])
    for method, info in zip(methods, infos):
        if method == 'median' and not check_version('sklearn', '0.17'):
            assert_raises(ValueError, Scaler, info, method)
            continue
        if method == 'mean' and not check_version('sklearn', ''):
            assert_raises(ImportError, Scaler, info, method)
            continue
        scaler = Scaler(info, method)
        X = scaler.fit_transform(epochs_data, y)
        assert_equal(X.shape, epochs_data.shape)
        # recompute the expected per-channel std/mean the Scaler should use
        if method is None or isinstance(method, dict):
            sd = DEFAULTS['scalings'] if method is None else method
            stds = np.zeros(len(picks))
            for key in ('mag', 'grad'):
                stds[pick_types(epochs.info, meg=key)] = 1. / sd[key]
            stds[pick_types(epochs.info, meg=False, eeg=True)] = 1. / sd['eeg']
            means = np.zeros(len(epochs.ch_names))
        elif method == 'mean':
            stds = np.array([np.std(ch_data) for ch_data in epochs_data_t])
            means = np.array([np.mean(ch_data) for ch_data in epochs_data_t])
        else:  # median
            percs = np.array([np.percentile(ch_data, [25, 50, 75])
                              for ch_data in epochs_data_t])
            stds = percs[:, 2] - percs[:, 0]
            means = percs[:, 1]
        assert_allclose(X * stds[:, np.newaxis] + means[:, np.newaxis],
                        epochs_data, rtol=1e-12, atol=1e-20, err_msg=method)

        X2 = scaler.fit(epochs_data, y).transform(epochs_data)
        assert_array_equal(X, X2)

        # inverse_transform
        Xi = scaler.inverse_transform(X)
        assert_array_almost_equal(epochs_data, Xi)

    # Test init exception
    assert_raises(ValueError, scaler.fit, epochs, y)
    assert_raises(ValueError, scaler.transform, epochs, y)
    epochs_bad = Epochs(raw, events, event_id, 0, 0.01,
                        picks=np.arange(len(raw.ch_names)))  # non-data chs
    scaler = Scaler(epochs_bad.info, None)
    assert_raises(ValueError, scaler.fit, epochs_bad.get_data(), y)
def _get_events(): return read_events(event_name)
print(__doc__) data_path = sample.data_path() fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif' ############################################################################### # Reading events # -------------- # # Below we'll read in an events file. We suggest that this file end in # ``-eve.fif``. Note that we can read in the entire events file, or only # events corresponding to particular event types with the ``include`` and # ``exclude`` parameters. events_1 = mne.read_events(fname, include=1) events_1_2 = mne.read_events(fname, include=[1, 2]) events_not_4_32 = mne.read_events(fname, exclude=[4, 32]) ############################################################################### # Events objects are essentially numpy arrays with three columns: # ``event_sample | previous_event_id | event_id`` print(events_1[:5], '\n\n---\n\n', events_1_2[:5], '\n\n') for ind, before, after in events_1[:5]: print("At sample %d stim channel went from %d to %d" % (ind, before, after)) ############################################################################### # Plotting events
def test_filterestimator(): """Test methods of FilterEstimator.""" raw = io.read_raw_fif(raw_fname) events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads') picks = picks[1:13:3] epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True) epochs_data = epochs.get_data() # Add tests for different combinations of l_freq and h_freq filt = FilterEstimator(epochs.info, l_freq=40, h_freq=80, filter_length='auto', l_trans_bandwidth='auto', h_trans_bandwidth='auto') y = epochs.events[:, -1] with warnings.catch_warnings(record=True): # stop freq attenuation warning X = filt.fit_transform(epochs_data, y) assert_true(X.shape == epochs_data.shape) assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X) filt = FilterEstimator(epochs.info, l_freq=None, h_freq=40, filter_length='auto', l_trans_bandwidth='auto', h_trans_bandwidth='auto') y = epochs.events[:, -1] with warnings.catch_warnings(record=True): # stop freq attenuation warning X = filt.fit_transform(epochs_data, y) filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1) y = epochs.events[:, -1] with warnings.catch_warnings(record=True): # stop freq attenuation warning assert_raises(ValueError, filt.fit_transform, epochs_data, y) filt = FilterEstimator(epochs.info, l_freq=40, h_freq=None, filter_length='auto', l_trans_bandwidth='auto', h_trans_bandwidth='auto') with warnings.catch_warnings(record=True): # stop freq attenuation warning X = filt.fit_transform(epochs_data, y) # Test init exception assert_raises(ValueError, filt.fit, epochs, y) assert_raises(ValueError, filt.transform, epochs, y)
meg = subjects[i] # Complete path to epoched data epoch_fname_fruit = data_path + meg + 'block_fruit_epochs-epo.fif' epoch_fname_odour = data_path + meg + 'block_odour_epochs-epo.fif' epoch_fname_milk = data_path + meg + 'block_milk_epochs-epo.fif' epoch_fname_LD = data_path + meg + 'block_LD_epochs-epo.fif' # Loading epoched data epochs_fruit = mne.read_epochs(epoch_fname_fruit, preload=True) epochs_odour = mne.read_epochs(epoch_fname_odour, preload=True) epochs_milk = mne.read_epochs(epoch_fname_milk, preload=True) epochs_LD = mne.read_epochs(epoch_fname_LD, preload=True) # Loading events events_fruit = mne.read_events(data_path + meg + 'block_fruit-eve.fif') events_odour = mne.read_events(data_path + meg + 'block_odour-eve.fif') events_milk = mne.read_events(data_path + meg + 'block_milk-eve.fif') events_LD = mne.read_events(data_path + meg + 'block_LD-eve.fif') picks_fruit = mne.pick_types(epochs_fruit.info, meg=True, eeg=True) picks_odour = mne.pick_types(epochs_odour.info, meg=True, eeg=True) picks_milk = mne.pick_types(epochs_milk.info, meg=True, eeg=True) picks_LD = mne.pick_types(epochs_LD.info, meg=True, eeg=True) # Computing evoked data evoked_fruit = epochs_fruit.average(picks=picks_fruit) evoked_odour = epochs_odour.average(picks=picks_odour) evoked_milk = epochs_milk.average(picks=picks_milk) evoked_LD = epochs_LD.average(picks=picks_LD)
def test_find_events():
    """Test find events in raw file.

    Builds synthetic stim-channel data and checks masking, consecutive-event
    handling, onset/offset/step outputs, minimum durations, multi-channel
    stim input, the ``initial_event`` flag, and error messages.
    """
    events = read_events(fname)
    raw = read_raw_fif(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3, mask_type='not_and')
    with pytest.warns(RuntimeWarning, match='events masked'):
        events22 = read_events(fname, mask=3, mask_type='not_and')
    assert_array_equal(events11, events22)

    # Reset some data for ease of comparison
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000
    raw._update_times()

    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])

    # test digital masking
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'

    pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
    pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
    # testing mask_type. default = 'not_and'
    assert_array_equal(find_events(raw, shortest_event=1, mask=1,
                                   mask_type='not_and'),
                       [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2,
                                   mask_type='not_and'),
                       [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3,
                                   mask_type='not_and'),
                       [[4, 0, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4,
                                   mask_type='not_and'),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    # testing with mask_type = 'and'
    assert_array_equal(find_events(raw, shortest_event=1, mask=1,
                                   mask_type='and'),
                       [[1, 0, 1], [3, 0, 1]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2,
                                   mask_type='and'),
                       [[2, 0, 2]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3,
                                   mask_type='and'),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4,
                                   mask_type='and'),
                       [[4, 0, 4]])

    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])

    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6

    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=True),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True),
                       [[19, 6, 5],
                        [29, 5, 6],
                        [31, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    pytest.raises(ValueError, find_events, raw, output='step',
                  consecutive=True)
    assert_array_equal(find_events(raw, output='step', consecutive=True,
                                   shortest_event=1),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5],
                        [32, 5, 0],
                        [40, 0, 6],
                        [41, 6, 0],
                        [14399, 0, 9],
                        [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5],
                        [31, 0, 6],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False,
                                   min_duration=0.002),
                       [[31, 0, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True,
                                   min_duration=0.002),
                       [[19, 6, 5],
                        [29, 5, 6],
                        [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
                       [[10, 0, 5],
                        [20, 5, 6]])

    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
                                       stim_channel=stim_channel),
                       [[0, 0, 1],
                        [1, 1, 0],
                        [10, 0, 4],
                        [11, 4, 5],
                        [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=-1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0],
                        [10, 0, 5],
                        [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0],
                        [11, 0, 5],
                        [20, 5, 0]])

    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o

    # Test with list of stim channels
    raw._data[stim_channel_idx, 1:101] = np.zeros(100)
    raw._data[stim_channel_idx, 10:11] = 1
    raw._data[stim_channel_idx, 30:31] = 3
    stim_channel2 = 'STI 015'
    stim_channel2_idx = pick_channels(raw.info['ch_names'],
                                      include=[stim_channel2])
    raw._data[stim_channel2_idx, :] = 0
    raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
    events1 = find_events(raw, stim_channel='STI 014')
    events2 = events1.copy()
    events2[:, 0] -= 5
    events = find_events(raw, stim_channel=['STI 014', stim_channel2])
    assert_array_equal(events[::2], events2)
    assert_array_equal(events[1::2], events1)

    # test initial_event argument
    info = create_info(['MYSTI'], 1000, 'stim')
    data = np.zeros((1, 1000))
    raw = RawArray(data, info)
    data[0, :10] = 100
    data[0, 30:40] = 200
    assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
    assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
                       [[0, 0, 100], [30, 0, 200]])

    # test error message for raw without stim channels
    raw = read_raw_fif(raw_fname, preload=True)
    raw.pick_types(meg=True, stim=False)
    # raw does not have annotations
    with pytest.raises(ValueError, match="'stim_channel'"):
        find_events(raw)
    # if raw has annotations, we show a different error message
    raw.set_annotations(Annotations(0, 2, "test"))
    with pytest.raises(ValueError, match="mne.events_from_annotations"):
        find_events(raw)
def test_time_frequency():
    """Test time-frequency transforms (Morlet power and phase-locking).

    Runs ``tfr_morlet`` on single epochs, full Epochs, and Evoked data,
    checks power/ITC shapes and value ranges, and cross-checks against the
    lower-level ``cwt_morlet`` / ``single_trial_power`` routines.
    """
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.5

    # Setup for reading the raw data
    raw = io.Raw(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       include=include, exclude=exclude)

    picks = picks[:2]  # keep the test fast: only two channels
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)

    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
                              return_itc=False)
    # requesting ITC from an averaged (single-trial) signal must raise
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)

    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test sub

    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    # inter-trial coherence values must lie strictly in (0, 1)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    Fs = raw.info['sfreq']  # sampling in Hz
    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))

    single_power = single_trial_power(data, Fs, freqs, use_fft=False,
                                      n_cycles=2)
    # Average across trials (axis 0) to match the Epochs-level power.
    # BUGFIX: np.mean without axis= reduced to a scalar and silently
    # broadcast-compared against the 3-D power array.
    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)
def test_find_events():
    """Test find events in raw file.

    NOTE: the order of the ``raw._data`` mutations below matters — each
    block of assertions checks the stim channel state set up just above it.
    """
    events = read_events(fname)
    raw = io.Raw(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    # remember current env values so they can be restored at the end
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3)
    events22 = read_events(fname, mask=3)
    assert_array_equal(events11, events22)

    # Reset some data for ease of comparison
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000
    raw._update_times()

    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])

    # test digital masking
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'

    assert_raises(TypeError, find_events, raw, mask="0")
    assert_array_equal(find_events(raw, shortest_event=1, mask=1),
                       [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2),
                       [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3),
                       [[4, 0, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])

    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    # events at the very start are ignored by default
    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])

    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6

    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=True),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True),
                       [[19, 6, 5],
                        [29, 5, 6],
                        [31, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    # 'step' output requires a real shortest_event, hence the error here
    assert_raises(ValueError, find_events, raw, output='step',
                  consecutive=True)
    assert_array_equal(find_events(raw, output='step', consecutive=True,
                                   shortest_event=1),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5],
                        [32, 5, 0],
                        [40, 0, 6],
                        [41, 6, 0],
                        [14399, 0, 9],
                        [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5],
                        [31, 0, 6],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=False,
                                   min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True,
                                   min_duration=0.002),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False,
                                   min_duration=0.002),
                       [[31, 0, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True,
                                   min_duration=0.002),
                       [[19, 6, 5],
                        [29, 5, 6],
                        [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True,
                                   min_duration=0.003),
                       [[10, 0, 5],
                        [20, 5, 6]])

    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
                                       stim_channel=stim_channel),
                       [[0, 0, 1],
                        [1, 1, 0],
                        [10, 0, 4],
                        [11, 4, 5],
                        [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=-1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0],
                        [10, 0, 5],
                        [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0],
                        [11, 0, 5],
                        [20, 5, 0]])

    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o
def openEEGFile(raw_file_path, events_file_path):
    """Load a raw FIF recording together with its events file.

    Parameters
    ----------
    raw_file_path : str
        Path to the raw FIF file (loaded preloaded and silently).
    events_file_path : str
        Path to the corresponding events file.

    Returns
    -------
    tuple
        ``(raw, events)`` pair.
    """
    loaded_raw = Raw(raw_file_path, preload=True, verbose=False)
    loaded_events = read_events(events_file_path)
    return (loaded_raw, loaded_events)
def test_io_events(tmpdir):
    """Test IO for events.

    Round-trips events through binary fif, fif.gz, and both text formats,
    then checks event selection, masking, and bad-filename warnings.
    """
    # Test binary fif IO
    events = read_events(fname)  # Use as the gold standard
    fname_temp = tmpdir.join('events-eve.fif')
    write_events(fname_temp, events)
    events2 = read_events(fname_temp)
    assert_array_almost_equal(events, events2)

    # Test binary fif.gz IO
    events2 = read_events(fname_gz)  # Use as the gold standard
    assert_array_almost_equal(events, events2)
    fname_temp += '.gz'  # append suffix to the py.path object
    write_events(fname_temp, events2)
    events2 = read_events(fname_temp)
    assert_array_almost_equal(events, events2)

    # Test new format text file IO
    fname_temp = str(tmpdir.join('events.eve'))
    write_events(fname_temp, events)
    events2 = read_events(fname_temp)
    assert_array_almost_equal(events, events2)
    with pytest.warns(RuntimeWarning, match='first row of'):
        events2 = read_events(fname_txt_mpr, mask=0, mask_type='not_and')
    assert_array_almost_equal(events, events2)

    # Test old format text file IO
    events2 = read_events(fname_old_txt)
    assert_array_almost_equal(events, events2)
    write_events(fname_temp, events)
    events2 = read_events(fname_temp)
    assert_array_almost_equal(events, events2)

    # Test event selection
    # include/exclude of the same single id must give identical results
    fname_temp = tmpdir.join('events-eve.fif')
    a = read_events(fname_temp, include=1)
    b = read_events(fname_temp, include=[1])
    c = read_events(fname_temp, exclude=[2, 3, 4, 5, 32])
    d = read_events(fname_temp, include=1, exclude=[2, 3])
    assert_array_equal(a, b)
    assert_array_equal(a, c)
    assert_array_equal(a, d)

    # test reading file with mask=None
    events2 = events.copy()
    events2[:, -1] = range(events2.shape[0])
    write_events(fname_temp, events2)
    events3 = read_events(fname_temp, mask=None)
    assert_array_almost_equal(events2, events3)

    # Test binary file IO for 1 event
    events = read_events(fname_1)  # Use as the new gold standard
    write_events(fname_temp, events)
    events2 = read_events(fname_temp)
    assert_array_almost_equal(events, events2)

    # Test text file IO for 1 event
    fname_temp = str(tmpdir.join('events.eve'))
    write_events(fname_temp, events)
    events2 = read_events(fname_temp)
    assert_array_almost_equal(events, events2)

    # test warnings on bad filenames
    fname2 = tmpdir.join('test-bad-name.fif')
    with pytest.warns(RuntimeWarning, match='-eve.fif'):
        write_events(fname2, events)
    with pytest.warns(RuntimeWarning, match='-eve.fif'):
        read_events(fname2)

    # No event_id
    with pytest.raises(RuntimeError, match='No event_id'):
        read_events(fname, return_event_id=True)
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
              epochs_preload=True, data_cov=True):
    """Read in data used in tests.

    Parameters
    ----------
    tmin, tmax : float
        Epoch time window in seconds.
    all_forward : bool
        If True, also load surface-oriented, fixed, and volume forwards.
    epochs : bool
        If True, build Epochs (and an Evoked average) from the raw data.
    epochs_preload : bool
        If True, preload, resample, and crop the epochs.
    data_cov : bool
        If True, compute a data covariance from the epochs.

    Returns
    -------
    tuple
        ``(raw, epochs, evoked, data_cov, noise_cov, label, forward,
        forward_surf_ori, forward_fixed, forward_vol)``; entries that were
        not requested are ``None``.
    """
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    forward = mne.read_forward_solution(fname_fwd)
    if all_forward:
        forward_surf_ori = _read_forward_solution_meg(
            fname_fwd, surf_ori=True)
        forward_fixed = _read_forward_solution_meg(
            fname_fwd, force_fixed=True, surf_ori=True, use_cps=False)
        forward_vol = _read_forward_solution_meg(fname_fwd_vol)
    else:
        forward_surf_ori = None
        forward_fixed = None
        forward_vol = None

    event_id, tmin, tmax = 1, tmin, tmax

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
                           eog=True, ref_meg=False, exclude='bads',
                           selection=left_temporal_channels)
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    raw.info.normalize_proj()  # avoid projection warnings

    if epochs:
        # Read epochs
        epochs = mne.Epochs(
            raw, events, event_id, tmin, tmax, proj=True,
            baseline=(None, 0), preload=epochs_preload,
            reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
        if epochs_preload:
            epochs.resample(200, npad=0, n_jobs=2)
        epochs.crop(0, None)
        evoked = epochs.average()
        info = evoked.info
    else:
        epochs = None
        evoked = None
        info = raw.info

    noise_cov = mne.read_cov(fname_cov)
    noise_cov['projs'] = []  # avoid warning
    with warnings.catch_warnings(record=True):  # bad proj
        noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,
                                       eeg=0.1, proj=True)
    if data_cov:
        with warnings.catch_warnings(record=True):  # too few samples
            data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145)
    else:
        data_cov = None

    return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol
def test_tf_lcmv():
    """Test TF beamforming based on LCMV.

    Builds per-band noise covariances, compares ``tf_lcmv`` output against
    manual ``_lcmv_source_power`` computations, and checks the error paths.
    """
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    forward = mne.read_forward_solution(fname_fwd)

    event_id, tmin, tmax = 1, -0.2, 0.2

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, meg=True, eeg=False,
                           stim=True, eog=True, exclude='bads',
                           selection=left_temporal_channels)
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    raw.info.normalize_proj()  # avoid projection warnings
    del picks

    # Read epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=None, preload=False,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    epochs.drop_bad_epochs()

    freq_bins = [(4, 12), (15, 40)]
    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
    win_lengths = [0.2, 0.2]
    tstep = 0.1
    reg = 0.05

    source_power = []
    noise_covs = []
    for (l_freq, h_freq), win_length in zip(freq_bins, win_lengths):
        raw_band = raw.copy()
        raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1)
        epochs_band = mne.Epochs(
            raw_band, epochs.events, epochs.event_id, tmin=tmin, tmax=tmax,
            baseline=None, proj=True)
        with warnings.catch_warnings(record=True):  # not enough samples
            noise_cov = compute_covariance(epochs_band, tmin=tmin,
                                           tmax=tmin + win_length)
        noise_cov = mne.cov.regularize(
            noise_cov, epochs_band.info, mag=reg, grad=reg, eeg=reg,
            proj=True)
        noise_covs.append(noise_cov)
        del raw_band  # to save memory

        # Manually calculating source power in one frequency band and several
        # time windows to compare to tf_lcmv results and test overlapping
        if (l_freq, h_freq) == freq_bins[0]:
            for time_window in time_windows:
                with warnings.catch_warnings(record=True):  # bad samples
                    data_cov = compute_covariance(epochs_band,
                                                  tmin=time_window[0],
                                                  tmax=time_window[1])
                with warnings.catch_warnings(record=True):  # bad proj
                    stc_source_power = _lcmv_source_power(
                        epochs.info, forward, noise_cov, data_cov,
                        reg=reg, label=label)
                source_power.append(stc_source_power.data)

    with warnings.catch_warnings(record=True):
        stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep,
                       win_lengths, freq_bins, reg=reg, label=label)

    assert_true(len(stcs) == len(freq_bins))
    assert_true(stcs[0].shape[1] == 4)

    # Averaging all time windows that overlap the time period 0 to 100 ms
    source_power = np.mean(source_power, axis=0)

    # Selecting the first frequency bin in tf_lcmv results
    stc = stcs[0]

    # Comparing tf_lcmv results with _lcmv_source_power results
    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])

    # Test if using unsupported max-power orientation is detected
    assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin,
                  tmax, tstep, win_lengths, freq_bins=freq_bins,
                  pick_ori='max-power')

    # Test if incorrect number of noise CSDs is detected
    # Test if incorrect number of noise covariances is detected
    assert_raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]],
                  tmin, tmax, tstep, win_lengths, freq_bins)

    # Test if freq_bins and win_lengths incompatibility is detected
    assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin,
                  tmax, tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins)

    # Test if time step exceeding window lengths is detected
    assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin,
                  tmax, tstep=0.15, win_lengths=[0.2, 0.1],
                  freq_bins=freq_bins)

    # Test correct detection of preloaded epochs objects that do not contain
    # the underlying raw object
    epochs_preloaded = mne.Epochs(raw, events, event_id, tmin, tmax,
                                  proj=True, baseline=(None, 0),
                                  preload=True)
    epochs_preloaded._raw = None
    with warnings.catch_warnings(record=True):  # not enough samples
        assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward,
                      noise_covs, tmin, tmax, tstep, win_lengths, freq_bins)

    with warnings.catch_warnings(record=True):  # not enough samples
        # Pass only one epoch to test if subtracting evoked
        # responses yields zeros
        stcs = tf_lcmv(epochs[0], forward, noise_covs, tmin, tmax, tstep,
                       win_lengths, freq_bins, subtract_evoked=True,
                       reg=reg, label=label)

    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121'])
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    epochs.average().crop(0.1, 0.2).save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert_true(len(w) >= 1)
    assert_true(repr(report))

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(op.basename(fname) in
                    [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert_true(len(w) >= 1)
    assert_true(repr(report))

    # NOTE(review): both glob patterns below are identical ('*.raw') and
    # likely match nothing in tempdir — presumably intended to cover the
    # 'pattern' list above; confirm against upstream.
    fnames = glob.glob(op.join(tempdir, '*.raw')) + \
        glob.glob(op.join(tempdir, '*.raw'))
    for fname in fnames:
        assert_true(op.basename(fname) in
                    [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_raises(ValueError, Report, image_format='foo')
    assert_raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs.

    Checks constructor validation, fit/transform shapes, multi-class
    support, the average-power and csp-space transforms, and pattern
    recovery on simulated data.
    """
    raw = io.read_raw_fif(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[2:12:3]  # subselect channels -> disable proj!
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True, proj=False)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]
    y = epochs.events[:, -1]

    # Init: invalid constructor arguments must raise
    assert_raises(ValueError, CSP, n_components='foo', norm_trace=False)
    for reg in ['foo', -0.1, 1.1]:
        csp = CSP(reg=reg, norm_trace=False)
        assert_raises(ValueError, csp.fit, epochs_data,
                      epochs.events[:, -1])
    for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]:
        CSP(reg=reg, norm_trace=False)
    for cov_est in ['foo', None]:
        assert_raises(ValueError, CSP, cov_est=cov_est, norm_trace=False)
    assert_raises(ValueError, CSP, norm_trace='foo')
    for cov_est in ['concat', 'epoch']:
        CSP(cov_est=cov_est, norm_trace=False)

    n_components = 3
    # Fit
    for norm_trace in [True, False]:
        csp = CSP(n_components=n_components, norm_trace=norm_trace)
        csp.fit(epochs_data, epochs.events[:, -1])

    assert_equal(len(csp.mean_), n_components)
    assert_equal(len(csp.std_), n_components)

    # Transform
    X = csp.fit_transform(epochs_data, y)
    sources = csp.transform(epochs_data)
    assert_true(sources.shape[1] == n_components)
    assert_true(csp.filters_.shape == (n_channels, n_channels))
    assert_true(csp.patterns_.shape == (n_channels, n_channels))
    assert_array_almost_equal(sources, X)

    # Test data exception
    assert_raises(ValueError, csp.fit, epochs_data,
                  np.zeros_like(epochs.events))
    assert_raises(ValueError, csp.fit, epochs, y)
    assert_raises(ValueError, csp.transform, epochs)

    # Test plots
    epochs.pick_types(meg='mag')
    cmap = ('RdBu', True)
    components = np.arange(n_components)
    for plot in (csp.plot_patterns, csp.plot_filters):
        plot(epochs.info, components=components, res=12, show=False,
             cmap=cmap)

    # Test with more than 2 classes
    epochs = Epochs(raw, events, tmin=tmin, tmax=tmax, picks=picks,
                    event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4),
                    baseline=(None, 0), proj=False, preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]  # duplicate assignment removed

    for cov_est in ['concat', 'epoch']:
        csp = CSP(n_components=n_components, cov_est=cov_est,
                  norm_trace=False)
        csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
        assert_equal(len(csp._classes), 4)
        assert_array_equal(csp.filters_.shape, [n_channels, n_channels])
        assert_array_equal(csp.patterns_.shape, [n_channels, n_channels])

    # Test average power transform
    n_components = 2
    assert_true(csp.transform_into == 'average_power')
    feature_shape = [len(epochs_data), n_components]
    X_trans = dict()
    for log in (None, True, False):
        csp = CSP(n_components=n_components, log=log, norm_trace=False)
        assert_true(csp.log is log)
        Xt = csp.fit_transform(epochs_data, epochs.events[:, 2])
        assert_array_equal(Xt.shape, feature_shape)
        X_trans[str(log)] = Xt
    # log=None => log=True
    assert_array_almost_equal(X_trans['None'], X_trans['True'])
    # Different normalization return different transform
    assert_true(np.sum((X_trans['True'] - X_trans['False']) ** 2) > 1.)

    # Check wrong inputs
    assert_raises(ValueError, CSP, transform_into='average_power',
                  log='foo')

    # Test csp space transform
    csp = CSP(transform_into='csp_space', norm_trace=False)
    assert_true(csp.transform_into == 'csp_space')
    for log in ('foo', True, False):
        assert_raises(ValueError, CSP, transform_into='csp_space', log=log,
                      norm_trace=False)
    n_components = 2
    csp = CSP(n_components=n_components, transform_into='csp_space',
              norm_trace=False)
    Xt = csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
    feature_shape = [len(epochs_data), n_components, epochs_data.shape[2]]
    assert_array_equal(Xt.shape, feature_shape)

    # Check mixing matrix on simulated data
    y = np.array([100] * 50 + [1] * 50)
    X, A = simulate_data(y)

    for cov_est in ['concat', 'epoch']:
        # fit csp
        csp = CSP(n_components=1, cov_est=cov_est, norm_trace=False)
        csp.fit(X, y)

        # check the first pattern match the mixing matrix
        # the sign might change
        corr = np.abs(np.corrcoef(csp.patterns_[0, :].T, A[:, 0])[0, 1])
        assert_greater(np.abs(corr), 0.99)

        # check output
        out = csp.transform(X)
        corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1])
        assert_greater(np.abs(corr), 0.95)
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new],
                 [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121'])
    raw.del_proj()
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    epochs.average().crop(0.1, 0.2).save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    # MaxShield notice from the ms (MaxShield) raw file must be rendered
    assert '(MaxShield on)' in html

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))

    fnames = glob.glob(op.join(tempdir, '*.raw')) + \
        glob.glob(op.join(tempdir, '*.raw'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
def test_render_report(renderer, tmpdir):
    """Test rendering *.fif files for mne report."""
    tempdir = str(tmpdir)
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new],
                 [raw_fname, raw_fname_new_bids],
                 [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [proj_fname, proj_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002'])
    raw.del_proj()
    raw.set_eeg_reference(projection=True)
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    evoked = epochs.average().crop(0.1, 0.2)
    evoked.save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    projs=True)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    assert len(report.fnames) == len(fnames)
    assert len(report.html) == len(report.fnames)
    assert len(report.fnames) == len(report)

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html
    # Projectors in Raw.info
    assert '<h4>SSP Projectors</h4>' in html
    # Projectors in `proj_fname_new`
    assert f'SSP Projectors: {op.basename(proj_fname_new)}' in html
    # Evoked in `evoked_fname`
    assert f'Evoked: {op.basename(evoked_fname)} ({evoked.comment})' in html
    assert 'Topomap (ch_type =' in html
    assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html

    assert len(report.html) == len(fnames)
    assert len(report.html) == len(report.fnames)

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))

    fnames = glob.glob(op.join(tempdir, '*.raw')) + \
        glob.glob(op.join(tempdir, '*.raw'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    tempdir = pathlib.Path(tempdir)  # test using pathlib.Path
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')

    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section('foo', 'caption', 'section')

    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section(['foo'], 'caption', 'section')
def test_ica_full_data_recovery(): """Test recovery of full data when no source is rejected""" # Most basic recovery raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(0.5) events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:10] epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True) evoked = epochs.average() n_channels = 5 data = raw._data[:n_channels].copy() data_epochs = epochs.get_data() data_evoked = evoked.data for method in ['fastica']: stuff = [(2, n_channels, True), (2, n_channels // 2, False)] for n_components, n_pca_components, ok in stuff: ica = ICA(n_components=n_components, max_pca_components=n_pca_components, n_pca_components=n_pca_components, method=method, max_iter=1) with warnings.catch_warnings(record=True): ica.fit(raw, picks=list(range(n_channels))) raw2 = ica.apply(raw, exclude=[], copy=True) if ok: assert_allclose(data[:n_channels], raw2._data[:n_channels], rtol=1e-10, atol=1e-15) else: diff = np.abs(data[:n_channels] - raw2._data[:n_channels]) assert_true(np.max(diff) > 1e-14) ica = ICA(n_components=n_components, max_pca_components=n_pca_components, n_pca_components=n_pca_components) with warnings.catch_warnings(record=True): ica.fit(epochs, picks=list(range(n_channels))) epochs2 = ica.apply(epochs, exclude=[], copy=True) data2 = epochs2.get_data()[:, :n_channels] if ok: assert_allclose(data_epochs[:, :n_channels], data2, rtol=1e-10, atol=1e-15) else: diff = np.abs(data_epochs[:, :n_channels] - data2) assert_true(np.max(diff) > 1e-14) evoked2 = ica.apply(evoked, exclude=[], copy=True) data2 = evoked2.data[:n_channels] if ok: assert_allclose(data_evoked[:n_channels], data2, rtol=1e-10, atol=1e-15) else: diff = np.abs(evoked.data[:n_channels] - data2) assert_true(np.max(diff) > 1e-14) assert_raises(ValueError, ICA, method='pizza-decomposision')
def test_compute_proj_epochs():
    """Test SSP computation on epochs.

    Computes gradiometer/magnetometer SSP projectors from epochs, checks
    that they round-trip through FIF (plain and gzipped) up to sign, that a
    projection matrix can be built from them, that they can be saved with
    an Evoked, that parallel computation matches serial, and that writing
    to a non-conventional filename warns.
    """
    tempdir = _TempDir()
    event_id, tmin, tmax = 1, -0.2, 0.3

    raw = Raw(raw_fname, preload=True)
    events = read_events(event_fname)
    bad_ch = 'MEG 2443'
    picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
                       exclude=[])
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=None, proj=False)

    evoked = epochs.average()
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
    write_proj(op.join(tempdir, 'test-proj.fif.gz'), projs)
    # The freshly written projectors must match both reference files and
    # what we just wrote.
    for p_fname in [proj_fname, proj_gz_fname,
                    op.join(tempdir, 'test-proj.fif.gz')]:
        projs2 = read_proj(p_fname)

        assert_true(len(projs) == len(projs2))

        for p1, p2 in zip(projs, projs2):
            assert_true(p1['desc'] == p2['desc'])
            assert_true(p1['data']['col_names'] == p2['data']['col_names'])
            assert_true(p1['active'] == p2['active'])
            # compare with sign invariance (SSP vectors are defined up to
            # an overall sign flip)
            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
            if bad_ch in p1['data']['col_names']:
                # Drop the bad channel's column before correlating.
                bad = p1['data']['col_names'].index('MEG 2443')
                # FIX: the boolean mask indexes the column axis, so it must
                # have one entry per column (shape[1]), not one per element
                # (size); also the removed ``np.bool`` alias is replaced by
                # the builtin ``bool``.
                mask = np.ones(p1_data.shape[1], dtype=bool)
                mask[bad] = False
                p1_data = p1_data[:, mask]
                p2_data = p2_data[:, mask]
            corr = np.corrcoef(p1_data, p2_data)[0, 1]
            assert_array_almost_equal(corr, 1.0, 5)
            if p2['explained_var']:
                assert_array_almost_equal(p1['explained_var'],
                                          p2['explained_var'])

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])

    # one gradiometer + one magnetometer projector were requested
    assert_true(nproj == 2)
    assert_true(U.shape[1] == 2)

    # test that you can save them alongside evoked data
    epochs.info['projs'] += projs
    evoked = epochs.average()
    evoked.save(op.join(tempdir, 'foo-ave.fif'))

    projs = read_proj(proj_fname)

    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
    assert_true(len(projs_evoked) == 2)
    # XXX : test something

    # test parallelization: n_jobs=2 must give the same projector matrix
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=2,
                                desc_prefix='foobar')
    assert_true(all('foobar' in x['desc'] for x in projs))
    projs = activate_proj(projs)
    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)

    # test warnings on bad filenames (non-standard suffix should warn on
    # both write and read -> two warnings expected)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        proj_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_proj(proj_badname, projs)
        read_proj(proj_badname)
        assert_naming(w, 'test_proj.py', 2)
############################################################################### # Set parameters raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif' label_names = 'Aud-rh', 'Vis-rh' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' tmin, tmax = -0.2, 0.8 event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, preload=True) raw.filter(None, 10., fir_design='firwin') events = mne.read_events(event_fname) # Set up pick list: MEG - bad channels (modify to your needs) raw.info['bads'] += ['MEG 2443'] # mark bads picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, exclude='bads') # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin,
def test_fif(_bids_validate):
    """Test functionality of the write_raw_bids conversion for fif.

    Round-trips FIF data through write_raw_bids/read_raw_bids and
    exercises: event/bad-channel preservation, stim-channel-free data,
    EEG-only conversion to BrainVision, empty-room naming, participant
    metadata overwrite semantics, anonymization (``daysback``), split
    files, and various error paths.
    """
    output_path = _TempDir()
    data_path = testing.data_path()
    raw_fname = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc_raw.fif')

    event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
                'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
    events_fname = op.join(data_path, 'MEG', 'sample',
                           'sample_audvis_trunc_raw-eve.fif')

    raw = mne.io.read_raw_fif(raw_fname)
    write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
                   event_id=event_id, overwrite=False)

    # Read the file back in to check that the data has come through cleanly.
    # Events and bad channel information was read through JSON sidecar files.
    with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
        read_raw_bids(bids_basename + '_meg.fif', output_path,
                      extra_params=dict(foo='bar'))

    raw2 = read_raw_bids(bids_basename + '_meg.fif', output_path,
                         extra_params=dict(allow_maxshield=True))
    assert set(raw.info['bads']) == set(raw2.info['bads'])
    events, _ = mne.events_from_annotations(raw2)
    events2 = mne.read_events(events_fname)
    # Drop zero-valued (no-event) rows before comparing onsets.
    events2 = events2[events2[:, 2] != 0]
    assert_array_equal(events2[:, 0], events[:, 0])

    # check if write_raw_bids works when there is no stim channel
    raw.set_channel_types({raw.ch_names[i]: 'misc'
                           for i in mne.pick_types(raw.info, stim=True,
                                                   meg=False)})
    output_path = _TempDir()
    with pytest.warns(UserWarning, match='No events found or provided.'):
        write_raw_bids(raw, bids_basename, output_path, overwrite=False)

    _bids_validate(output_path)

    # try with eeg data only (conversion to bv)
    output_path = _TempDir()
    raw = mne.io.read_raw_fif(raw_fname)
    raw.load_data()
    raw2 = raw.pick_types(meg=False, eeg=True, stim=True, eog=True, ecg=True)
    raw2.save(op.join(output_path, 'test-raw.fif'), overwrite=True)
    raw2 = mne.io.Raw(op.join(output_path, 'test-raw.fif'), preload=False)
    with pytest.warns(UserWarning,
                      match='Converting data files to BrainVision format'):
        write_raw_bids(raw2, bids_basename, output_path,
                       events_data=events_fname, event_id=event_id,
                       verbose=True, overwrite=False)
    os.remove(op.join(output_path, 'test-raw.fif'))
    bids_dir = op.join(output_path, 'sub-%s' % subject_id,
                       'ses-%s' % session_id, 'eeg')
    # All expected BrainVision sidecar files must have been produced.
    for sidecar in ['channels.tsv', 'eeg.eeg', 'eeg.json', 'eeg.vhdr',
                    'eeg.vmrk', 'events.tsv']:
        assert op.isfile(op.join(bids_dir, bids_basename + '_' + sidecar))

    # Converted data must match the original numerically.
    raw2 = mne.io.read_raw_brainvision(op.join(bids_dir,
                                               bids_basename + '_eeg.vhdr'))
    assert_array_almost_equal(raw.get_data(), raw2.get_data())
    _bids_validate(output_path)

    # write the same data but pretend it is empty room data:
    raw = mne.io.read_raw_fif(raw_fname)
    # NOTE(review): assumes raw.info['meas_date'] is a (sec, usec) tuple
    # (pre-0.20 MNE); newer MNE stores a datetime — confirm target version.
    er_date = datetime.fromtimestamp(
        raw.info['meas_date'][0]).strftime('%Y%m%d')
    er_bids_basename = 'sub-emptyroom_ses-{0}_task-noise'.format(str(er_date))
    write_raw_bids(raw, er_bids_basename, output_path, overwrite=False)
    assert op.exists(op.join(
        output_path, 'sub-emptyroom', 'ses-{0}'.format(er_date), 'meg',
        'sub-emptyroom_ses-{0}_task-noise_meg.json'.format(er_date)))
    # test that an incorrect date raises an error.
    er_bids_basename_bad = 'sub-emptyroom_ses-19000101_task-noise'
    with pytest.raises(ValueError, match='Date provided'):
        write_raw_bids(raw, er_bids_basename_bad, output_path,
                       overwrite=False)

    # give the raw object some fake participant data (potentially overwriting)
    raw = mne.io.read_raw_fif(raw_fname)
    raw.info['subject_info'] = {'his_id': subject_id2,
                                'birthday': (1993, 1, 26), 'sex': 1}
    write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
                   event_id=event_id, overwrite=True)
    # assert age of participant is correct
    participants_tsv = op.join(output_path, 'participants.tsv')
    data = _from_tsv(participants_tsv)
    assert data['age'][data['participant_id'].index('sub-01')] == '9'

    # try and write preloaded data (should be rejected)
    raw = mne.io.read_raw_fif(raw_fname, preload=True)
    with pytest.raises(ValueError, match='preloaded'):
        write_raw_bids(raw, bids_basename, output_path,
                       events_data=events_fname, event_id=event_id,
                       overwrite=False)

    # test anonymize
    raw = mne.io.read_raw_fif(raw_fname)
    raw.anonymize()

    data_path2 = _TempDir()
    raw_fname2 = op.join(data_path2, 'sample_audvis_raw.fif')
    raw.save(raw_fname2)

    bids_basename2 = bids_basename.replace(subject_id, subject_id2)
    raw = mne.io.read_raw_fif(raw_fname2)
    bids_output_path = write_raw_bids(raw, bids_basename2, output_path,
                                      events_data=events_fname,
                                      event_id=event_id, overwrite=False)

    # check that the overwrite parameters work correctly for the participant
    # data
    # change the gender but don't force overwrite.
    raw.info['subject_info'] = {'his_id': subject_id2,
                                'birthday': (1994, 1, 26), 'sex': 2}
    with pytest.raises(FileExistsError, match="already exists"):  # noqa: F821
        write_raw_bids(raw, bids_basename2, output_path,
                       events_data=events_fname, event_id=event_id,
                       overwrite=False)
    # now force the overwrite
    write_raw_bids(raw, bids_basename2, output_path,
                   events_data=events_fname, event_id=event_id,
                   overwrite=True)

    with pytest.raises(ValueError, match='raw_file must be'):
        write_raw_bids('blah', bids_basename, output_path)

    bids_basename2 = 'sub-01_ses-01_xyz-01_run-01'
    with pytest.raises(KeyError, match='Unexpected entity'):
        write_raw_bids(raw, bids_basename2, output_path)

    bids_basename2 = 'sub-01_run-01_task-auditory'
    with pytest.raises(ValueError, match='ordered correctly'):
        write_raw_bids(raw, bids_basename2, output_path, overwrite=True)

    del raw._filenames
    with pytest.raises(ValueError, match='raw.filenames is missing'):
        write_raw_bids(raw, bids_basename2, output_path)

    _bids_validate(output_path)

    assert op.exists(op.join(output_path, 'participants.tsv'))

    # asserting that single fif files do not include the part key
    files = glob(op.join(bids_output_path, 'sub-' + subject_id2,
                         'ses-' + subject_id2, 'meg', '*.fif'))
    for ii, FILE in enumerate(files):
        assert 'part' not in FILE
    # NOTE(review): if ``files`` is empty, ``ii`` is unbound here and this
    # raises NameError rather than failing the assertion — worth confirming
    # the glob is always non-empty.
    assert ii < 1

    # test keyword mne-bids anonymize
    raw = mne.io.read_raw_fif(raw_fname)
    with pytest.raises(ValueError, match='`daysback` argument required'):
        write_raw_bids(raw, bids_basename, output_path,
                       events_data=events_fname, event_id=event_id,
                       anonymize=dict(), overwrite=True)

    output_path = _TempDir()
    raw = mne.io.read_raw_fif(raw_fname)
    # too-small daysback only warns; the write still proceeds
    with pytest.warns(UserWarning, match='daysback` is too small'):
        write_raw_bids(raw, bids_basename, output_path,
                       events_data=events_fname, event_id=event_id,
                       anonymize=dict(daysback=400), overwrite=False)

    output_path = _TempDir()
    raw = mne.io.read_raw_fif(raw_fname)
    # too-large daysback is a hard error
    with pytest.raises(ValueError, match='`daysback` exceeds maximum value'):
        write_raw_bids(raw,
                       bids_basename, output_path,
                       events_data=events_fname, event_id=event_id,
                       anonymize=dict(daysback=40000), overwrite=False)

    output_path = _TempDir()
    raw = mne.io.read_raw_fif(raw_fname)
    write_raw_bids(raw, bids_basename, output_path,
                   events_data=events_fname, event_id=event_id,
                   anonymize=dict(daysback=30000, keep_his=True),
                   overwrite=False)
    scans_tsv = make_bids_basename(
        subject=subject_id, session=session_id, suffix='scans.tsv',
        prefix=op.join(output_path, 'sub-01', 'ses-01'))
    data = _from_tsv(scans_tsv)
    # anonymized acquisition time must have been shifted back before 1925
    assert datetime.strptime(data['acq_time'][0],
                             '%Y-%m-%dT%H:%M:%S').year < 1925
    _bids_validate(output_path)

    # check that split files have part key
    raw = mne.io.read_raw_fif(raw_fname)
    data_path3 = _TempDir()
    raw_fname3 = op.join(data_path3, 'sample_audvis_raw.fif')
    # tiny split_size forces the writer to produce multiple parts
    raw.save(raw_fname3, buffer_size_sec=1.0, split_size='10MB',
             split_naming='neuromag', overwrite=True)
    raw = mne.io.read_raw_fif(raw_fname3)
    subject_id3 = '03'
    bids_basename3 = bids_basename.replace(subject_id, subject_id3)
    bids_output_path = write_raw_bids(raw, bids_basename3, output_path,
                                      overwrite=False)
    files = glob(op.join(bids_output_path, 'sub-' + subject_id3,
                         'ses-' + subject_id3, 'meg', '*.fif'))
    for FILE in files:
        assert 'part' in FILE

    # test unknown extension
    raw = mne.io.read_raw_fif(raw_fname)
    raw._filenames = (raw.filenames[0].replace('.fif', '.foo'),)
    with pytest.raises(ValueError, match='Unrecognized file format'):
        write_raw_bids(raw, bids_basename, output_path)