def test_compute_proj_epochs(): """Test SSP computation on epochs""" event_id, tmin, tmax = 1, -0.2, 0.3 raw = Raw(raw_fname, preload=True) events = read_events(event_fname) bad_ch = 'MEG 2443' picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False, exclude=[]) epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=None, proj=False) evoked = epochs.average() projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1) write_proj(op.join(tempdir, 'proj.fif.gz'), projs) for p_fname in [proj_fname, proj_gz_fname, op.join(tempdir, 'proj.fif.gz')]: projs2 = read_proj(p_fname) assert_true(len(projs) == len(projs2)) for p1, p2 in zip(projs, projs2): assert_true(p1['desc'] == p2['desc']) assert_true(p1['data']['col_names'] == p2['data']['col_names']) assert_true(p1['active'] == p2['active']) # compare with sign invariance p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0]) p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0]) if bad_ch in p1['data']['col_names']: bad = p1['data']['col_names'].index('MEG 2443') mask = np.ones(p1_data.size, dtype=np.bool) mask[bad] = False p1_data = p1_data[:, mask] p2_data = p2_data[:, mask] corr = np.corrcoef(p1_data, p2_data)[0, 1] assert_array_almost_equal(corr, 1.0, 5) # test that you can compute the projection matrix projs = activate_proj(projs) proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[]) assert_true(nproj == 2) assert_true(U.shape[1] == 2) # test that you can save them epochs.info['projs'] += projs evoked = epochs.average() evoked.save(op.join(tempdir, 'foo.fif')) projs = read_proj(proj_fname) projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0) assert_true(len(projs_evoked) == 2) # XXX : test something # test parallelization projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=2) projs = activate_proj(projs) proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[]) assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)
def test_epochs_proj(): """Test handling projection (apply proj in Raw or in Epochs) """ exclude = raw.info["bads"] + ["MEG 2443", "EEG 053"] # bads + 2 more this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, exclude=exclude) epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=True) assert_true(all(p["active"] is True for p in epochs.info["projs"])) evoked = epochs.average() assert_true(all(p["active"] is True for p in evoked.info["projs"])) data = epochs.get_data() raw_proj = io.Raw(raw_fname, proj=True) epochs_no_proj = Epochs( raw_proj, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=False ) data_no_proj = epochs_no_proj.get_data() assert_true(all(p["active"] is True for p in epochs_no_proj.info["projs"])) evoked_no_proj = epochs_no_proj.average() assert_true(all(p["active"] is True for p in evoked_no_proj.info["projs"])) assert_true(epochs_no_proj.proj is True) # as projs are active from Raw assert_array_almost_equal(data, data_no_proj, decimal=8) # make sure we can exclude avg ref this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True, eog=True, exclude=exclude) epochs = Epochs( raw, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=True, add_eeg_ref=True ) assert_true(_has_eeg_average_ref_proj(epochs.info["projs"])) epochs = Epochs( raw, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=True, add_eeg_ref=False ) assert_true(not _has_eeg_average_ref_proj(epochs.info["projs"]))
def test_ica_ctf(): """Test run ICA computation on ctf data with/without compensation.""" method = 'fastica' raw = read_raw_ctf(ctf_fname, preload=True) events = make_fixed_length_events(raw, 99999) for comp in [0, 1]: raw.apply_gradient_compensation(comp) epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True) evoked = epochs.average() # test fit for inst in [raw, epochs]: ica = ICA(n_components=2, random_state=0, max_iter=2, method=method) with pytest.warns(UserWarning, match='did not converge'): ica.fit(inst) # test apply and get_sources for inst in [raw, epochs, evoked]: ica.apply(inst) ica.get_sources(inst) # test mixed compensation case raw.apply_gradient_compensation(0) ica = ICA(n_components=2, random_state=0, max_iter=2, method=method) with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw) raw.apply_gradient_compensation(1) epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True) evoked = epochs.average() for inst in [raw, epochs, evoked]: with pytest.raises(RuntimeError, match='Compensation grade of ICA'): ica.apply(inst) with pytest.raises(RuntimeError, match='Compensation grade of ICA'): ica.get_sources(inst)
def test_read_epochs_bad_events(): """Test epochs when events are at the beginning or the end of the file """ # Event at the beginning epochs = Epochs( raw, np.array([[raw.first_samp, 0, event_id]]), event_id, tmin, tmax, picks=picks, baseline=(None, 0) ) with warnings.catch_warnings(record=True): evoked = epochs.average() epochs = Epochs( raw, np.array([[raw.first_samp, 0, event_id]]), event_id, tmin, tmax, picks=picks, baseline=(None, 0) ) epochs.drop_bad_epochs() with warnings.catch_warnings(record=True): evoked = epochs.average() # Event at the end epochs = Epochs( raw, np.array([[raw.last_samp, 0, event_id]]), event_id, tmin, tmax, picks=picks, baseline=(None, 0) ) with warnings.catch_warnings(record=True): evoked = epochs.average() assert evoked warnings.resetwarnings()
def test_evoked_io_from_epochs(): """Test IO of evoked data made from epochs """ # offset our tmin so we don't get exactly a zero value when decimating with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax, picks=picks, baseline=(None, 0), decim=5) assert_true(len(w) == 1) evoked = epochs.average() evoked.save(op.join(tempdir, "evoked-ave.fif")) evoked2 = read_evokeds(op.join(tempdir, "evoked-ave.fif"))[0] assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1 / evoked.info["sfreq"]) # now let's do one with negative time with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") epochs = Epochs(raw, events[:4], event_id, 0.1, tmax, picks=picks, baseline=(0.1, 0.2), decim=5) evoked = epochs.average() evoked.save(op.join(tempdir, "evoked-ave.fif")) evoked2 = read_evokeds(op.join(tempdir, "evoked-ave.fif"))[0] assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20) # should be equivalent to a cropped original with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") epochs = Epochs(raw, events[:4], event_id, -0.2, tmax, picks=picks, baseline=(0.1, 0.2), decim=5) evoked = epochs.average() evoked.crop(0.099, None) assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_subtract_evoked(): """Test subtraction of Evoked from Epochs """ epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks, baseline=(None, 0)) # make sure subraction fails if data channels are missing assert_raises(ValueError, epochs.subtract_evoked, epochs.average(picks[:5])) # do the subraction using the default argument epochs.subtract_evoked() # apply SSP now epochs.apply_proj() # use preloading and SSP from the start epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True, proj=True) evoked = epochs2.average() epochs2.subtract_evoked(evoked) # this gives the same result assert_allclose(epochs.get_data(), epochs2.get_data()) # if we compute the evoked response after subtracting it we get zero zero_evoked = epochs.average() data = zero_evoked.data assert_array_almost_equal(data, np.zeros_like(data), decimal=20)
def test_detrend(): """Test detrending of epochs """ # test first-order epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=None, detrend=1) epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=None, detrend=None) data_picks = pick_types(epochs_1.info, meg=True, eeg=True, exclude='bads') evoked_1 = epochs_1.average() evoked_2 = epochs_2.average() evoked_2.detrend(1) # Due to roundoff these won't be exactly equal, but they should be close assert_true(np.allclose(evoked_1.data, evoked_2.data, rtol=1e-8, atol=1e-20)) # test zeroth-order case for preload in [True, False]: epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=(None, None), preload=preload) epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=None, preload=preload, detrend=0) a = epochs_1.get_data() b = epochs_2.get_data() # All data channels should be almost equal assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :], rtol=1e-16, atol=1e-20)) # There are non-M/EEG channels that should not be equal: assert_true(not np.allclose(a, b))
def test_ica_eeg(): """Test ICA on EEG.""" method = 'fastica' raw_fif = read_raw_fif(fif_fname, preload=True) with pytest.warns(RuntimeWarning, match='events'): raw_eeglab = read_raw_eeglab(input_fname=eeglab_fname, montage=eeglab_montage, preload=True) for raw in [raw_fif, raw_eeglab]: events = make_fixed_length_events(raw, 99999, start=0, stop=0.3, duration=0.1) picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2] picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2] picks_all = [] picks_all.extend(picks_meg) picks_all.extend(picks_eeg) epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True) evoked = epochs.average() for picks in [picks_meg, picks_eeg, picks_all]: if len(picks) == 0: continue # test fit for inst in [raw, epochs]: ica = ICA(n_components=2, random_state=0, max_iter=2, method=method) with pytest.warns(None): ica.fit(inst, picks=picks) # test apply and get_sources for inst in [raw, epochs, evoked]: ica.apply(inst) ica.get_sources(inst) with pytest.warns(RuntimeWarning, match='MISC channel'): raw = read_raw_ctf(ctf_fname2, preload=True) events = make_fixed_length_events(raw, 99999, start=0, stop=0.2, duration=0.1) picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2] picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2] picks_all = picks_meg + picks_eeg for comp in [0, 1]: raw.apply_gradient_compensation(comp) epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True) evoked = epochs.average() for picks in [picks_meg, picks_eeg, picks_all]: if len(picks) == 0: continue # test fit for inst in [raw, epochs]: ica = ICA(n_components=2, random_state=0, max_iter=2, method=method) with pytest.warns(None): ica.fit(inst) # test apply and get_sources for inst in [raw, epochs, evoked]: ica.apply(inst) ica.get_sources(inst)
def test_render_report(): """Test rendering -*.fif files for mne report. """ tempdir = _TempDir() raw_fname_new = op.join(tempdir, 'temp_raw.fif') event_fname_new = op.join(tempdir, 'temp_raw-eve.fif') cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif') fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif') inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif') for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new], [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]: shutil.copyfile(a, b) # create and add -epo.fif and -ave.fif files epochs_fname = op.join(tempdir, 'temp-epo.fif') evoked_fname = op.join(tempdir, 'temp-ave.fif') raw = Raw(raw_fname_new) picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks) epochs.save(epochs_fname) epochs.average().save(evoked_fname) report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(data_path=tempdir) assert_true(len(w) >= 1) # Check correct paths and filenames fnames = glob.glob(op.join(tempdir, '*.fif')) for fname in fnames: assert_true(op.basename(fname) in [op.basename(x) for x in report.fnames]) assert_true(''.join(report.html).find(op.basename(fname)) != -1) assert_equal(len(report.fnames), len(fnames)) assert_equal(len(report.html), len(report.fnames)) # Check saving functionality report.data_path = tempdir report.save(fname=op.join(tempdir, 'report.html'), open_browser=False) assert_true(op.isfile(op.join(tempdir, 'report.html'))) assert_equal(len(report.html), len(fnames)) assert_equal(len(report.html), len(report.fnames)) # Check saving same report to new filename report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False) assert_true(op.isfile(op.join(tempdir, 'report2.html'))) # Check overwriting file report.save(fname=op.join(tempdir, 'report.html'), open_browser=False, overwrite=True) assert_true(op.isfile(op.join(tempdir, 'report.html')))
def test_compute_proj(): """Test SSP computation""" event_id, tmin, tmax = 1, -0.2, 0.3 raw = Raw(raw_fname) events = read_events(event_fname) exclude = [] bad_ch = 'MEG 2443' picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False, exclude=exclude) epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=None, proj=False) evoked = epochs.average() projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0) projs2 = read_proj(proj_fname) assert_true(len(projs) == len(projs2)) for p1, p2 in zip(projs, projs2): assert_true(p1['desc'] == p2['desc']) assert_true(p1['data']['col_names'] == p2['data']['col_names']) assert_true(p1['active'] == p2['active']) # compare with sign invariance p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0]) p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0]) if bad_ch in p1['data']['col_names']: bad = p1['data']['col_names'].index('MEG 2443') mask = np.ones(p1_data.size, dtype=np.bool) mask[bad] = False p1_data = p1_data[:, mask] p2_data = p2_data[:, mask] corr = np.corrcoef(p1_data, p2_data)[0, 1] assert_array_almost_equal(corr, 1.0, 7) # test that you can compute the projection matrix projs = activate_proj(projs) proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[]) assert_true(nproj == 2) assert_true(U.shape[1] == 2) # test that you can save them epochs.info['projs'] += projs evoked = epochs.average() evoked.save('foo.fif') projs = read_proj(proj_fname) projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
def test_preload_epochs(): """Test preload of epochs """ epochs_preload = Epochs( raw, events[:16], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True, reject=reject, flat=flat ) data_preload = epochs_preload.get_data() epochs = Epochs( raw, events[:16], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=False, reject=reject, flat=flat ) data = epochs.get_data() assert_array_equal(data_preload, data) assert_array_almost_equal(epochs_preload.average().data, epochs.average().data, 18)
def test_evoked_arithmetic(): """Test arithmetic of evoked data """ epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=(None, 0)) evoked1 = epochs1.average() epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks, baseline=(None, 0)) evoked2 = epochs2.average() epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks, baseline=(None, 0)) evoked = epochs.average() evoked_sum = evoked1 + evoked2 assert_array_equal(evoked.data, evoked_sum.data) assert_array_equal(evoked.times, evoked_sum.times) assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave)) evoked_diff = evoked1 - evoked1 assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
def test_xdawn_apply_transform(): """Test Xdawn apply and transform.""" # get data raw, events, picks = _get_data() epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, preload=True, baseline=None, verbose=False) n_components = 2 # Fit Xdawn xd = Xdawn(n_components=n_components, correct_overlap='auto') xd.fit(epochs) # apply on raw xd.apply(raw) # apply on epochs xd.apply(epochs) # apply on evoked xd.apply(epochs.average()) # apply on other thing should raise an error assert_raises(ValueError, xd.apply, 42) # transform on epochs xd.transform(epochs) # transform on ndarray xd.transform(epochs._data) # transform on someting else assert_raises(ValueError, xd.transform, 42)
def test_evoked_standard_error(): """Test calculation and read/write of standard error """ epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=(None, 0)) evoked = [epochs.average(), epochs.standard_error()] io.write_evokeds(op.join(tempdir, 'evoked.fif'), evoked) evoked2 = read_evokeds(op.join(tempdir, 'evoked.fif'), [0, 1]) evoked3 = [read_evokeds(op.join(tempdir, 'evoked.fif'), 'Unknown'), read_evokeds(op.join(tempdir, 'evoked.fif'), 'Unknown', kind='standard_error')] for evoked_new in [evoked2, evoked3]: assert_true(evoked_new[0]._aspect_kind == FIFF.FIFFV_ASPECT_AVERAGE) assert_true(evoked_new[0].kind == 'average') assert_true(evoked_new[1]._aspect_kind == FIFF.FIFFV_ASPECT_STD_ERR) assert_true(evoked_new[1].kind == 'standard_error') for ave, ave2 in zip(evoked, evoked_new): assert_array_almost_equal(ave.data, ave2.data) assert_array_almost_equal(ave.times, ave2.times) assert_equal(ave.nave, ave2.nave) assert_equal(ave._aspect_kind, ave2._aspect_kind) assert_equal(ave.kind, ave2.kind) assert_equal(ave.last, ave2.last) assert_equal(ave.first, ave2.first)
def test_pick_seeg_ecog(): """Test picking with sEEG and ECoG """ names = 'A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3'.split() types = 'mag mag eeg eeg seeg seeg ecog seeg ecog ecog'.split() info = create_info(names, 1024., types) idx = channel_indices_by_type(info) assert_array_equal(idx['mag'], [0, 1]) assert_array_equal(idx['eeg'], [2, 3]) assert_array_equal(idx['seeg'], [4, 5, 7]) assert_array_equal(idx['ecog'], [6, 8, 9]) assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7]) for i, t in enumerate(types): assert_equal(channel_type(info, i), types[i]) raw = RawArray(np.zeros((len(names), 10)), info) events = np.array([[1, 0, 0], [2, 0, 0]]) epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5, add_eeg_ref=False) evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True)) e_seeg = evoked.copy().pick_types(meg=False, seeg=True) for l, r in zip(e_seeg.ch_names, [names[4], names[5], names[7]]): assert_equal(l, r) # Deal with constant debacle raw = read_raw_fif(op.join(io_dir, 'tests', 'data', 'test_chpi_raw_sss.fif'), add_eeg_ref=False) assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0)
def test_info(): """Test info object""" raw = io.Raw(raw_fname) event_id, tmin, tmax = 1, -0.2, 0.5 events = read_events(event_name) event_id = int(events[0, 2]) epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None, baseline=(None, 0)) evoked = epochs.average() events = read_events(event_name) # Test subclassing was successful. info = Info(a=7, b='aaaaa') assert_true('a' in info) assert_true('b' in info) info[42] = 'foo' assert_true(info[42] == 'foo') # test info attribute in API objects for obj in [raw, epochs, evoked]: assert_true(isinstance(obj.info, Info)) info_str = '%s' % obj.info assert_equal(len(info_str.split('\n')), (len(obj.info.keys()) + 2)) assert_true(all(k in info_str for k in obj.info.keys()))
def test_epochs_vector_inverse(): """Test vector inverse consistency between evoked and epochs.""" raw = read_raw_fif(fname_raw) events = find_events(raw, stim_channel='STI 014')[:2] reject = dict(grad=2000e-13, mag=4e-12, eog=150e-6) epochs = Epochs(raw, events, None, 0, 0.01, baseline=None, reject=reject, preload=True) assert_equal(len(epochs), 2) evoked = epochs.average(picks=range(len(epochs.ch_names))) inv = read_inverse_operator(fname_inv) method = "MNE" snr = 3. lambda2 = 1. / snr ** 2 stcs_epo = apply_inverse_epochs(epochs, inv, lambda2, method=method, pick_ori='vector', return_generator=False) stc_epo = np.mean(stcs_epo) stc_evo = apply_inverse(evoked, inv, lambda2, method=method, pick_ori='vector') assert_allclose(stc_epo.data, stc_evo.data, rtol=1e-9, atol=0)
def test_acqparser_averaging(): """Test averaging with AcqParserFIF vs. Elekta software.""" raw = read_raw_fif(fname_raw_elekta, preload=True) acqp = AcqParserFIF(raw.info) for cat in acqp.categories: # XXX datasets match only when baseline is applied to both, # not sure where relative dc shift comes from cond = acqp.get_condition(raw, cat) eps = Epochs(raw, baseline=(-.05, 0), **cond) ev = eps.average() ev_ref = read_evokeds(fname_ave_elekta, cat['comment'], baseline=(-.05, 0), proj=False) ev_mag = ev.copy() ev_mag.pick_channels(['MEG0111']) ev_grad = ev.copy() ev_grad.pick_channels(['MEG2643', 'MEG1622']) ev_ref_mag = ev_ref.copy() ev_ref_mag.pick_channels(['MEG0111']) ev_ref_grad = ev_ref.copy() ev_ref_grad.pick_channels(['MEG2643', 'MEG1622']) assert_allclose(ev_mag.data, ev_ref_mag.data, rtol=0, atol=1e-15) # tol = 1 fT # Elekta put these in a different order assert ev_grad.ch_names[::-1] == ev_ref_grad.ch_names assert_allclose(ev_grad.data[::-1], ev_ref_grad.data, rtol=0, atol=1e-13) # tol = 1 fT/cm
def test_xdawn_apply_transform(): """Test Xdawn apply and transform.""" # Get data raw, events, picks = _get_data() raw.pick_types(eeg=True, meg=False) epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False, preload=True, baseline=None, verbose=False) n_components = 2 # Fit Xdawn xd = Xdawn(n_components=n_components, correct_overlap=False) xd.fit(epochs) # Apply on different types of instances for inst in [raw, epochs.average(), epochs]: denoise = xd.apply(inst) # Apply on other thing should raise an error assert_raises(ValueError, xd.apply, 42) # Transform on epochs xd.transform(epochs) # Transform on ndarray xd.transform(epochs._data) # Transform on someting else assert_raises(ValueError, xd.transform, 42) # Check numerical results with shuffled epochs np.random.seed(0) # random makes unstable linalg idx = np.arange(len(epochs)) np.random.shuffle(idx) xd.fit(epochs[idx]) denoise_shfl = xd.apply(epochs) assert_array_almost_equal(denoise['cond2']._data, denoise_shfl['cond2']._data)
def test_stockwell_api(): """Test stockwell functions.""" raw = read_raw_fif(raw_fname) event_id, tmin, tmax = 1, -0.2, 0.5 event_name = op.join(base_dir, 'test-eve.fif') events = read_events(event_name) epochs = Epochs(raw, events, # XXX pick 2 has epochs of zeros. event_id, tmin, tmax, picks=[0, 1, 3]) for fmin, fmax in [(None, 50), (5, 50), (5, None)]: with warnings.catch_warnings(record=True): # zero papdding power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, return_itc=True) if fmax is not None: assert_true(power.freqs.max() <= fmax) with warnings.catch_warnings(record=True): # padding power_evoked = tfr_stockwell(epochs.average(), fmin=fmin, fmax=fmax, return_itc=False) # for multitaper these don't necessarily match, but they seem to # for stockwell... if this fails, this maybe could be changed # just to check the shape assert_array_almost_equal(power_evoked.data, power.data) assert_true(isinstance(power, AverageTFR)) assert_true(isinstance(itc, AverageTFR)) assert_equal(power.data.shape, itc.data.shape) assert_true(itc.data.min() >= 0.0) assert_true(itc.data.max() <= 1.0) assert_true(np.log(power.data.max()) * 20 <= 0.0) assert_true(np.log(power.data.max()) * 20 <= 0.0)
def test_xdawn_apply_transform(): """Test Xdawn apply and transform.""" # get data raw, events, picks = _get_data() epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, preload=True, baseline=None, verbose=False) n_components = 2 # Fit Xdawn xd = Xdawn(n_components=n_components, correct_overlap='auto') xd.fit(epochs) # apply on raw xd.apply(raw) # apply on epochs denoise = xd.apply(epochs) # apply on evoked xd.apply(epochs.average()) # apply on other thing should raise an error assert_raises(ValueError, xd.apply, 42) # transform on epochs xd.transform(epochs) # transform on ndarray xd.transform(epochs._data) # transform on someting else assert_raises(ValueError, xd.transform, 42) # check numerical results with shuffled epochs idx = np.arange(len(epochs)) np.random.shuffle(idx) xd.fit(epochs[idx]) denoise_shfl = xd.apply(epochs) assert_array_equal(denoise['cond2']._data, denoise_shfl['cond2']._data)
def test_read_epochs_bad_events(): """Test epochs when events are at the beginning or the end of the file """ # Event at the beginning epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]), event_id, tmin, tmax, picks=picks, baseline=(None, 0)) evoked = epochs.average() epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]), event_id, tmin, tmax, picks=picks, baseline=(None, 0)) epochs.drop_bad_epochs() evoked = epochs.average() # Event at the end epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]), event_id, tmin, tmax, picks=picks, baseline=(None, 0)) evoked = epochs.average()
def test_epochs_proj(): """Test handling projection (apply proj in Raw or in Epochs) """ exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more this_picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, exclude=exclude) epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=True) epochs.average() data = epochs.get_data() raw_proj = fiff.Raw(raw_fname, proj_active=True) epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=False) epochs_no_proj.average() data_no_proj = epochs_no_proj.get_data() assert_array_almost_equal(data, data_no_proj, decimal=8)
def test_ica_full_data_recovery(method): """Test recovery of full data when no source is rejected.""" # Most basic recovery _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data() events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:10] with pytest.warns(RuntimeWarning, match='projection'): epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True) evoked = epochs.average() n_channels = 5 data = raw._data[:n_channels].copy() data_epochs = epochs.get_data() data_evoked = evoked.data raw.set_annotations(Annotations([0.5], [0.5], ['BAD'])) methods = [method] for method in methods: stuff = [(2, n_channels, True), (2, n_channels // 2, False)] for n_components, n_pca_components, ok in stuff: ica = ICA(n_components=n_components, random_state=0, max_pca_components=n_pca_components, n_pca_components=n_pca_components, method=method, max_iter=1) with pytest.warns(UserWarning, match=None): # sometimes warns ica.fit(raw, picks=list(range(n_channels))) raw2 = ica.apply(raw.copy(), exclude=[]) if ok: assert_allclose(data[:n_channels], raw2._data[:n_channels], rtol=1e-10, atol=1e-15) else: diff = np.abs(data[:n_channels] - raw2._data[:n_channels]) assert (np.max(diff) > 1e-14) ica = ICA(n_components=n_components, method=method, max_pca_components=n_pca_components, n_pca_components=n_pca_components, random_state=0) with pytest.warns(None): # sometimes warns ica.fit(epochs, picks=list(range(n_channels))) epochs2 = ica.apply(epochs.copy(), exclude=[]) data2 = epochs2.get_data()[:, :n_channels] if ok: assert_allclose(data_epochs[:, :n_channels], data2, rtol=1e-10, atol=1e-15) else: diff = np.abs(data_epochs[:, :n_channels] - data2) assert (np.max(diff) > 1e-14) evoked2 = ica.apply(evoked.copy(), exclude=[]) data2 = evoked2.data[:n_channels] if ok: assert_allclose(data_evoked[:n_channels], data2, rtol=1e-10, atol=1e-15) else: diff = np.abs(evoked.data[:n_channels] - data2) assert (np.max(diff) > 1e-14) pytest.raises(ValueError, ICA, method='pizza-decomposision')
def test_read_epochs(): """Test reading epochs from raw files """ epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0)) epochs.average() data = epochs.get_data() epochs_no_id = Epochs(raw, pick_events(events, include=event_id), None, tmin, tmax, picks=picks, baseline=(None, 0)) assert_array_equal(data, epochs_no_id.get_data()) eog_picks = fiff.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True) epochs.drop_picks(eog_picks) data_no_eog = epochs.get_data() assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
def test_ica_full_data_recovery(): """Test recovery of full data when no source is rejected""" # Most basic recovery raw = Raw(raw_fname).crop(0.5, stop, False) raw.load_data() events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:10] with warnings.catch_warnings(record=True): # bad proj epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True) evoked = epochs.average() n_channels = 5 data = raw._data[:n_channels].copy() data_epochs = epochs.get_data() data_evoked = evoked.data for method in ['fastica']: stuff = [(2, n_channels, True), (2, n_channels // 2, False)] for n_components, n_pca_components, ok in stuff: ica = ICA(n_components=n_components, max_pca_components=n_pca_components, n_pca_components=n_pca_components, method=method, max_iter=1) with warnings.catch_warnings(record=True): ica.fit(raw, picks=list(range(n_channels))) raw2 = ica.apply(raw, exclude=[], copy=True) if ok: assert_allclose(data[:n_channels], raw2._data[:n_channels], rtol=1e-10, atol=1e-15) else: diff = np.abs(data[:n_channels] - raw2._data[:n_channels]) assert_true(np.max(diff) > 1e-14) ica = ICA(n_components=n_components, max_pca_components=n_pca_components, n_pca_components=n_pca_components) with warnings.catch_warnings(record=True): ica.fit(epochs, picks=list(range(n_channels))) epochs2 = ica.apply(epochs, exclude=[], copy=True) data2 = epochs2.get_data()[:, :n_channels] if ok: assert_allclose(data_epochs[:, :n_channels], data2, rtol=1e-10, atol=1e-15) else: diff = np.abs(data_epochs[:, :n_channels] - data2) assert_true(np.max(diff) > 1e-14) evoked2 = ica.apply(evoked, exclude=[], copy=True) data2 = evoked2.data[:n_channels] if ok: assert_allclose(data_evoked[:n_channels], data2, rtol=1e-10, atol=1e-15) else: diff = np.abs(evoked.data[:n_channels] - data2) assert_true(np.max(diff) > 1e-14) assert_raises(ValueError, ICA, method='pizza-decomposision')
def test_epochs_proj(): """Test handling projection (apply proj in Raw or in Epochs) """ raw, events, picks = _get_data() exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, exclude=exclude) epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=True) assert_true(all(p['active'] is True for p in epochs.info['projs'])) evoked = epochs.average() assert_true(all(p['active'] is True for p in evoked.info['projs'])) data = epochs.get_data() raw_proj = io.Raw(raw_fname, proj=True) epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=False) data_no_proj = epochs_no_proj.get_data() assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs'])) evoked_no_proj = epochs_no_proj.average() assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs'])) assert_true(epochs_no_proj.proj is True) # as projs are active from Raw assert_array_almost_equal(data, data_no_proj, decimal=8) # make sure we can exclude avg ref this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True, eog=True, exclude=exclude) epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=True, add_eeg_ref=True) assert_true(_has_eeg_average_ref_proj(epochs.info['projs'])) epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=True, add_eeg_ref=False) assert_true(not _has_eeg_average_ref_proj(epochs.info['projs'])) # make sure we don't add avg ref when a custom ref has been applied raw.info['custom_ref_applied'] = True epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks, baseline=(None, 0), proj=True) assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
def test_info(): """Test info object""" raw = Raw(raw_fname) event_id, tmin, tmax = 1, -0.2, 0.5 events = read_events(event_name) event_id = int(events[0, 2]) epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None, baseline=(None, 0)) evoked = epochs.average() events = read_events(event_name) # Test subclassing was successful. info = Info(a=7, b='aaaaa') assert_true('a' in info) assert_true('b' in info) info[42] = 'foo' assert_true(info[42] == 'foo') # Test info attribute in API objects for obj in [raw, epochs, evoked]: assert_true(isinstance(obj.info, Info)) info_str = '%s' % obj.info assert_equal(len(info_str.split('\n')), len(obj.info.keys()) + 2) assert_true(all(k in info_str for k in obj.info.keys())) # Test read-only fields info = raw.info.copy() nchan = len(info['chs']) ch_names = [ch['ch_name'] for ch in info['chs']] assert_equal(info['nchan'], nchan) assert_equal(list(info['ch_names']), ch_names) # Deleting of regular fields should work info['foo'] = 'bar' del info['foo'] # Test updating of fields del info['chs'][-1] info._update_redundant() assert_equal(info['nchan'], nchan - 1) assert_equal(list(info['ch_names']), ch_names[:-1]) info['chs'][0]['ch_name'] = 'foo' info._update_redundant() assert_equal(info['ch_names'][0], 'foo') # Test casting to and from a dict info_dict = dict(info) info2 = Info(info_dict) assert_equal(info, info2)
def test_comparision_with_c(): """Test of average obtained vs C code """ c_evoked = read_evokeds(evoked_nf_name, condition=0) epochs = Epochs(raw, events, event_id, tmin, tmax, baseline=None, preload=True, reject=None, flat=None) evoked = epochs.average() sel = pick_channels(c_evoked.ch_names, evoked.ch_names) evoked_data = evoked.data c_evoked_data = c_evoked.data[sel] assert_true(evoked.nave == c_evoked.nave) assert_array_almost_equal(evoked_data, c_evoked_data, 10) assert_array_almost_equal(evoked.times, c_evoked.times, 12)
def test_info(): """Test info object.""" raw = read_raw_fif(raw_fname) event_id, tmin, tmax = 1, -0.2, 0.5 events = read_events(event_name) event_id = int(events[0, 2]) epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None, baseline=(None, 0)) evoked = epochs.average() # Test subclassing was successful. info = Info(a=7, b='aaaaa') assert ('a' in info) assert ('b' in info) info[42] = 'foo' assert (info[42] == 'foo') # Test info attribute in API objects for obj in [raw, epochs, evoked]: assert (isinstance(obj.info, Info)) info_str = '%s' % obj.info assert len(info_str.split('\n')) == len(obj.info.keys()) + 2 assert all(k in info_str for k in obj.info.keys()) assert '2002-12-03 19:01:10 GMT' in repr(obj.info), repr(obj.info) # Test read-only fields info = raw.info.copy() nchan = len(info['chs']) ch_names = [ch['ch_name'] for ch in info['chs']] assert info['nchan'] == nchan assert list(info['ch_names']) == ch_names # Deleting of regular fields should work info['foo'] = 'bar' del info['foo'] # Test updating of fields del info['chs'][-1] info._update_redundant() assert info['nchan'] == nchan - 1 assert list(info['ch_names']) == ch_names[:-1] info['chs'][0]['ch_name'] = 'foo' info._update_redundant() assert info['ch_names'][0] == 'foo' # Test casting to and from a dict info_dict = dict(info) info2 = Info(info_dict) assert info == info2
def test_ica_additional(): """Test additional ICA functionality""" tempdir = _TempDir() stop2 = 500 raw = Raw(raw_fname).crop(1.5, stop, False) raw.load_data() picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads') test_cov = read_cov(test_cov_name) events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads') epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True) # test if n_components=None works with warnings.catch_warnings(record=True): ica = ICA(n_components=None, max_pca_components=None, n_pca_components=None, random_state=0) ica.fit(epochs, picks=picks, decim=3) # for testing eog functionality picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=True, exclude='bads') epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2, baseline=(None, 0), preload=True) test_cov2 = test_cov.copy() ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4, n_pca_components=4) assert_true(ica.info is None) with warnings.catch_warnings(record=True): ica.fit(raw, picks[:5]) assert_true(isinstance(ica.info, Info)) assert_true(ica.n_components_ < 5) ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4) assert_raises(RuntimeError, ica.save, '') with warnings.catch_warnings(record=True): ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2) # test corrmap ica2 = ica.copy() corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True, ch_type="mag") corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False) assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"]) assert_true(0 in ica.labels_["blinks"]) plt.close('all') # test warnings on bad filenames with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz') ica.save(ica_badname) read_ica(ica_badname) assert_naming(w, 'test_ica.py', 2) # test decim ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4) raw_ = raw.copy() for _ in range(3): raw_.append(raw_) n_samples = raw_._data.shape[1] with warnings.catch_warnings(record=True): ica.fit(raw, picks=None, decim=3) assert_true(raw_._data.shape[1], n_samples) # test expl var ica = ICA(n_components=1.0, max_pca_components=4, n_pca_components=4) with warnings.catch_warnings(record=True): ica.fit(raw, picks=None, decim=3) assert_true(ica.n_components_ == 4) # epochs extraction from raw fit assert_raises(RuntimeError, ica.get_sources, epochs) # test reading and writing test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif') for cov in (None, test_cov): ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4, n_pca_components=4) with warnings.catch_warnings(record=True): # ICA does not converge ica.fit(raw, picks=picks, start=start, stop=stop2) sources = ica.get_sources(epochs).get_data() assert_true(ica.mixing_matrix_.shape == (2, 2)) assert_true(ica.unmixing_matrix_.shape == (2, 2)) assert_true(ica.pca_components_.shape == (4, len(picks))) assert_true(sources.shape[1] == ica.n_components_) for exclude in [[], [0]]: ica.exclude = [0] ica.labels_ = {'foo': [0]} ica.save(test_ica_fname) ica_read = read_ica(test_ica_fname) assert_true(ica.exclude == ica_read.exclude) assert_equal(ica.labels_, ica_read.labels_) ica.exclude = [] ica.apply(raw, exclude=[1]) assert_true(ica.exclude == []) ica.exclude = [0, 1] ica.apply(raw, exclude=[1]) assert_true(ica.exclude == [0, 1]) ica_raw = 
ica.get_sources(raw) assert_true( ica.exclude == [ica_raw.ch_names.index(e) for e in ica_raw.info['bads']]) # test filtering d1 = ica_raw._data[0].copy() with warnings.catch_warnings(record=True): # dB warning ica_raw.filter(4, 20) assert_true((d1 != ica_raw._data[0]).any()) d1 = ica_raw._data[0].copy() with warnings.catch_warnings(record=True): # dB warning ica_raw.notch_filter([10]) assert_true((d1 != ica_raw._data[0]).any()) ica.n_pca_components = 2 ica.save(test_ica_fname) ica_read = read_ica(test_ica_fname) assert_true(ica.n_pca_components == ica_read.n_pca_components) # check type consistency attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ ' 'pca_explained_variance_ _pre_whitener') def f(x, y): return getattr(x, y).dtype for attr in attrs.split(): assert_equal(f(ica_read, attr), f(ica, attr)) ica.n_pca_components = 4 ica_read.n_pca_components = 4 ica.exclude = [] ica.save(test_ica_fname) ica_read = read_ica(test_ica_fname) for attr in [ 'mixing_matrix_', 'unmixing_matrix_', 'pca_components_', 'pca_mean_', 'pca_explained_variance_', '_pre_whitener' ]: assert_array_almost_equal(getattr(ica, attr), getattr(ica_read, attr)) assert_true(ica.ch_names == ica_read.ch_names) assert_true(isinstance(ica_read.info, Info)) sources = ica.get_sources(raw)[:, :][0] sources2 = ica_read.get_sources(raw)[:, :][0] assert_array_almost_equal(sources, sources2) _raw1 = ica.apply(raw, exclude=[1]) _raw2 = ica_read.apply(raw, exclude=[1]) assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0]) os.remove(test_ica_fname) # check scrore funcs for name, func in get_score_funcs().items(): if name in score_funcs_unsuited: continue scores = ica.score_sources(raw, target='EOG 061', score_func=func, start=0, stop=10) assert_true(ica.n_components_ == len(scores)) # check univariate stats scores = ica.score_sources(raw, score_func=stats.skew) # check exception handling assert_raises(ValueError, ica.score_sources, raw, target=np.arange(1)) params = [] params += [(None, -1, slice(2), [0, 1])] # varicance, kurtosis idx params params += [(None, 'MEG 1531')] # ECG / EOG channel params for idx, ch_name in product(*params): ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name, eog_ch=ch_name, skew_criterion=idx, var_criterion=idx, kurt_criterion=idx) with warnings.catch_warnings(record=True): idx, scores = ica.find_bads_ecg(raw, method='ctps') assert_equal(len(scores), ica.n_components_) idx, scores = ica.find_bads_ecg(raw, method='correlation') assert_equal(len(scores), ica.n_components_) idx, scores = ica.find_bads_ecg(epochs, method='ctps') assert_equal(len(scores), ica.n_components_) assert_raises(ValueError, ica.find_bads_ecg, epochs.average(), method='ctps') assert_raises(ValueError, ica.find_bads_ecg, raw, method='crazy-coupling') idx, scores = ica.find_bads_eog(raw) assert_equal(len(scores), ica.n_components_) raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202 idx, scores = ica.find_bads_eog(raw) assert_true(isinstance(scores, list)) assert_equal(len(scores[0]), ica.n_components_) # check score funcs for name, func in get_score_funcs().items(): if name in score_funcs_unsuited: continue scores = ica.score_sources(epochs_eog, target='EOG 061', score_func=func) assert_true(ica.n_components_ == len(scores)) # check univariate stats scores = ica.score_sources(epochs, score_func=stats.skew) # check exception handling assert_raises(ValueError, ica.score_sources, epochs, target=np.arange(1)) # ecg functionality ecg_scores = ica.score_sources(raw, target='MEG 1531', 
score_func='pearsonr') with warnings.catch_warnings(record=True): # filter attenuation warning ecg_events = ica_find_ecg_events(raw, sources[np.abs(ecg_scores).argmax()]) assert_true(ecg_events.ndim == 2) # eog functionality eog_scores = ica.score_sources(raw, target='EOG 061', score_func='pearsonr') with warnings.catch_warnings(record=True): # filter attenuation warning eog_events = ica_find_eog_events(raw, sources[np.abs(eog_scores).argmax()]) assert_true(eog_events.ndim == 2) # Test ica fiff export ica_raw = ica.get_sources(raw, start=0, stop=100) assert_true(ica_raw.last_samp - ica_raw.first_samp == 100) assert_true(len(ica_raw._filenames) == 0) # API consistency ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch] assert_true(ica.n_components_ == len(ica_chans)) test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif') ica.n_components = np.int32(ica.n_components) ica_raw.save(test_ica_fname, overwrite=True) ica_raw2 = Raw(test_ica_fname, preload=True) assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4) ica_raw2.close() os.remove(test_ica_fname) # Test ica epochs export ica_epochs = ica.get_sources(epochs) assert_true(ica_epochs.events.shape == epochs.events.shape) ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch] assert_true(ica.n_components_ == len(ica_chans)) assert_true(ica.n_components_ == ica_epochs.get_data().shape[1]) assert_true(ica_epochs._raw is None) assert_true(ica_epochs.preload is True) # test float n pca components ica.pca_explained_variance_ = np.array([0.2] * 5) ica.n_components_ = 0 for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]: ncomps_ = ica._check_n_pca_components(ncomps) assert_true(ncomps_ == expected)
def test_time_frequency(): """Test time-frequency transform (PSD and ITC).""" # Set parameters event_id = 1 tmin = -0.2 tmax = 0.498 # Allows exhaustive decimation testing # Setup for reading the raw data raw = read_raw_fif(raw_fname) events = read_events(event_fname) include = [] exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more # picks MEG gradiometers picks = pick_types(raw.info, meg='grad', eeg=False, stim=False, include=include, exclude=exclude) picks = picks[:2] epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks) data = epochs.get_data() times = epochs.times nave = len(data) epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax) freqs = np.arange(6, 20, 5) # define frequencies of interest n_cycles = freqs / 4. # Test first with a single epoch power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) # Now compute evoked evoked = epochs.average() power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True, return_itc=False) pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True) power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim=slice(0, 2)) # Test picks argument and average parameter pytest.raises(ValueError, tfr_morlet, epochs, freqs=freqs, n_cycles=n_cycles, return_itc=True, average=False) power_picks, itc_picks = \ tfr_morlet(epochs_nopicks, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, picks=picks, average=True) epochs_power_picks = \ tfr_morlet(epochs_nopicks, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=False, picks=picks, average=False) power_picks_avg = epochs_power_picks.average() # the actual data arrays here are equivalent, too... 
assert_array_almost_equal(power.data, power_picks.data) assert_array_almost_equal(power.data, power_picks_avg.data) assert_array_almost_equal(itc.data, itc_picks.data) assert_array_almost_equal(power.data, power_evoked.data) # complex output pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles, return_itc=False, average=True, output="complex") pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles, output="complex", average=False, return_itc=True) epochs_power_complex = tfr_morlet(epochs, freqs, n_cycles, output="complex", average=False, return_itc=False) epochs_power_2 = abs(epochs_power_complex) epochs_power_3 = epochs_power_2.copy() epochs_power_3.data[:] = np.inf # test that it's actually copied assert_array_almost_equal(epochs_power_2.data, epochs_power_picks.data) power_2 = epochs_power_2.average() assert_array_almost_equal(power_2.data, power.data) print(itc) # test repr print(itc.ch_names) # test property itc += power # test add itc -= power # test sub power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio') assert 'meg' in power assert 'grad' in power assert 'mag' not in power assert 'eeg' not in power assert power.nave == nave assert itc.nave == nave assert (power.data.shape == (len(picks), len(freqs), len(times))) assert (power.data.shape == itc.data.shape) assert (power_.data.shape == (len(picks), len(freqs), 2)) assert (power_.data.shape == itc_.data.shape) assert (np.sum(itc.data >= 1) == 0) assert (np.sum(itc.data <= 0) == 0) # grand average itc2 = itc.copy() itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop gave = grand_average([itc2, itc]) assert gave.data.shape == (itc2.data.shape[0] - 1, itc2.data.shape[1], itc2.data.shape[2]) assert itc2.ch_names[1:] == gave.ch_names assert gave.nave == 2 itc2.drop_channels(itc2.info["bads"]) assert_array_almost_equal(gave.data, itc2.data) itc2.data = np.ones(itc2.data.shape) itc.data = np.zeros(itc.data.shape) itc2.nave = 2 itc.nave = 1 itc.drop_channels([itc.ch_names[0]]) combined_itc = combine_tfr([itc2, itc]) assert_array_almost_equal(combined_itc.data, np.ones(combined_itc.data.shape) * 2 / 3) # more tests power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False, return_itc=True) assert (power.data.shape == (len(picks), len(freqs), len(times))) assert (power.data.shape == itc.data.shape) assert (np.sum(itc.data >= 1) == 0) assert (np.sum(itc.data <= 0) == 0) tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False, return_itc=False).data[0] assert (tfr.shape == (len(picks), len(freqs), len(times))) tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, decim=slice(0, 2), average=False, return_itc=False).data[0] assert (tfr2.shape == (len(picks), len(freqs), 2)) single_power = tfr_morlet(epochs, freqs, 2, average=False, return_itc=False).data single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2), average=False, return_itc=False).data single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3), average=False, return_itc=False).data single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4), average=False, return_itc=False).data assert_array_almost_equal(np.mean(single_power, axis=0), power.data) assert_array_almost_equal(np.mean(single_power2, axis=0), power.data[:, :, :2]) assert_array_almost_equal(np.mean(single_power3, axis=0), power.data[:, :, 1:3]) assert_array_almost_equal(np.mean(single_power4, axis=0), power.data[:, :, 2:4]) power_pick = power.pick_channels(power.ch_names[:10:2]) assert_equal(len(power_pick.ch_names), 
len(power.ch_names[:10:2])) assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2])) power_drop = power.drop_channels(power.ch_names[1:10:2]) assert_equal(power_drop.ch_names, power_pick.ch_names) assert_equal(power_pick.data.shape[0], len(power_drop.ch_names)) mne.equalize_channels([power_pick, power_drop]) assert_equal(power_pick.ch_names, power_drop.ch_names) assert_equal(power_pick.data.shape, power_drop.data.shape) # Test decimation: # 2: multiple of len(times) even # 3: multiple odd # 8: not multiple, even # 9: not multiple, odd for decim in [2, 3, 8, 9]: for use_fft in [True, False]: power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=use_fft, return_itc=True, decim=decim) assert_equal(power.data.shape[2], np.ceil(float(len(times)) / decim)) freqs = list(range(50, 55)) decim = 2 _, n_chan, n_time = data.shape tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False, return_itc=False).data[0] assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim)) # Test cwt modes Ws = morlet(512, [10, 20], n_cycles=2) pytest.raises(ValueError, cwt, data[0, :, :], Ws, mode='foo') for use_fft in [True, False]: for mode in ['same', 'valid', 'full']: cwt(data[0], Ws, use_fft=use_fft, mode=mode) # Test invalid frequency arguments with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"): tfr_morlet(epochs, freqs=np.arange(0, 3), n_cycles=7) with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"): tfr_morlet(epochs, freqs=np.arange(-4, -1), n_cycles=7) # Test decim parameter checks pytest.raises(TypeError, tfr_morlet, epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim='decim') # When convolving in time, wavelets must not be longer than the data pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws, use_fft=False) with pytest.warns(UserWarning, match='one of the wavelets is longer'): cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True) # Check for off-by-one errors when using wavelets with an even number of # samples psd = cwt(data[0], [Ws[0][:-1]], use_fft=False, mode='full') assert_equal(psd.shape, (2, 1, 420))
def test_compute_proj_epochs(): """Test SSP computation on epochs.""" tempdir = _TempDir() event_id, tmin, tmax = 1, -0.2, 0.3 raw = read_raw_fif(raw_fname, preload=True) events = read_events(event_fname) bad_ch = 'MEG 2443' picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False, exclude=[]) epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=None, proj=False) evoked = epochs.average() projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1) write_proj(op.join(tempdir, 'test-proj.fif.gz'), projs) for p_fname in [ proj_fname, proj_gz_fname, op.join(tempdir, 'test-proj.fif.gz') ]: projs2 = read_proj(p_fname) assert len(projs) == len(projs2) for p1, p2 in zip(projs, projs2): assert p1['desc'] == p2['desc'] assert p1['data']['col_names'] == p2['data']['col_names'] assert p1['active'] == p2['active'] # compare with sign invariance p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0]) p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0]) if bad_ch in p1['data']['col_names']: bad = p1['data']['col_names'].index('MEG 2443') mask = np.ones(p1_data.size, dtype=bool) mask[bad] = False p1_data = p1_data[:, mask] p2_data = p2_data[:, mask] corr = np.corrcoef(p1_data, p2_data)[0, 1] assert_array_almost_equal(corr, 1.0, 5) if p2['explained_var']: assert_array_almost_equal(p1['explained_var'], p2['explained_var']) # test that you can compute the projection matrix projs = activate_proj(projs) proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[]) assert nproj == 2 assert U.shape[1] == 2 # test that you can save them epochs.info['projs'] += projs evoked = epochs.average() evoked.save(op.join(tempdir, 'foo-ave.fif')) projs = read_proj(proj_fname) projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0) assert len(projs_evoked) == 2 # XXX : test something # test parallelization projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1, desc_prefix='foobar') assert all('foobar' in x['desc'] for x in projs) projs = activate_proj(projs) proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[]) assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16) # test warnings on bad filenames proj_badname = op.join(tempdir, 'test-bad-name.fif.gz') with pytest.warns(RuntimeWarning, match='-proj.fif'): write_proj(proj_badname, projs) with pytest.warns(RuntimeWarning, match='-proj.fif'): read_proj(proj_badname)
def test_add_reference(): """Test adding a reference.""" raw = read_raw_fif(fif_fname, preload=True) picks_eeg = pick_types(raw.info, meg=False, eeg=True) # check if channel already exists pytest.raises(ValueError, add_reference_channels, raw, raw.info['ch_names'][0]) # add reference channel to Raw raw_ref = add_reference_channels(raw, 'Ref', copy=True) assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) _check_channel_names(raw_ref, 'Ref') orig_nchan = raw.info['nchan'] raw = add_reference_channels(raw, 'Ref', copy=False) assert_array_equal(raw._data, raw_ref._data) assert_equal(raw.info['nchan'], orig_nchan + 1) _check_channel_names(raw, 'Ref') # for Neuromag fif's, the reference electrode location is placed in # elements [3:6] of each "data" electrode location assert_allclose(raw.info['chs'][-1]['loc'][:3], raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6) ref_idx = raw.ch_names.index('Ref') ref_data, _ = raw[ref_idx] assert_array_equal(ref_data, 0) # add reference channel to Raw when no digitization points exist raw = read_raw_fif(fif_fname).crop(0, 1).load_data() picks_eeg = pick_types(raw.info, meg=False, eeg=True) del raw.info['dig'] raw_ref = add_reference_channels(raw, 'Ref', copy=True) assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) _check_channel_names(raw_ref, 'Ref') orig_nchan = raw.info['nchan'] raw = add_reference_channels(raw, 'Ref', copy=False) assert_array_equal(raw._data, raw_ref._data) assert_equal(raw.info['nchan'], orig_nchan + 1) _check_channel_names(raw, 'Ref') # Test adding an existing channel as reference channel pytest.raises(ValueError, add_reference_channels, raw, raw.info['ch_names'][0]) # add two reference channels to Raw raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True) _check_channel_names(raw_ref, ['M1', 'M2']) assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2) assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) assert_array_equal(raw_ref._data[-2:, :], 0) raw = add_reference_channels(raw, ['M1', 'M2'], copy=False) _check_channel_names(raw, ['M1', 'M2']) ref_idx = raw.ch_names.index('M1') ref_idy = raw.ch_names.index('M2') ref_data, _ = raw[[ref_idx, ref_idy]] assert_array_equal(ref_data, 0) # add reference channel to epochs raw = read_raw_fif(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) # default: proj=True, after which adding a Ref channel is prohibited pytest.raises(RuntimeError, add_reference_channels, epochs, 'Ref') # create epochs in delayed mode, allowing removal of CAR when re-reffing epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True, proj='delayed') epochs_ref = add_reference_channels(epochs, 'Ref', copy=True) assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1) _check_channel_names(epochs_ref, 'Ref') ref_idx = epochs_ref.ch_names.index('Ref') ref_data = epochs_ref.get_data()[:, ref_idx, :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(epochs.info, meg=False, eeg=True) assert_array_equal(epochs.get_data()[:, picks_eeg, :], epochs_ref.get_data()[:, picks_eeg, :]) # add two reference channels to epochs raw = read_raw_fif(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, 
meg=False, eeg=True) # create epochs in delayed mode, allowing removal of CAR when re-reffing epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True, proj='delayed') with pytest.warns(RuntimeWarning, match='ignored .set to zero.'): epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True) assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2) _check_channel_names(epochs_ref, ['M1', 'M2']) ref_idx = epochs_ref.ch_names.index('M1') ref_idy = epochs_ref.ch_names.index('M2') assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1') assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2') ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(epochs.info, meg=False, eeg=True) assert_array_equal(epochs.get_data()[:, picks_eeg, :], epochs_ref.get_data()[:, picks_eeg, :]) # add reference channel to evoked raw = read_raw_fif(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) # create epochs in delayed mode, allowing removal of CAR when re-reffing epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True, proj='delayed') evoked = epochs.average() evoked_ref = add_reference_channels(evoked, 'Ref', copy=True) assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1) _check_channel_names(evoked_ref, 'Ref') ref_idx = evoked_ref.ch_names.index('Ref') ref_data = evoked_ref.data[ref_idx, :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(evoked.info, meg=False, eeg=True) assert_array_equal(evoked.data[picks_eeg, :], evoked_ref.data[picks_eeg, :]) # add two reference channels to evoked raw = read_raw_fif(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) # create epochs in delayed mode, allowing removal of CAR when re-reffing epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True, proj='delayed') evoked = epochs.average() with pytest.warns(RuntimeWarning, match='ignored .set to zero.'): evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True) assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2) _check_channel_names(evoked_ref, ['M1', 'M2']) ref_idx = evoked_ref.ch_names.index('M1') ref_idy = evoked_ref.ch_names.index('M2') ref_data = evoked_ref.data[[ref_idx, ref_idy], :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(evoked.info, meg=False, eeg=True) assert_array_equal(evoked.data[picks_eeg, :], evoked_ref.data[picks_eeg, :]) # Test invalid inputs raw = read_raw_fif(fif_fname, preload=False) with pytest.raises(RuntimeError, match='loaded'): add_reference_channels(raw, ['Ref']) raw.load_data() with pytest.raises(ValueError, match='Channel.*already.*'): add_reference_channels(raw, raw.ch_names[:1]) with pytest.raises(TypeError, match='instance of'): add_reference_channels(raw, 1)
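The pattern exercised above, adding a flat reference channel back to preloaded data before re-referencing, is the same one used in analysis scripts. A minimal sketch on synthetic data (channel names, sampling rate, and values are made up for illustration):

import numpy as np
import mne

# two fake EEG channels recorded against an online reference 'Ref'
info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=256., ch_types='eeg')
raw = mne.io.RawArray(np.random.randn(2, 256), info)

# restore the (implicit, all-zero) online reference as an explicit channel
raw = mne.add_reference_channels(raw, 'Ref', copy=True)
assert np.all(raw.get_data(picks='Ref') == 0)  # reference channel is flat

# any re-referencing scheme can then be applied, e.g. an average reference
raw.set_eeg_reference('average', projection=False)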
                             verbose=True)

raw = mne.io.read_raw_fif(raw_fnames[0], verbose='error')  # Bad naming
events = find_events(raw, stim_channel="STI 014", shortest_event=1)

# Visualize raw file
raw.plot()

# Make an evoked file from the experimental data
picks = pick_types(raw.info, meg=True, eog=True, exclude='bads')

# Read epochs
event_id, tmin, tmax = 9, -0.2, 0.5
epochs = Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
                picks=picks,
                reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()  # average epochs and get an Evoked dataset.
evoked.plot(time_unit='s')

# Compare to the simulated data (use verbose='error' b/c of naming)
evoked_sim = read_evokeds(evoked_fnames[0], condition=0, verbose='error',
                          baseline=(None, 0))
evoked_sim.plot(time_unit='s')
def test_array(): """Test creating raw from array """ # creating raw = Raw(fif_fname).crop(2, 5, copy=False) data, times = raw[:, :] sfreq = raw.info['sfreq'] ch_names = [(ch[4:] if 'STI' not in ch else ch) for ch in raw.info['ch_names']] # change them, why not #del raw types = list() for ci in range(102): types.extend(('grad', 'grad', 'mag')) types.extend(['stim'] * 9) types.extend(['eeg'] * 60) # wrong length assert_raises(ValueError, create_info, ch_names, sfreq, types) # bad entry types.append('foo') assert_raises(KeyError, create_info, ch_names, sfreq, types) types[-1] = 'eog' info = create_info(ch_names, sfreq, types) raw2 = RawArray(data, info) data2, times2 = raw2[:, :] assert_allclose(data, data2) assert_allclose(times, times2) # saving temp_fname = op.join(tempdir, 'raw.fif') raw2.save(temp_fname) raw3 = Raw(temp_fname) data3, times3 = raw3[:, :] assert_allclose(data, data3) assert_allclose(times, times3) # filtering picks = pick_types(raw2.info, misc=True, exclude='bads')[:4] assert_equal(len(picks), 4) raw_lp = raw2.copy() with warnings.catch_warnings(record=True): raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2) raw_hp = raw2.copy() with warnings.catch_warnings(record=True): raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2) raw_bp = raw2.copy() with warnings.catch_warnings(record=True): raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks) raw_bs = raw2.copy() with warnings.catch_warnings(record=True): raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2) data, _ = raw2[picks, :] lp_data, _ = raw_lp[picks, :] hp_data, _ = raw_hp[picks, :] bp_data, _ = raw_bp[picks, :] bs_data, _ = raw_bs[picks, :] sig_dec = 11 assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec) assert_array_almost_equal(data, bp_data + bs_data, sig_dec) # plotting import matplotlib matplotlib.use('Agg') # for testing don't use X server raw2.plot() raw2.plot_psds() # epoching events = find_events(raw2, stim_channel='STI 014') events[:, 2] = 1 assert_true(len(events) > 2) epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True) epochs.plot_drop_log() epochs.plot() evoked = epochs.average() evoked.plot()
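The length and type checks at the top of this test mirror how create_info validates its arguments. A short sketch of the two failure modes being provoked (a ch_types list whose length does not match ch_names, and an unknown type string); the channel names here are arbitrary:

import mne

ch_names = ['MEG 0111', 'MEG 0112', 'STI 014']
sfreq = 1000.

# length mismatch between ch_names and ch_types raises ValueError
try:
    mne.create_info(ch_names, sfreq, ch_types=['grad', 'mag'])
except ValueError as err:
    print('wrong length:', err)

# an unrecognized channel type raises KeyError
try:
    mne.create_info(ch_names, sfreq, ch_types=['grad', 'mag', 'foo'])
except KeyError as err:
    print('bad type:', err)

# with matching, valid types the Info object is created
info = mne.create_info(ch_names, sfreq, ch_types=['grad', 'mag', 'stim'])
print(info['nchan'])  # 3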
def test_render_report(): """Test rendering -*.fif files for mne report. """ tempdir = _TempDir() raw_fname_new = op.join(tempdir, 'temp_raw.fif') event_fname_new = op.join(tempdir, 'temp_raw-eve.fif') cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif') fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif') inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif') for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new], [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]: shutil.copyfile(a, b) # create and add -epo.fif and -ave.fif files epochs_fname = op.join(tempdir, 'temp-epo.fif') evoked_fname = op.join(tempdir, 'temp-ave.fif') raw = Raw(raw_fname_new) picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks) epochs.save(epochs_fname) epochs.average().save(evoked_fname) report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(data_path=tempdir, on_error='raise') assert_true(len(w) >= 1) # Check correct paths and filenames fnames = glob.glob(op.join(tempdir, '*.fif')) for fname in fnames: assert_true(op.basename(fname) in [op.basename(x) for x in report.fnames]) assert_true(''.join(report.html).find(op.basename(fname)) != -1) assert_equal(len(report.fnames), len(fnames)) assert_equal(len(report.html), len(report.fnames)) # Check saving functionality report.data_path = tempdir report.save(fname=op.join(tempdir, 'report.html'), open_browser=False) assert_true(op.isfile(op.join(tempdir, 'report.html'))) assert_equal(len(report.html), len(fnames)) assert_equal(len(report.html), len(report.fnames)) # Check saving same report to new filename report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False) assert_true(op.isfile(op.join(tempdir, 'report2.html'))) # Check overwriting file report.save(fname=op.join(tempdir, 'report.html'), open_browser=False, overwrite=True) assert_true(op.isfile(op.join(tempdir, 'report.html'))) # Check pattern matching with multiple patterns pattern = ['*raw.fif', '*eve.fif'] with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(data_path=tempdir, pattern=pattern) assert_true(len(w) >= 1) fnames = glob.glob(op.join(tempdir, '*.raw')) + \ glob.glob(op.join(tempdir, '*.raw')) for fname in fnames: assert_true(op.basename(fname) in [op.basename(x) for x in report.fnames]) assert_true(''.join(report.html).find(op.basename(fname)) != -1)
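For orientation, the workflow this test exercises reduces to pointing Report.parse_folder at a directory of FIF files and writing the result to HTML. A sketch with placeholder paths (the directory and file names below are hypothetical, not files shipped with MNE):

import mne

data_path = '/path/to/fif_folder'            # hypothetical directory
info_fname = data_path + '/sample_raw.fif'   # hypothetical measurement file

report = mne.Report(info_fname=info_fname)
report.parse_folder(data_path=data_path, pattern='*raw.fif', on_error='warn')
report.save(data_path + '/report.html', open_browser=False, overwrite=True)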
def test_time_frequency(): """Test time frequency transform (PSD and phase lock) """ # Set parameters event_id = 1 tmin = -0.2 tmax = 0.5 # Setup for reading the raw data raw = io.Raw(raw_fname) events = read_events(event_fname) include = [] exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more # picks MEG gradiometers picks = pick_types(raw.info, meg='grad', eeg=False, stim=False, include=include, exclude=exclude) picks = picks[:2] epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0)) data = epochs.get_data() times = epochs.times nave = len(data) epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0)) freqs = np.arange(6, 20, 5) # define frequencies of interest n_cycles = freqs / 4. # Test first with a single epoch power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) # Now compute evoked evoked = epochs.average() power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True, return_itc=False) assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True) power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) # Test picks argument power_picks, itc_picks = tfr_morlet(epochs_nopicks, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, picks=picks) # the actual data arrays here are equivalent, too... assert_array_almost_equal(power.data, power_picks.data) assert_array_almost_equal(itc.data, itc_picks.data) assert_array_almost_equal(power.data, power_evoked.data) print(itc) # test repr print(itc.ch_names) # test property itc += power # test add itc -= power # test add power.apply_baseline(baseline=(-0.1, 0), mode='logratio') assert_true('meg' in power) assert_true('grad' in power) assert_false('mag' in power) assert_false('eeg' in power) assert_equal(power.nave, nave) assert_equal(itc.nave, nave) assert_true(power.data.shape == (len(picks), len(freqs), len(times))) assert_true(power.data.shape == itc.data.shape) assert_true(np.sum(itc.data >= 1) == 0) assert_true(np.sum(itc.data <= 0) == 0) # grand average itc2 = itc.copy() itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop gave = grand_average([itc2, itc]) assert_equal( gave.data.shape, (itc2.data.shape[0] - 1, itc2.data.shape[1], itc2.data.shape[2])) assert_equal(itc2.ch_names[1:], gave.ch_names) assert_equal(gave.nave, 2) itc2.drop_channels(itc2.info["bads"]) assert_array_almost_equal(gave.data, itc2.data) itc2.data = np.ones(itc2.data.shape) itc.data = np.zeros(itc.data.shape) itc2.nave = 2 itc.nave = 1 itc.drop_channels([itc.ch_names[0]]) combined_itc = combine_tfr([itc2, itc]) assert_array_almost_equal(combined_itc.data, np.ones(combined_itc.data.shape) * 2 / 3) # more tests power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False, return_itc=True) assert_true(power.data.shape == (len(picks), len(freqs), len(times))) assert_true(power.data.shape == itc.data.shape) assert_true(np.sum(itc.data >= 1) == 0) assert_true(np.sum(itc.data <= 0) == 0) Fs = raw.info['sfreq'] # sampling in Hz tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2) assert_true(tfr.shape == (len(picks), len(freqs), len(times))) single_power = single_trial_power(data, Fs, freqs, use_fft=False, n_cycles=2) assert_array_almost_equal(np.mean(single_power), power.data) power_pick = power.pick_channels(power.ch_names[:10:2]) assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2])) assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2])) power_drop = 
power.drop_channels(power.ch_names[1:10:2]) assert_equal(power_drop.ch_names, power_pick.ch_names) assert_equal(power_pick.data.shape[0], len(power_drop.ch_names)) mne.equalize_channels([power_pick, power_drop]) assert_equal(power_pick.ch_names, power_drop.ch_names) assert_equal(power_pick.data.shape, power_drop.data.shape)
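As a reminder of what tfr_morlet returns when return_itc=True, here is a compact, self-contained sketch on synthetic epochs (random data and arbitrary frequencies, meant only to illustrate the call signature used throughout the test):

import numpy as np
import mne
from mne.time_frequency import tfr_morlet

sfreq = 200.
info = mne.create_info(['MEG 0111', 'MEG 0112'], sfreq, ch_types='grad')
data = np.random.RandomState(0).randn(10, 2, int(sfreq))  # 10 epochs of 1 s
epochs = mne.EpochsArray(data, info, tmin=-0.2)

freqs = np.arange(6, 20, 5)
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=freqs / 4.,
                        use_fft=True, return_itc=True)
power.apply_baseline(baseline=(-0.2, 0.), mode='logratio')
print(power.data.shape)  # (n_channels, n_freqs, n_times)
assert np.all((itc.data >= 0) & (itc.data <= 1))  # ITC is bounded in [0, 1]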
def test_render_report(): """Test rendering -*.fif files for mne report.""" tempdir = _TempDir() raw_fname_new = op.join(tempdir, 'temp_raw.fif') event_fname_new = op.join(tempdir, 'temp_raw-eve.fif') cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif') fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif') inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif') for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new], [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]: shutil.copyfile(a, b) # create and add -epo.fif and -ave.fif files epochs_fname = op.join(tempdir, 'temp-epo.fif') evoked_fname = op.join(tempdir, 'temp-ave.fif') # Speed it up by picking channels raw = read_raw_fif(raw_fname_new, preload=True) raw.pick_channels(['MEG 0111', 'MEG 0121']) raw.del_proj() epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2) epochs.save(epochs_fname) # This can take forever (stall Travis), so let's make it fast # Also, make sure crop range is wide enough to avoid rendering bug epochs.average().crop(0.1, 0.2).save(evoked_fname) report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(data_path=tempdir, on_error='raise') assert_true(len(w) >= 1) assert_true(repr(report)) # Check correct paths and filenames fnames = glob.glob(op.join(tempdir, '*.fif')) for fname in fnames: assert_true( op.basename(fname) in [op.basename(x) for x in report.fnames]) assert_true(''.join(report.html).find(op.basename(fname)) != -1) assert_equal(len(report.fnames), len(fnames)) assert_equal(len(report.html), len(report.fnames)) assert_equal(len(report.fnames), len(report)) # Check saving functionality report.data_path = tempdir report.save(fname=op.join(tempdir, 'report.html'), open_browser=False) assert_true(op.isfile(op.join(tempdir, 'report.html'))) assert_equal(len(report.html), len(fnames)) assert_equal(len(report.html), len(report.fnames)) # Check saving same report to new filename report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False) assert_true(op.isfile(op.join(tempdir, 'report2.html'))) # Check overwriting file report.save(fname=op.join(tempdir, 'report.html'), open_browser=False, overwrite=True) assert_true(op.isfile(op.join(tempdir, 'report.html'))) # Check pattern matching with multiple patterns pattern = ['*raw.fif', '*eve.fif'] with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(data_path=tempdir, pattern=pattern) assert_true(len(w) >= 1) assert_true(repr(report)) fnames = glob.glob(op.join(tempdir, '*.raw')) + \ glob.glob(op.join(tempdir, '*.raw')) for fname in fnames: assert_true( op.basename(fname) in [op.basename(x) for x in report.fnames]) assert_true(''.join(report.html).find(op.basename(fname)) != -1) assert_raises(ValueError, Report, image_format='foo') assert_raises(ValueError, Report, image_format=None) # SVG rendering report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir, image_format='svg') with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(data_path=tempdir, on_error='raise')
def test_array_raw(): """Test creating raw from array.""" # creating raw = read_raw_fif(fif_fname).crop(2, 5) data, times = raw[:, :] sfreq = raw.info['sfreq'] ch_names = [(ch[4:] if 'STI' not in ch else ch) for ch in raw.info['ch_names']] # change them, why not types = list() for ci in range(101): types.extend(('grad', 'grad', 'mag')) types.extend(['ecog', 'seeg', 'hbo']) # really 4 meg channels types.extend(['stim'] * 9) types.extend(['dbs']) # really eeg channel types.extend(['eeg'] * 60) picks = np.concatenate([ pick_types(raw.info, meg=True)[::20], pick_types(raw.info, meg=False, stim=True), pick_types(raw.info, meg=False, eeg=True)[::20] ]) del raw data = data[picks] ch_names = np.array(ch_names)[picks].tolist() types = np.array(types)[picks].tolist() types.pop(-1) # wrong length pytest.raises(ValueError, create_info, ch_names, sfreq, types) # bad entry types.append('foo') pytest.raises(KeyError, create_info, ch_names, sfreq, types) types[-1] = 'eog' # default type info = create_info(ch_names, sfreq) assert_equal(info['chs'][0]['kind'], get_channel_type_constants()['misc']['kind']) # use real types info = create_info(ch_names, sfreq, types) raw2 = _test_raw_reader(RawArray, test_preloading=False, data=data, info=info, first_samp=2 * data.shape[1]) data2, times2 = raw2[:, :] assert_allclose(data, data2) assert_allclose(times, times2) assert ('RawArray' in repr(raw2)) pytest.raises(TypeError, RawArray, info, data) # filtering picks = pick_types(raw2.info, meg=True, misc=True, exclude='bads')[:4] assert_equal(len(picks), 4) raw_lp = raw2.copy() kwargs = dict(fir_design='firwin', picks=picks) raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs) raw_hp = raw2.copy() raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs) raw_bp = raw2.copy() raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4., **kwargs) raw_bs = raw2.copy() raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4., **kwargs) data, _ = raw2[picks, :] lp_data, _ = raw_lp[picks, :] hp_data, _ = raw_hp[picks, :] bp_data, _ = raw_bp[picks, :] bs_data, _ = raw_bs[picks, :] sig_dec = 15 assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec) assert_array_almost_equal(data, bp_data + bs_data, sig_dec) # plotting raw2.plot() raw2.plot_psd(tmax=2., average=True, n_fft=1024, spatial_colors=False) plt.close('all') # epoching events = find_events(raw2, stim_channel='STI 014') events[:, 2] = 1 assert len(events) > 2 epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True) evoked = epochs.average() assert_equal(evoked.nave, len(events) - 1) # complex data rng = np.random.RandomState(0) data = rng.randn(1, 100) + 1j * rng.randn(1, 100) raw = RawArray(data, create_info(1, 1000., 'eeg')) assert_allclose(raw._data, data) # Using digital montage to give MNI electrode coordinates n_elec = 10 ts_size = 10000 Fs = 512. ch_names = [str(i) for i in range(n_elec)] ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist() data = np.random.rand(n_elec, ts_size) montage = make_dig_montage(ch_pos=dict(zip(ch_names, ch_pos_loc)), coord_frame='head') info = create_info(ch_names, Fs, 'ecog') raw = RawArray(data, info) raw.set_montage(montage) raw.plot_psd(average=False) # looking for nonexistent layout raw.plot_psd_topo()
def test_time_frequency(): """Test the to-be-deprecated time-frequency transform (PSD and ITC).""" # Set parameters event_id = 1 tmin = -0.2 tmax = 0.498 # Allows exhaustive decimation testing # Setup for reading the raw data raw = read_raw_fif(raw_fname) events = read_events(event_fname) include = [] exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more # picks MEG gradiometers picks = pick_types(raw.info, meg='grad', eeg=False, stim=False, include=include, exclude=exclude) picks = picks[:2] epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks) data = epochs.get_data() times = epochs.times nave = len(data) epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax) freqs = np.arange(6, 20, 5) # define frequencies of interest n_cycles = freqs / 4. # Test first with a single epoch power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) # Now compute evoked evoked = epochs.average() power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True, return_itc=False) assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True) power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim=slice(0, 2)) # Test picks argument and average parameter assert_raises(ValueError, tfr_morlet, epochs, freqs=freqs, n_cycles=n_cycles, return_itc=True, average=False) power_picks, itc_picks = \ tfr_morlet(epochs_nopicks, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, picks=picks, average=True) epochs_power_picks = \ tfr_morlet(epochs_nopicks, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=False, picks=picks, average=False) power_picks_avg = epochs_power_picks.average() # the actual data arrays here are equivalent, too... 
assert_array_almost_equal(power.data, power_picks.data) assert_array_almost_equal(power.data, power_picks_avg.data) assert_array_almost_equal(itc.data, itc_picks.data) assert_array_almost_equal(power.data, power_evoked.data) print(itc) # test repr print(itc.ch_names) # test property itc += power # test add itc -= power # test sub power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio') assert_true('meg' in power) assert_true('grad' in power) assert_false('mag' in power) assert_false('eeg' in power) assert_equal(power.nave, nave) assert_equal(itc.nave, nave) assert_true(power.data.shape == (len(picks), len(freqs), len(times))) assert_true(power.data.shape == itc.data.shape) assert_true(power_.data.shape == (len(picks), len(freqs), 2)) assert_true(power_.data.shape == itc_.data.shape) assert_true(np.sum(itc.data >= 1) == 0) assert_true(np.sum(itc.data <= 0) == 0) # grand average itc2 = itc.copy() itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop gave = grand_average([itc2, itc]) assert_equal(gave.data.shape, (itc2.data.shape[0] - 1, itc2.data.shape[1], itc2.data.shape[2])) assert_equal(itc2.ch_names[1:], gave.ch_names) assert_equal(gave.nave, 2) itc2.drop_channels(itc2.info["bads"]) assert_array_almost_equal(gave.data, itc2.data) itc2.data = np.ones(itc2.data.shape) itc.data = np.zeros(itc.data.shape) itc2.nave = 2 itc.nave = 1 itc.drop_channels([itc.ch_names[0]]) combined_itc = combine_tfr([itc2, itc]) assert_array_almost_equal(combined_itc.data, np.ones(combined_itc.data.shape) * 2 / 3) # more tests power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False, return_itc=True) assert_true(power.data.shape == (len(picks), len(freqs), len(times))) assert_true(power.data.shape == itc.data.shape) assert_true(np.sum(itc.data >= 1) == 0) assert_true(np.sum(itc.data <= 0) == 0) tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False, return_itc=False).data[0] assert_true(tfr.shape == (len(picks), len(freqs), len(times))) tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, decim=slice(0, 2), average=False, return_itc=False).data[0] assert_true(tfr2.shape == (len(picks), len(freqs), 2)) single_power = tfr_morlet(epochs, freqs, 2, average=False, return_itc=False).data single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2), average=False, return_itc=False).data single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3), average=False, return_itc=False).data single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4), average=False, return_itc=False).data assert_array_almost_equal(np.mean(single_power, axis=0), power.data) assert_array_almost_equal(np.mean(single_power2, axis=0), power.data[:, :, :2]) assert_array_almost_equal(np.mean(single_power3, axis=0), power.data[:, :, 1:3]) assert_array_almost_equal(np.mean(single_power4, axis=0), power.data[:, :, 2:4]) power_pick = power.pick_channels(power.ch_names[:10:2]) assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2])) assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2])) power_drop = power.drop_channels(power.ch_names[1:10:2]) assert_equal(power_drop.ch_names, power_pick.ch_names) assert_equal(power_pick.data.shape[0], len(power_drop.ch_names)) mne.equalize_channels([power_pick, power_drop]) assert_equal(power_pick.ch_names, power_drop.ch_names) assert_equal(power_pick.data.shape, power_drop.data.shape) # Test decimation: # 2: multiple of len(times) even # 3: multiple odd # 8: not multiple, even # 9: not multiple, odd for decim in [2, 3, 8, 9]: for 
use_fft in [True, False]: power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=use_fft, return_itc=True, decim=decim) assert_equal(power.data.shape[2], np.ceil(float(len(times)) / decim)) freqs = list(range(50, 55)) decim = 2 _, n_chan, n_time = data.shape tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False, return_itc=False).data[0] assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim)) # Test cwt modes Ws = morlet(512, [10, 20], n_cycles=2) assert_raises(ValueError, cwt, data[0, :, :], Ws, mode='foo') for use_fft in [True, False]: for mode in ['same', 'valid', 'full']: # XXX JRK: full wavelet decomposition needs to be implemented if (not use_fft) and mode == 'full': assert_raises(ValueError, cwt, data[0, :, :], Ws, use_fft=use_fft, mode=mode) continue cwt(data[0, :, :], Ws, use_fft=use_fft, mode=mode) # Test decim parameter checks assert_raises(TypeError, tfr_morlet, epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim='decim')
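The decimation behaviour checked above (an integer decim gives ceil(n_times / decim) output samples, a slice keeps only the selected samples) can be reproduced with a small synthetic example; a sketch under the same made-up-data assumption as before:

import numpy as np
import mne
from mne.time_frequency import tfr_morlet

sfreq, n_times = 200., 200
info = mne.create_info(['MEG 0111', 'MEG 0112'], sfreq, ch_types='grad')
epochs = mne.EpochsArray(np.random.randn(8, 2, n_times), info)
freqs = np.arange(6, 20, 5)

for decim in (2, 3, 8, 9):  # multiples and non-multiples of n_times
    power = tfr_morlet(epochs, freqs=freqs, n_cycles=2, decim=decim,
                       return_itc=False)
    assert power.data.shape[-1] == int(np.ceil(n_times / decim))

# decim may also be a slice, keeping only the selected time samples
power2 = tfr_morlet(epochs, freqs=freqs, n_cycles=2, decim=slice(0, 2),
                    return_itc=False)
assert power2.data.shape[-1] == 2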
def test_render_report(renderer_pyvistaqt, tmpdir, invisible_fig): """Test rendering *.fif files for mne report.""" tempdir = str(tmpdir) raw_fname_new = op.join(tempdir, 'temp_raw.fif') raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif') ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif') event_fname_new = op.join(tempdir, 'temp_raw-eve.fif') cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif') proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif') fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif') inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif') nirs_fname_new = op.join(tempdir, 'temp_raw-nirs.snirf') for a, b in [[raw_fname, raw_fname_new], [raw_fname, raw_fname_new_bids], [ms_fname, ms_fname_new], [events_fname, event_fname_new], [cov_fname, cov_fname_new], [ecg_proj_fname, proj_fname_new], [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new], [nirs_fname, nirs_fname_new]]: shutil.copyfile(a, b) # create and add -epo.fif and -ave.fif files epochs_fname = op.join(tempdir, 'temp-epo.fif') evoked_fname = op.join(tempdir, 'temp-ave.fif') # Speed it up by picking channels raw = read_raw_fif(raw_fname_new) raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002']) raw.del_proj() raw.set_eeg_reference(projection=True).load_data() epochs = Epochs(raw, read_events(events_fname), 1, -0.2, 0.2) epochs.save(epochs_fname, overwrite=True) # This can take forever, so let's make it fast # Also, make sure crop range is wide enough to avoid rendering bug evoked = epochs.average() with pytest.warns(RuntimeWarning, match='tmax is not in Evoked'): evoked.crop(0.1, 0.2) evoked.save(evoked_fname) report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir, projs=False, image_format='png') with pytest.warns(RuntimeWarning, match='Cannot render MRI'): report.parse_folder(data_path=tempdir, on_error='raise', n_time_points_evokeds=2, raw_butterfly=False, stc_plot_kwargs=stc_plot_kwargs, topomap_kwargs=topomap_kwargs) assert repr(report) # Check correct paths and filenames fnames = glob.glob(op.join(tempdir, '*.fif')) fnames.extend(glob.glob(op.join(tempdir, '*.snirf'))) titles = [op.basename(x) for x in fnames if not x.endswith('-ave.fif')] titles.append(f'{op.basename(evoked_fname)}: {evoked.comment}') content_names = [element.name for element in report._content] for title in titles: assert title in content_names assert (''.join(report.html).find(title) != -1) assert len(report._content) == len(fnames) # Check saving functionality report.data_path = tempdir fname = op.join(tempdir, 'report.html') report.save(fname=fname, open_browser=False) assert (op.isfile(fname)) html = Path(fname).read_text(encoding='utf-8') # Evoked in `evoked_fname` assert f'{op.basename(evoked_fname)}: {evoked.comment}' in html assert 'Topographies' in html assert 'Global field power' in html assert len(report._content) == len(fnames) # Check saving same report to new filename report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False) assert (op.isfile(op.join(tempdir, 'report2.html'))) # Check overwriting file report.save(fname=op.join(tempdir, 'report.html'), open_browser=False, overwrite=True) assert (op.isfile(op.join(tempdir, 'report.html'))) # Check pattern matching with multiple patterns pattern = ['*proj.fif', '*eve.fif'] with pytest.warns(RuntimeWarning, match='Cannot render MRI'): report.parse_folder(data_path=tempdir, pattern=pattern, raw_butterfly=False) assert (repr(report)) fnames = glob.glob(op.join(tempdir, '*.raw')) + \ glob.glob(op.join(tempdir, '*.raw')) content_names = 
[element.name for element in report._content] for fname in fnames: assert (op.basename(fname) in [op.basename(x) for x in content_names]) assert (''.join(report.html).find(op.basename(fname)) != -1) with pytest.raises(ValueError, match='Invalid value'): Report(image_format='foo') with pytest.raises(ValueError, match='Invalid value'): Report(image_format=None) # ndarray support smoke test report.add_figure(fig=np.zeros((2, 3, 3)), title='title') with pytest.raises(TypeError, match='It seems you passed a path'): report.add_figure(fig='foo', title='title')
def test_add_reference(): raw = Raw(fif_fname, preload=True) picks_eeg = pick_types(raw.info, meg=False, eeg=True) # check if channel already exists assert_raises(ValueError, add_reference_channels, raw, raw.info['ch_names'][0]) # add reference channel to Raw raw_ref = add_reference_channels(raw, 'Ref', copy=True) assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) _check_channel_names(raw_ref, 'Ref') orig_nchan = raw.info['nchan'] raw = add_reference_channels(raw, 'Ref', copy=False) assert_array_equal(raw._data, raw_ref._data) assert_equal(raw.info['nchan'], orig_nchan + 1) _check_channel_names(raw, 'Ref') ref_idx = raw.ch_names.index('Ref') ref_data, _ = raw[ref_idx] assert_array_equal(ref_data, 0) raw = Raw(fif_fname, preload=True) picks_eeg = pick_types(raw.info, meg=False, eeg=True) # Test adding an existing channel as reference channel assert_raises(ValueError, add_reference_channels, raw, raw.info['ch_names'][0]) # add two reference channels to Raw raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True) _check_channel_names(raw_ref, ['M1', 'M2']) assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2) assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) assert_array_equal(raw_ref._data[-2:, :], 0) raw = add_reference_channels(raw, ['M1', 'M2'], copy=False) _check_channel_names(raw, ['M1', 'M2']) ref_idx = raw.ch_names.index('M1') ref_idy = raw.ch_names.index('M2') ref_data, _ = raw[[ref_idx, ref_idy]] assert_array_equal(ref_data, 0) # add reference channel to epochs raw = Raw(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) epochs_ref = add_reference_channels(epochs, 'Ref', copy=True) assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1) _check_channel_names(epochs_ref, 'Ref') ref_idx = epochs_ref.ch_names.index('Ref') ref_data = epochs_ref.get_data()[:, ref_idx, :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(epochs.info, meg=False, eeg=True) assert_array_equal(epochs.get_data()[:, picks_eeg, :], epochs_ref.get_data()[:, picks_eeg, :]) # add two reference channels to epochs raw = Raw(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True) assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2) _check_channel_names(epochs_ref, ['M1', 'M2']) ref_idx = epochs_ref.ch_names.index('M1') ref_idy = epochs_ref.ch_names.index('M2') assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1') assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2') ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(epochs.info, meg=False, eeg=True) assert_array_equal(epochs.get_data()[:, picks_eeg, :], epochs_ref.get_data()[:, picks_eeg, :]) # add reference channel to evoked raw = Raw(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) evoked = epochs.average() evoked_ref = add_reference_channels(evoked, 'Ref', copy=True) assert_equal(evoked_ref.data.shape[0], 
evoked.data.shape[0] + 1) _check_channel_names(evoked_ref, 'Ref') ref_idx = evoked_ref.ch_names.index('Ref') ref_data = evoked_ref.data[ref_idx, :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(evoked.info, meg=False, eeg=True) assert_array_equal(evoked.data[picks_eeg, :], evoked_ref.data[picks_eeg, :]) # add two reference channels to evoked raw = Raw(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) evoked = epochs.average() evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True) assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2) _check_channel_names(evoked_ref, ['M1', 'M2']) ref_idx = evoked_ref.ch_names.index('M1') ref_idy = evoked_ref.ch_names.index('M2') ref_data = evoked_ref.data[[ref_idx, ref_idy], :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(evoked.info, meg=False, eeg=True) assert_array_equal(evoked.data[picks_eeg, :], evoked_ref.data[picks_eeg, :]) # Test invalid inputs raw_np = Raw(fif_fname, preload=False) assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref']) assert_raises(ValueError, add_reference_channels, raw, 1)
def test_render_report(): """Test rendering -*.fif files for mne report.""" tempdir = _TempDir() raw_fname_new = op.join(tempdir, 'temp_raw.fif') ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif') event_fname_new = op.join(tempdir, 'temp_raw-eve.fif') cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif') fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif') inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif') for a, b in [[raw_fname, raw_fname_new], [ms_fname, ms_fname_new], [event_fname, event_fname_new], [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]: shutil.copyfile(a, b) # create and add -epo.fif and -ave.fif files epochs_fname = op.join(tempdir, 'temp-epo.fif') evoked_fname = op.join(tempdir, 'temp-ave.fif') # Speed it up by picking channels raw = read_raw_fif(raw_fname_new, preload=True) raw.pick_channels(['MEG 0111', 'MEG 0121']) raw.del_proj() epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2) epochs.save(epochs_fname) # This can take forever (stall Travis), so let's make it fast # Also, make sure crop range is wide enough to avoid rendering bug epochs.average().crop(0.1, 0.2).save(evoked_fname) report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir) with pytest.warns(RuntimeWarning, match='Cannot render MRI'): report.parse_folder(data_path=tempdir, on_error='raise') assert repr(report) # Check correct paths and filenames fnames = glob.glob(op.join(tempdir, '*.fif')) for fname in fnames: assert (op.basename(fname) in [op.basename(x) for x in report.fnames]) assert (''.join(report.html).find(op.basename(fname)) != -1) assert_equal(len(report.fnames), len(fnames)) assert_equal(len(report.html), len(report.fnames)) assert_equal(len(report.fnames), len(report)) # Check saving functionality report.data_path = tempdir fname = op.join(tempdir, 'report.html') report.save(fname=fname, open_browser=False) assert (op.isfile(fname)) with open(fname, 'rb') as fid: html = fid.read().decode('utf-8') assert '(MaxShield on)' in html assert_equal(len(report.html), len(fnames)) assert_equal(len(report.html), len(report.fnames)) # Check saving same report to new filename report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False) assert (op.isfile(op.join(tempdir, 'report2.html'))) # Check overwriting file report.save(fname=op.join(tempdir, 'report.html'), open_browser=False, overwrite=True) assert (op.isfile(op.join(tempdir, 'report.html'))) # Check pattern matching with multiple patterns pattern = ['*raw.fif', '*eve.fif'] with pytest.warns(RuntimeWarning, match='Cannot render MRI'): report.parse_folder(data_path=tempdir, pattern=pattern) assert (repr(report)) fnames = glob.glob(op.join(tempdir, '*.raw')) + \ glob.glob(op.join(tempdir, '*.raw')) for fname in fnames: assert (op.basename(fname) in [op.basename(x) for x in report.fnames]) assert (''.join(report.html).find(op.basename(fname)) != -1) pytest.raises(ValueError, Report, image_format='foo') pytest.raises(ValueError, Report, image_format=None) # SVG rendering report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir, image_format='svg') with pytest.warns(RuntimeWarning, match='Cannot render MRI'): report.parse_folder(data_path=tempdir, on_error='raise') # ndarray support smoke test report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
def test_array_raw(): """Test creating raw from array.""" import matplotlib.pyplot as plt # creating raw = read_raw_fif(fif_fname).crop(2, 5) data, times = raw[:, :] sfreq = raw.info['sfreq'] ch_names = [(ch[4:] if 'STI' not in ch else ch) for ch in raw.info['ch_names']] # change them, why not # del raw types = list() for ci in range(101): types.extend(('grad', 'grad', 'mag')) types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels types.extend(['stim'] * 9) types.extend(['eeg'] * 60) # wrong length assert_raises(ValueError, create_info, ch_names, sfreq, types) # bad entry types.append('foo') assert_raises(KeyError, create_info, ch_names, sfreq, types) types[-1] = 'eog' # default type info = create_info(ch_names, sfreq) assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0]) # use real types info = create_info(ch_names, sfreq, types) raw2 = _test_raw_reader(RawArray, test_preloading=False, data=data, info=info, first_samp=2 * data.shape[1]) data2, times2 = raw2[:, :] assert_allclose(data, data2) assert_allclose(times, times2) assert_true('RawArray' in repr(raw2)) assert_raises(TypeError, RawArray, info, data) # filtering picks = pick_types(raw2.info, misc=True, exclude='bads')[:4] assert_equal(len(picks), 4) raw_lp = raw2.copy() raw_lp.filter(None, 4.0, h_trans_bandwidth=4., filter_length='auto', picks=picks, n_jobs=2, phase='zero', fir_window='hamming') raw_hp = raw2.copy() raw_hp.filter(16.0, None, l_trans_bandwidth=4., filter_length='auto', picks=picks, n_jobs=2, phase='zero', fir_window='hamming') raw_bp = raw2.copy() raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4., filter_length='auto', picks=picks, phase='zero', fir_window='hamming') raw_bs = raw2.copy() raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4., filter_length='auto', picks=picks, n_jobs=2, phase='zero', fir_window='hamming') data, _ = raw2[picks, :] lp_data, _ = raw_lp[picks, :] hp_data, _ = raw_hp[picks, :] bp_data, _ = raw_bp[picks, :] bs_data, _ = raw_bs[picks, :] sig_dec = 15 assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec) assert_array_almost_equal(data, bp_data + bs_data, sig_dec) # plotting raw2.plot() raw2.plot_psd(tmax=np.inf, average=True, n_fft=1024, spatial_colors=False) plt.close('all') # epoching events = find_events(raw2, stim_channel='STI 014') events[:, 2] = 1 assert_true(len(events) > 2) epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True) epochs.plot_drop_log() epochs.plot() evoked = epochs.average() evoked.plot() assert_equal(evoked.nave, len(events) - 1) plt.close('all') # complex data rng = np.random.RandomState(0) data = rng.randn(1, 100) + 1j * rng.randn(1, 100) raw = RawArray(data, create_info(1, 1000., 'eeg')) assert_allclose(raw._data, data)
#                template=(0, 1), threshold=0.90, label='blink_misc',
#                plot=False)

###############################################################################
# 4) Create summary plots to show signal correction on main experimental
#    condition

# create a-cue epochs
a_evs = events_from_annotations(raw, regexp='^(70)')[0]
a_epo = Epochs(raw, a_evs,
               tmin=-1.0, tmax=2.0,
               reject_by_annotation=True,
               proj=False, preload=True)
a_epo.apply_baseline(baseline=(-0.3, -0.05))
a_evo = a_epo.average()

# loop over identified "bad" components
bad_components = []
for label in ica.labels_:
    bad_components.extend(ica.labels_[label])

for bad_comp in np.unique(bad_components):
    # show component frequency spectrum
    fig_comp = ica.plot_properties(a_epo, picks=bad_comp,
                                   psd_args={'fmax': 35.},
                                   show=False)[0]

    # show how the signal is affected by component rejection
    fig_evoked = ica.plot_overlay(a_evo, exclude=[bad_comp], show=False)
# Use scikit-learn Pipeline with cross_val_score function
from sklearn.pipeline import Pipeline  # noqa
from sklearn.cross_validation import cross_val_score  # noqa

clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)

# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
                                                          class_balance))

# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)

evoked = epochs.average()
evoked.data = csp.patterns_.T
evoked.times = np.arange(evoked.data.shape[0])

layout = read_layout('EEG1005')
evoked.plot_topomap(times=[0, 1, 2, 3, 4, 5], ch_type='eeg', layout=layout,
                    scale_time=1, time_format='%i', scale=1,
                    unit='Patterns (AU)', size=1.5)

###############################################################################
# Look at performance over time
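A side note on the cross-validation block above: sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed in 0.20. A sketch of the equivalent pipeline with the current import locations (epochs_data_train and labels are assumed to exist exactly as in the example above; the CSP/LDA settings are common defaults, not taken from the original script):

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.pipeline import Pipeline
from mne.decoding import CSP

csp = CSP(n_components=4, reg=None, log=True)
lda = LinearDiscriminantAnalysis()
clf = Pipeline([('CSP', csp), ('LDA', lda)])

# 10 random train/test splits, 20% of the epochs held out each time
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=42)
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
print("Classification accuracy: %f" % scores.mean())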
def test_apply_reference(): """Test base function for rereferencing.""" raw = read_raw_fif(fif_fname, preload=True) # Rereference raw data by creating a copy of original data reref, ref_data = _apply_reference(raw.copy(), ref_from=['EEG 001', 'EEG 002']) assert (reref.info['custom_ref_applied']) _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002']) # The CAR reference projection should have been removed by the function assert (not _has_eeg_average_ref_proj(reref.info['projs'])) # Test that data is modified in place when copy=False reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002']) assert (raw is reref) # Test that disabling the reference does not change anything reref, ref_data = _apply_reference(raw.copy(), []) assert_array_equal(raw._data, reref._data) # Test re-referencing Epochs object raw = read_raw_fif(fif_fname, preload=False) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) reref, ref_data = _apply_reference(epochs.copy(), ref_from=['EEG 001', 'EEG 002']) assert (reref.info['custom_ref_applied']) _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002']) # Test re-referencing Evoked object evoked = epochs.average() reref, ref_data = _apply_reference(evoked.copy(), ref_from=['EEG 001', 'EEG 002']) assert (reref.info['custom_ref_applied']) _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002']) # Referencing needs data to be preloaded raw_np = read_raw_fif(fif_fname, preload=False) pytest.raises(RuntimeError, _apply_reference, raw_np, ['EEG 001']) # Test having inactive SSP projections that deal with channels involved # during re-referencing raw = read_raw_fif(fif_fname, preload=True) raw.add_proj( Projection( active=False, data=dict(col_names=['EEG 001', 'EEG 002'], row_names=None, data=np.array([[1, 1]]), ncol=2, nrow=1), desc='test', kind=1, )) # Projection concerns channels mentioned in projector with pytest.raises(RuntimeError, match='Inactive signal space'): _apply_reference(raw, ['EEG 001']) # Projection does not concern channels mentioned in projector, no error _apply_reference(raw, ['EEG 003'], ['EEG 004']) # CSD cannot be rereferenced raw.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_CSD with pytest.raises(RuntimeError, match="Cannot set.* type 'CSD'"): raw.set_eeg_reference()
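_apply_reference is the private helper; user code reaches the same functionality through set_eeg_reference, which covers the cases tested here (explicit reference channels, and the average reference added as a projector). A sketch on synthetic EEG (fake channel names, random data; the method is assumed to return the modified instance, as in recent MNE versions):

import numpy as np
import mne

info = mne.create_info(['EEG 001', 'EEG 002', 'EEG 003'], 256., ch_types='eeg')
raw = mne.io.RawArray(np.random.randn(3, 512), info)

# re-reference to the mean of two explicit channels (work on a copy so the
# original stays untouched)
raw_reref = raw.copy().set_eeg_reference(ref_channels=['EEG 001', 'EEG 002'])
assert raw_reref.info['custom_ref_applied']

# average reference added as an SSP projector (inactive until applied)
raw_car = raw.copy().set_eeg_reference('average', projection=True)
assert len(raw_car.info['projs']) == 1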
def test_combine_channels(): """Test channel combination on Raw, Epochs, and Evoked.""" raw = read_raw_fif(raw_fname, preload=True) raw_ch_bad = read_raw_fif(raw_fname, preload=True) raw_ch_bad.info['bads'] = ['MEG 0113', 'MEG 0112'] epochs = Epochs(raw, read_events(eve_fname)) evoked = epochs.average() good = dict(foo=[0, 1, 3, 4], bar=[5, 2]) # good grad and mag # Test good cases combine_channels(raw, good) combined_epochs = combine_channels(epochs, good) assert_array_equal(combined_epochs.events, epochs.events) assert epochs.baseline == combined_epochs.baseline combined_evoked = combine_channels(evoked, good) assert evoked.baseline == combined_evoked.baseline combine_channels(raw, good, drop_bad=True) combine_channels(raw_ch_bad, good, drop_bad=True) # Test with stimulus channels combine_stim = combine_channels(raw, good, keep_stim=True) target_nchan = len(good) + len(pick_types(raw.info, meg=False, stim=True)) assert combine_stim.info['nchan'] == target_nchan # Test results with one ROI good_single = dict(foo=[0, 1, 3, 4]) # good grad combined_mean = combine_channels(raw, good_single, method='mean') combined_median = combine_channels(raw, good_single, method='median') combined_std = combine_channels(raw, good_single, method='std') foo_mean = np.mean(raw.get_data()[good_single['foo']], axis=0) foo_median = np.median(raw.get_data()[good_single['foo']], axis=0) foo_std = np.std(raw.get_data()[good_single['foo']], axis=0) assert_array_equal(combined_mean.get_data(), np.expand_dims(foo_mean, axis=0)) assert_array_equal(combined_median.get_data(), np.expand_dims(foo_median, axis=0)) assert_array_equal(combined_std.get_data(), np.expand_dims(foo_std, axis=0)) # Test bad cases bad1 = dict(foo=[0, 376], bar=[5, 2]) # out of bounds bad2 = dict(foo=[0, 2], bar=[5, 2]) # type mix in same group with pytest.raises(ValueError, match='"method" must be a callable, or'): combine_channels(raw, good, method='bad_method') with pytest.raises(TypeError, match='"keep_stim" must be of type bool'): combine_channels(raw, good, keep_stim='bad_type') with pytest.raises(TypeError, match='"drop_bad" must be of type bool'): combine_channels(raw, good, drop_bad='bad_type') with pytest.raises(ValueError, match='Some channel indices are out of'): combine_channels(raw, bad1) with pytest.raises(ValueError, match='Cannot combine sensors of diff'): combine_channels(raw, bad2) # Test warnings raw_no_stim = read_raw_fif(raw_fname, preload=True) raw_no_stim.pick_types(meg=True, stim=False) warn1 = dict(foo=[375, 375], bar=[5, 2]) # same channel in same group warn2 = dict(foo=[375], bar=[5, 2]) # one channel (last channel) warn3 = dict(foo=[0, 4], bar=[5, 2]) # one good channel left with pytest.warns(RuntimeWarning, match='Could not find stimulus'): combine_channels(raw_no_stim, good, keep_stim=True) with pytest.warns(RuntimeWarning, match='Less than 2 channels') as record: combine_channels(raw, warn1) combine_channels(raw, warn2) combine_channels(raw_ch_bad, warn3, drop_bad=True) assert len(record) == 3
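combine_channels builds "virtual" ROI channels from groups of channel indices; a minimal sketch of the mean-combination case the test verifies (synthetic data, arbitrary names and groupings):

import numpy as np
import mne
from mne.channels import combine_channels

info = mne.create_info(['EEG %02d' % i for i in range(6)], 256.,
                       ch_types='eeg')
raw = mne.io.RawArray(np.random.randn(6, 512), info)

groups = dict(frontal=[0, 1, 2], parietal=[3, 4, 5])
roi = combine_channels(raw, groups, method='mean')

# the group names become the new channel names
assert set(roi.ch_names) == {'frontal', 'parietal'}
idx = roi.ch_names.index('frontal')
np.testing.assert_allclose(roi.get_data()[idx],
                           raw.get_data()[[0, 1, 2]].mean(axis=0))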
def test_ica_additional(method): """Test additional ICA functionality.""" _skip_check_picard(method) import matplotlib.pyplot as plt tempdir = _TempDir() stop2 = 500 raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data() raw.del_proj() # avoid warnings raw.set_annotations(Annotations([0.5], [0.5], ['BAD'])) # XXX This breaks the tests :( # raw.info['bads'] = [raw.ch_names[1]] test_cov = read_cov(test_cov_name) events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[1::2] epochs = Epochs(raw, events, None, tmin, tmax, picks=picks, baseline=(None, 0), preload=True, proj=False) epochs.decimate(3, verbose='error') assert len(epochs) == 4 # test if n_components=None works ica = ICA(n_components=None, max_pca_components=None, n_pca_components=None, random_state=0, method=method, max_iter=1) with pytest.warns(UserWarning, match='did not converge'): ica.fit(epochs) # for testing eog functionality picks2 = np.concatenate([picks, pick_types(raw.info, False, eog=True)]) epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2, baseline=(None, 0), preload=True) del picks2 test_cov2 = test_cov.copy() ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4, n_pca_components=4, method=method) assert (ica.info is None) with pytest.warns(RuntimeWarning, match='normalize_proj'): ica.fit(raw, picks[:5]) assert (isinstance(ica.info, Info)) assert (ica.n_components_ < 5) ica = ICA(n_components=3, max_pca_components=4, method=method, n_pca_components=4, random_state=0) pytest.raises(RuntimeError, ica.save, '') ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2) # check passing a ch_name to find_bads_ecg with pytest.warns(RuntimeWarning, match='longer'): _, scores_1 = ica.find_bads_ecg(raw) _, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1]) assert scores_1[0] != scores_2[0] # test corrmap ica2 = ica.copy() ica3 = ica.copy() corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True, ch_type="mag") corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False) assert (ica.labels_["blinks"] == ica2.labels_["blinks"]) assert (0 in ica.labels_["blinks"]) # test retrieval of component maps as arrays components = ica.get_components() template = components[:, 0] EvokedArray(components, ica.info, tmin=0.).plot_topomap([0], time_unit='s') corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True, ch_type="mag") assert (ica2.labels_["blinks"] == ica3.labels_["blinks"]) plt.close('all') # test warnings on bad filenames ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz') with pytest.warns(RuntimeWarning, match='-ica.fif'): ica.save(ica_badname) with pytest.warns(RuntimeWarning, match='-ica.fif'): read_ica(ica_badname) # test decim ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4, method=method, max_iter=1) raw_ = raw.copy() for _ in range(3): raw_.append(raw_) n_samples = raw_._data.shape[1] with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw, picks=picks[:5], decim=3) assert raw_._data.shape[1] == n_samples # test expl var ica = ICA(n_components=1.0, max_pca_components=4, n_pca_components=4, method=method, max_iter=1) with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw, picks=None, decim=3) assert (ica.n_components_ == 4) ica_var = _ica_explained_variance(ica, raw, normalize=True) assert (np.all(ica_var[:-1] >= ica_var[1:])) # test ica sorting ica.exclude = [0] ica.labels_ = dict(blink=[0], think=[1]) ica_sorted = 
_sort_components(ica, [3, 2, 1, 0], copy=True) assert_equal(ica_sorted.exclude, [3]) assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2])) # epochs extraction from raw fit pytest.raises(RuntimeError, ica.get_sources, epochs) # test reading and writing test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif') for cov in (None, test_cov): ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4, n_pca_components=4, method=method, max_iter=1) with pytest.warns(None): # ICA does not converge ica.fit(raw, picks=picks[:10], start=start, stop=stop2) sources = ica.get_sources(epochs).get_data() assert (ica.mixing_matrix_.shape == (2, 2)) assert (ica.unmixing_matrix_.shape == (2, 2)) assert (ica.pca_components_.shape == (4, 10)) assert (sources.shape[1] == ica.n_components_) for exclude in [[], [0], np.array([1, 2, 3])]: ica.exclude = exclude ica.labels_ = {'foo': [0]} ica.save(test_ica_fname) ica_read = read_ica(test_ica_fname) assert (list(ica.exclude) == ica_read.exclude) assert_equal(ica.labels_, ica_read.labels_) ica.apply(raw) ica.exclude = [] ica.apply(raw, exclude=[1]) assert (ica.exclude == []) ica.exclude = [0, 1] ica.apply(raw, exclude=[1]) assert (ica.exclude == [0, 1]) ica_raw = ica.get_sources(raw) assert (ica.exclude == [ ica_raw.ch_names.index(e) for e in ica_raw.info['bads'] ]) # test filtering d1 = ica_raw._data[0].copy() ica_raw.filter(4, 20, fir_design='firwin2') assert_equal(ica_raw.info['lowpass'], 20.) assert_equal(ica_raw.info['highpass'], 4.) assert ((d1 != ica_raw._data[0]).any()) d1 = ica_raw._data[0].copy() ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin') assert ((d1 != ica_raw._data[0]).any()) ica.n_pca_components = 2 ica.method = 'fake' ica.save(test_ica_fname) ica_read = read_ica(test_ica_fname) assert (ica.n_pca_components == ica_read.n_pca_components) assert_equal(ica.method, ica_read.method) assert_equal(ica.labels_, ica_read.labels_) # check type consistency attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ ' 'pca_explained_variance_ pre_whitener_') def f(x, y): return getattr(x, y).dtype for attr in attrs.split(): assert_equal(f(ica_read, attr), f(ica, attr)) ica.n_pca_components = 4 ica_read.n_pca_components = 4 ica.exclude = [] ica.save(test_ica_fname) ica_read = read_ica(test_ica_fname) for attr in [ 'mixing_matrix_', 'unmixing_matrix_', 'pca_components_', 'pca_mean_', 'pca_explained_variance_', 'pre_whitener_' ]: assert_array_almost_equal(getattr(ica, attr), getattr(ica_read, attr)) assert (ica.ch_names == ica_read.ch_names) assert (isinstance(ica_read.info, Info)) sources = ica.get_sources(raw)[:, :][0] sources2 = ica_read.get_sources(raw)[:, :][0] assert_array_almost_equal(sources, sources2) _raw1 = ica.apply(raw, exclude=[1]) _raw2 = ica_read.apply(raw, exclude=[1]) assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0]) os.remove(test_ica_fname) # check score funcs for name, func in get_score_funcs().items(): if name in score_funcs_unsuited: continue scores = ica.score_sources(raw, target='EOG 061', score_func=func, start=0, stop=10) assert (ica.n_components_ == len(scores)) # check univariate stats scores = ica.score_sources(raw, score_func=stats.skew) # check exception handling pytest.raises(ValueError, ica.score_sources, raw, target=np.arange(1)) params = [] params += [(None, -1, slice(2), [0, 1])] # variance, kurtosis params params += [(None, 'MEG 1531')] # ECG / EOG channel params for idx, ch_name in product(*params): ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name, eog_ch=ch_name, 
        skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
    evoked = epochs.average()
    evoked_data = evoked.data.copy()
    raw_data = raw[:][0].copy()
    epochs_data = epochs.get_data().copy()

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
    assert_equal(len(scores), ica.n_components_)
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
    assert_equal(len(scores), ica.n_components_)
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(epochs, method='ctps')
    assert_equal(len(scores), ica.n_components_)
    pytest.raises(ValueError, ica.find_bads_ecg, epochs.average(),
                  method='ctps')
    pytest.raises(ValueError, ica.find_bads_ecg, raw,
                  method='crazy-coupling')

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert (isinstance(scores, list))
    assert_equal(len(scores[0]), ica.n_components_)

    idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(evoked, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    assert_array_equal(raw_data, raw[:][0])
    assert_array_equal(epochs_data, epochs.get_data())
    assert_array_equal(evoked_data, evoked.data)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    pytest.raises(ValueError, ica.score_sources, epochs, target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')
    with pytest.warns(RuntimeWarning, match='longer'):
        ecg_events = ica_find_ecg_events(
            raw, sources[np.abs(ecg_scores).argmax()])
    assert (ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with pytest.warns(RuntimeWarning, match='longer'):
        eog_events = ica_find_eog_events(
            raw, sources[np.abs(eog_scores).argmax()])
    assert (eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert (ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_equal(len(ica_raw._filenames), 1)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))

    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert (ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    assert (ica.n_components_ == ica_epochs.get_data().shape[1])
    assert (ica_epochs._raw is None)
    assert (ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert (ncomps_ == expected)

    ica = ICA(method=method)
    with pytest.warns(None):  # sometimes does not converge
        ica.fit(raw, picks=picks[:5])
    with pytest.warns(RuntimeWarning, match='longer'):
        ica.find_bads_ecg(raw)
    ica.find_bads_eog(epochs, ch_name='MEG 0121')
    assert_array_equal(raw_data, raw[:][0])

    raw.drop_channels(['MEG 0122'])
    pytest.raises(RuntimeError, ica.find_bads_eog, raw)
    with pytest.warns(RuntimeWarning, match='longer'):
        pytest.raises(RuntimeError, ica.find_bads_ecg, raw)
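
# Illustrative usage sketch for the artifact-scoring API exercised above:
# fit ICA, score components against the EOG/ECG channels, and project the
# offending components out. The file path and the helper name are
# placeholders, not part of the test suite.
def _sketch_ica_artifact_rejection():
    """Minimal sketch; assumes a preloaded Raw with EOG and ECG channels."""
    import mne
    raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)  # placeholder
    picks = mne.pick_types(raw.info, meg=True, eeg=False, exclude='bads')
    ica = mne.preprocessing.ICA(n_components=15, random_state=0)
    ica.fit(raw, picks=picks)
    # find_bads_* return (component indices, one score per component)
    eog_inds, eog_scores = ica.find_bads_eog(raw)
    ecg_inds, ecg_scores = ica.find_bads_ecg(raw, method='ctps')
    ica.exclude = sorted(set(eog_inds + ecg_inds))
    raw_clean = ica.apply(raw.copy())  # remove the marked components
    return raw_clean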
def test_ica_eeg():
    """Test ICA on EEG."""
    method = 'fastica'
    raw_fif = read_raw_fif(fif_fname, preload=True)
    raw_eeglab = read_raw_eeglab(input_fname=eeglab_fname,
                                 montage=eeglab_montage, preload=True)
    for raw in [raw_fif, raw_eeglab]:
        events = make_fixed_length_events(raw, 99999, start=0, stop=0.3,
                                          duration=0.1)
        picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
        picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
        picks_all = []
        picks_all.extend(picks_meg)
        picks_all.extend(picks_eeg)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2, random_state=0, max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst, picks=picks)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)

    with pytest.warns(RuntimeWarning, match='MISC channel'):
        raw = read_raw_ctf(ctf_fname2, preload=True)
    events = make_fixed_length_events(raw, 99999, start=0, stop=0.2,
                                      duration=0.1)
    picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
    picks_all = picks_meg + picks_eeg
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2, random_state=0, max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)
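
# Illustrative usage sketch for running ICA on EEG-only data as exercised
# above, here on a recording loaded from an EEGLAB .set file. The file name
# and helper name are placeholders.
def _sketch_ica_on_eeg():
    """Minimal sketch; assumes an EEGLAB file with EEG channels."""
    import mne
    raw = mne.io.read_raw_eeglab('recording.set', preload=True)  # placeholder
    picks_eeg = mne.pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    events = mne.make_fixed_length_events(raw, id=1, duration=1.)
    epochs = mne.Epochs(raw, events, tmin=0., tmax=1., baseline=None,
                        preload=True)
    ica = mne.preprocessing.ICA(n_components=0.95, random_state=0)
    ica.fit(epochs, picks=picks_eeg)
    sources = ica.get_sources(epochs)  # ICA activations as an Epochs object
    return ica, sources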
def test_channel_name_limit(tmpdir, monkeypatch, fname):
    """Test that our remapping works properly."""
    #
    # raw
    #
    if fname.endswith('fif'):
        raw = read_raw_fif(fname)
        raw.pick_channels(raw.ch_names[:3])
        ref_names = []
        data_names = raw.ch_names
    else:
        assert fname.endswith('.ds')
        raw = read_raw_ctf(fname)
        ref_names = [raw.ch_names[pick]
                     for pick in pick_types(raw.info, meg=False,
                                            ref_meg=True)]
        data_names = raw.ch_names[32:35]
    proj = dict(data=np.ones((1, len(data_names))),
                col_names=data_names[:2].copy(), row_names=None, nrow=1)
    proj = Projection(
        data=proj, active=False, desc='test', kind=0, explained_var=0.)
    raw.add_proj(proj, remove_existing=True)
    raw.info.normalize_proj()
    raw.pick_channels(data_names + ref_names).crop(0, 2)
    long_names = ['123456789abcdefg' + name for name in raw.ch_names]
    fname = tmpdir.join('test-raw.fif')
    with catch_logging() as log:
        raw.save(fname)
    log = log.getvalue()
    assert 'truncated' not in log
    rename = dict(zip(raw.ch_names, long_names))
    long_data_names = [rename[name] for name in data_names]
    long_proj_names = long_data_names[:2]
    raw.rename_channels(rename)
    for comp in raw.info['comps']:
        for key in ('row_names', 'col_names'):
            for name in comp['data'][key]:
                assert name in raw.ch_names
    if raw.info['comps']:
        assert raw.compensation_grade == 0
        raw.apply_gradient_compensation(3)
        assert raw.compensation_grade == 3
    assert len(raw.info['projs']) == 1
    assert raw.info['projs'][0]['data']['col_names'] == long_proj_names
    raw.info['bads'] = bads = long_data_names[2:3]
    good_long_data_names = [
        name for name in long_data_names if name not in bads]
    with catch_logging() as log:
        raw.save(fname, overwrite=True, verbose=True)
    log = log.getvalue()
    assert 'truncated to 15' in log
    for name in raw.ch_names:
        assert len(name) > 15
    # first read the full way
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'Reading extended channel information' in log
    for ra in (raw, raw_read):
        assert ra.ch_names == long_names
    assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names
    del raw_read

    # next read as if no longer names could be read
    monkeypatch.setattr(
        meas_info, '_read_extended_ch_info', lambda x, y, z: None)
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'extended' not in log
    if raw.info['comps']:
        assert raw_read.compensation_grade == 3
        raw_read.apply_gradient_compensation(0)
        assert raw_read.compensation_grade == 0
    monkeypatch.setattr(  # restore
        meas_info, '_read_extended_ch_info', _read_extended_ch_info)
    short_proj_names = [
        f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}'
        for ni, name in enumerate(long_data_names[:2])]
    assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names

    #
    # epochs
    #
    epochs = Epochs(raw, make_fixed_length_events(raw))
    fname = tmpdir.join('test-epo.fif')
    epochs.save(fname)
    epochs_read = read_epochs(fname)
    for ep in (epochs, epochs_read):
        assert ep.info['ch_names'] == long_names
        assert ep.ch_names == long_names
    del raw, epochs_read

    # cov
    epochs.info['bads'] = []
    cov = compute_covariance(epochs, verbose='error')
    fname = tmpdir.join('test-cov.fif')
    write_cov(fname, cov)
    cov_read = read_cov(fname)
    for co in (cov, cov_read):
        assert co['names'] == long_data_names
        assert co['bads'] == []
    del cov_read

    #
    # evoked
    #
    evoked = epochs.average()
    evoked.info['bads'] = bads
    assert evoked.nave == 1
    fname = tmpdir.join('test-ave.fif')
    evoked.save(fname)
    evoked_read = read_evokeds(fname)[0]
    for ev in (evoked, evoked_read):
        assert ev.ch_names == long_names
        assert ev.info['bads'] == bads
    del evoked_read, epochs

    #
    # forward
    #
    with pytest.warns(None):  # not enough points for CTF
        sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(
        pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]]))
    fwd = make_forward_solution(evoked.info, None, src, sphere)
    fname = tmpdir.join('temp-fwd.fif')
    write_forward_solution(fname, fwd)
    fwd_read = read_forward_solution(fname)
    for fw in (fwd, fwd_read):
        assert fw['sol']['row_names'] == long_data_names
        assert fw['info']['ch_names'] == long_data_names
        assert fw['info']['bads'] == bads
    del fwd_read

    #
    # inv
    #
    inv = make_inverse_operator(evoked.info, fwd, cov)
    fname = tmpdir.join('test-inv.fif')
    write_inverse_operator(fname, inv)
    inv_read = read_inverse_operator(fname)
    for iv in (inv, inv_read):
        assert iv['info']['ch_names'] == good_long_data_names
    apply_inverse(evoked, inv)  # smoke test
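
# Illustrative usage sketch for the long-channel-name round trip exercised
# above: rename channels past the 15-character FIF field, save, and read
# them back. The input path and helper name are placeholders.
def _sketch_long_channel_names(tmp_path):
    """Minimal sketch; assumes a readable FIF file and a writable tmp_path."""
    import mne
    raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)  # placeholder
    rename = {name: 'verylongprefix_' + name for name in raw.ch_names[:3]}
    raw.rename_channels(rename)
    fname = str(tmp_path / 'long-names-raw.fif')
    raw.save(fname, overwrite=True)
    raw_read = mne.io.read_raw_fif(fname)
    # as the test above checks, names longer than 15 characters survive the
    # round trip via the extended channel information block
    return raw_read.ch_names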
def test_psd():
    """Tests the welch and multitaper PSD."""
    raw = read_raw_fif(raw_fname)
    picks_psd = [0, 1]

    # Populate raw with sinusoids
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    freqs_sig = [8., 50.]
    for ix, freq in zip(picks_psd, freqs_sig):
        data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)

    tmin, tmax = 0, 20  # use a few seconds of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
    n_fft = 128

    # -- Raw --
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    kws_welch = dict(n_fft=n_fft)
    kws_mt = dict(low_bias=True)
    funcs = [(psd_welch, kws_welch),
             (psd_multitaper, kws_mt)]

    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)
        psds, freqs = func(raw, proj=False, **kws)
        psds_proj, freqs_proj = func(raw, proj=True, **kws)

        assert_true(psds.shape == (len(kws['picks']), len(freqs)))
        assert_true(np.sum(freqs < 0) == 0)
        assert_true(np.sum(psds < 0) == 0)

        # Is power found where it should be
        ixs_max = np.argmax(psds, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert_true(np.abs(ixmax - ixtrue) < 2)

        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj)
        # Array input shouldn't work
        assert_raises(ValueError, func, raw[:3, :20][0])

    # test n_per_seg in psd_welch (and padding)
    psds1, freqs1 = psd_welch(raw, proj=False, n_fft=128, n_per_seg=128,
                              **kws_psd)
    psds2, freqs2 = psd_welch(raw, proj=False, n_fft=256, n_per_seg=128,
                              **kws_psd)
    assert_true(len(freqs1) == np.floor(len(freqs2) / 2.))
    assert_true(psds1.shape[-1] == np.floor(psds2.shape[-1] / 2.))

    # tests ValueError when n_per_seg=None and n_fft > signal length
    kws_psd.update(dict(n_fft=tmax * 1.1 * raw.info['sfreq']))
    assert_raises(ValueError, psd_welch, raw, proj=False, n_per_seg=None,
                  **kws_psd)
    # ValueError when n_overlap > n_per_seg
    kws_psd.update(dict(n_fft=128, n_per_seg=64, n_overlap=90))
    assert_raises(ValueError, psd_welch, raw, proj=False, **kws_psd)

    # -- Epochs/Evoked --
    events = read_events(event_fname)
    events[:, 0] -= first_samp
    tmin, tmax, event_id = -0.5, 0.5, 1
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
                    proj=False, preload=True, baseline=None)
    evoked = epochs.average()

    tmin_full, tmax_full = -1, 1
    epochs_full = Epochs(raw, events[:10], event_id, tmin_full, tmax_full,
                         picks=picks_psd, proj=False, preload=True,
                         baseline=None)
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    funcs = [(psd_welch, kws_welch),
             (psd_multitaper, kws_mt)]

    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)

        psds, freqs = func(epochs[:1], proj=False, **kws)
        psds_proj, freqs_proj = func(epochs[:1], proj=True, **kws)
        psds_f, freqs_f = func(epochs_full[:1], proj=False, **kws)

        # this one will fail if you add for example 0.1 to tmin
        assert_array_almost_equal(psds, psds_f, 27)
        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj, 27)

        # Is power found where it should be
        ixs_max = np.argmax(psds.mean(0), axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert_true(np.abs(ixmax - ixtrue) < 2)

        assert_true(psds.shape == (1, len(kws['picks']), len(freqs)))
        assert_true(np.sum(freqs < 0) == 0)
        assert_true(np.sum(psds < 0) == 0)

        # Array input shouldn't work
        assert_raises(ValueError, func, epochs.get_data())

        # Testing evoked (doesn't work w/ compute_epochs_psd)
        psds_ev, freqs_ev = func(evoked, proj=False, **kws)
        psds_ev_proj, freqs_ev_proj = func(evoked, proj=True, **kws)

        # Is power found where it should be
        ixs_max = np.argmax(psds_ev, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
            assert_true(np.abs(ixmax - ixtrue) < 2)

        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
        assert_true(psds_ev.shape == (len(kws['picks']), len(freqs)))
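
# Illustrative usage sketch for the PSD functions exercised above: Welch and
# multitaper estimates over the same Raw segment. The file path and helper
# name are placeholders.
def _sketch_psd():
    """Minimal sketch; assumes a readable FIF file with gradiometers."""
    import mne
    from mne.time_frequency import psd_welch, psd_multitaper
    raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)  # placeholder
    picks = mne.pick_types(raw.info, meg='grad', eeg=False, exclude='bads')
    psds_w, freqs_w = psd_welch(raw, fmin=2., fmax=70., n_fft=2048,
                                tmin=0., tmax=60., picks=picks)
    psds_m, freqs_m = psd_multitaper(raw, fmin=2., fmax=70., low_bias=True,
                                     tmin=0., tmax=60., picks=picks)
    # both return (n_channels, n_freqs) arrays plus the frequency grid
    return (psds_w, freqs_w), (psds_m, freqs_m)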
def test_render_report(renderer, tmpdir):
    """Test rendering *.fif files for mne report."""
    tempdir = str(tmpdir)
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new],
                 [raw_fname, raw_fname_new_bids],
                 [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [proj_fname, proj_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002'])
    raw.del_proj()
    raw.set_eeg_reference(projection=True)
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    evoked = epochs.average().crop(0.1, 0.2)
    evoked.save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    projs=True)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    assert len(report.fnames) == len(fnames)
    assert len(report.html) == len(report.fnames)
    assert len(report.fnames) == len(report)

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html
    # Projectors in Raw.info
    assert '<h4>SSP Projectors</h4>' in html
    # Projectors in `proj_fname_new`
    assert f'SSP Projectors: {op.basename(proj_fname_new)}' in html
    # Evoked in `evoked_fname`
    assert f'Evoked: {op.basename(evoked_fname)} ({evoked.comment})' in html
    assert 'Topomap (ch_type =' in html
    assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html

    assert len(report.html) == len(fnames)
    assert len(report.html) == len(report.fnames)

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))
    fnames = glob.glob(op.join(tempdir, '*.raw')) + \
        glob.glob(op.join(tempdir, '*.raw'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    tempdir = pathlib.Path(tempdir)  # test using pathlib.Path
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')

    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section('foo', 'caption', 'section')
    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section(['foo'], 'caption', 'section')
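
# Illustrative usage sketch for the Report workflow exercised above: scan a
# folder of FIF files and write an HTML report. The output file name and
# helper name are placeholders.
def _sketch_report(data_path):
    """Minimal sketch; assumes data_path contains *raw.fif / *-ave.fif files."""
    import mne
    report = mne.Report(verbose=True)
    report.parse_folder(data_path=data_path,
                        pattern=['*raw.fif', '*-ave.fif'])
    report.save('report.html', open_browser=False, overwrite=True)
    return report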
def test_info():
    """Test info object."""
    raw = read_raw_fif(raw_fname)
    event_id, tmin, tmax = 1, -0.2, 0.5
    events = read_events(event_name)
    event_id = int(events[0, 2])
    epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None,
                    baseline=(None, 0))

    evoked = epochs.average()

    # Test subclassing was successful.
    info = Info(a=7, b='aaaaa')
    assert ('a' in info)
    assert ('b' in info)
    info[42] = 'foo'
    assert (info[42] == 'foo')

    # Test info attribute in API objects
    for obj in [raw, epochs, evoked]:
        assert (isinstance(obj.info, Info))
        info_str = '%s' % obj.info
        assert len(info_str.split('\n')) == len(obj.info.keys()) + 2
        assert all(k in info_str for k in obj.info.keys())
        rep = repr(obj.info)
        assert '2002-12-03 19:01:10 GMT' in rep, rep
        assert '146 items (3 Cardinal, 4 HPI, 61 EEG, 78 Extra)' in rep
        dig_rep = repr(obj.info['dig'][0])
        assert 'LPA' in dig_rep, dig_rep
        assert '(-71.4, 0.0, 0.0) mm' in dig_rep, dig_rep
        assert 'head frame' in dig_rep, dig_rep
        # Test our BunchConstNamed support
        for func in (str, repr):
            assert '4 (FIFFV_COORD_HEAD)' == \
                func(obj.info['dig'][0]['coord_frame'])

    # Test read-only fields
    info = raw.info.copy()
    nchan = len(info['chs'])
    ch_names = [ch['ch_name'] for ch in info['chs']]
    assert info['nchan'] == nchan
    assert list(info['ch_names']) == ch_names

    # Deleting of regular fields should work
    info['foo'] = 'bar'
    del info['foo']

    # Test updating of fields
    del info['chs'][-1]
    info._update_redundant()
    assert info['nchan'] == nchan - 1
    assert list(info['ch_names']) == ch_names[:-1]

    info['chs'][0]['ch_name'] = 'foo'
    info._update_redundant()
    assert info['ch_names'][0] == 'foo'

    # Test casting to and from a dict
    info_dict = dict(info)
    info2 = Info(info_dict)
    assert info == info2
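
# Illustrative usage sketch for the Info object exercised above: it behaves
# like a dict, with derived fields such as 'nchan' and 'ch_names' kept in
# sync with 'chs'. The file path and helper name are placeholders.
def _sketch_info_access():
    """Minimal sketch; assumes a readable FIF file."""
    import mne
    raw = mne.io.read_raw_fif('sample_raw.fif')  # placeholder
    info = raw.info
    sfreq = info['sfreq']        # sampling frequency in Hz
    n_channels = info['nchan']   # consistent with len(info['chs'])
    eeg_names = [ch['ch_name'] for ch in info['chs']
                 if ch['kind'] == mne.io.constants.FIFF.FIFFV_EEG_CH]
    return sfreq, n_channels, eeg_names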
def test_set_bipolar_reference(inst_type):
    """Test bipolar referencing."""
    raw = read_raw_fif(fif_fname, preload=True)
    raw.apply_proj()

    if inst_type == 'raw':
        inst = raw
        del raw
    elif inst_type in ['epochs', 'evoked']:
        events = find_events(raw, stim_channel='STI 014')
        epochs = Epochs(raw, events, tmin=-0.3, tmax=0.7, preload=True)
        inst = epochs
        if inst_type == 'evoked':
            inst = epochs.average()
        del epochs

    ch_info = {'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'}
    with pytest.raises(KeyError, match='key errantly present'):
        set_bipolar_reference(inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info)
    ch_info.pop('extra')
    reref = set_bipolar_reference(inst, 'EEG 001', 'EEG 002', 'bipolar',
                                  ch_info)
    assert (reref.info['custom_ref_applied'])

    # Compare result to a manual calculation
    a = inst.copy().pick_channels(['EEG 001', 'EEG 002'])
    a = a._data[..., 0, :] - a._data[..., 1, :]
    b = reref.copy().pick_channels(['bipolar'])._data[..., 0, :]
    assert_allclose(a, b)

    # Original channels should be replaced by a virtual one
    assert ('EEG 001' not in reref.ch_names)
    assert ('EEG 002' not in reref.ch_names)
    assert ('bipolar' in reref.ch_names)

    # Check channel information
    bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
    an_info = inst.info['chs'][inst.ch_names.index('EEG 001')]
    for key in bp_info:
        if key == 'coil_type':
            assert bp_info[key] == FIFF.FIFFV_COIL_EEG_BIPOLAR, key
        elif key == 'kind':
            assert bp_info[key] == FIFF.FIFFV_EOG_CH, key
        elif key != 'ch_name':
            assert_equal(bp_info[key], an_info[key], err_msg=key)

    # Minimalist call
    reref = set_bipolar_reference(inst, 'EEG 001', 'EEG 002')
    assert ('EEG 001-EEG 002' in reref.ch_names)

    # Minimalist call with twice the same anode
    reref = set_bipolar_reference(inst,
                                  ['EEG 001', 'EEG 001', 'EEG 002'],
                                  ['EEG 002', 'EEG 003', 'EEG 003'])
    assert ('EEG 001-EEG 002' in reref.ch_names)
    assert ('EEG 001-EEG 003' in reref.ch_names)

    # Set multiple references at once
    reref = set_bipolar_reference(
        inst,
        ['EEG 001', 'EEG 003'],
        ['EEG 002', 'EEG 004'],
        ['bipolar1', 'bipolar2'],
        [{'kind': FIFF.FIFFV_EOG_CH}, {'kind': FIFF.FIFFV_EOG_CH}],
    )
    a = inst.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'])
    a = np.concatenate([a._data[..., :1, :] - a._data[..., 1:2, :],
                        a._data[..., 2:3, :] - a._data[..., 3:4, :]],
                       axis=-2)
    b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data
    assert_allclose(a, b)

    # Test creating a bipolar reference that doesn't involve EEG channels:
    # it should not set the custom_ref_applied flag
    reref = set_bipolar_reference(inst, 'MEG 0111', 'MEG 0112',
                                  ch_info={'kind': FIFF.FIFFV_MEG_CH},
                                  verbose='error')
    assert (not reref.info['custom_ref_applied'])
    assert ('MEG 0111-MEG 0112' in reref.ch_names)

    # Test a battery of invalid inputs
    pytest.raises(ValueError, set_bipolar_reference, inst,
                  'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')
    pytest.raises(ValueError, set_bipolar_reference, inst,
                  ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
    pytest.raises(ValueError, set_bipolar_reference, inst,
                  'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])
    pytest.raises(ValueError, set_bipolar_reference, inst,
                  'EEG 001', 'EEG 002', 'bipolar',
                  ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])
    pytest.raises(ValueError, set_bipolar_reference, inst,
                  'EEG 001', 'EEG 002', ch_name='EEG 003')
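
# Illustrative usage sketch for set_bipolar_reference as exercised above:
# build a single virtual anode-minus-cathode channel. Channel names follow
# the MNE sample data; the file path and helper name are placeholders.
def _sketch_bipolar_reference():
    """Minimal sketch; assumes a Raw with 'EEG 001' and 'EEG 002'."""
    import mne
    raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)  # placeholder
    reref = mne.set_bipolar_reference(raw, anode='EEG 001',
                                      cathode='EEG 002',
                                      ch_name='EEG 001-002')
    # by default the anode and cathode are dropped and replaced by the
    # virtual bipolar channel
    assert 'EEG 001-002' in reref.ch_names
    return reref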