def test_cov_estimation_on_raw():
    """Test estimation from raw (typically empty room)"""
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov_mne = read_cov(erm_cov_fname)
    # tstep=None -> use one continuous block; should match the reference
    # covariance (cov_mne) very closely
    cov = compute_raw_covariance(raw, tstep=None)
    assert_equal(cov.ch_names, cov_mne.ch_names)
    assert_equal(cov.nfree, cov_mne.nfree)
    assert_snr(cov.data, cov_mne.data, 1e4)
    cov = compute_raw_covariance(raw)  # tstep=0.2 (default)
    assert_equal(cov.nfree, cov_mne.nfree - 119)  # cutoff some samples
    assert_snr(cov.data, cov_mne.data, 1e2)
    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)
    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_covariance(raw, picks=picks, tstep=None)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_snr(cov.data, cov_mne.data[picks][:, picks], 1e4)
    cov = compute_raw_covariance(raw, picks=picks)
    assert_snr(cov.data, cov_mne.data[picks][:, picks], 90)  # cutoff samps
    # make sure we get a warning with too short a segment
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_covariance(raw_2)
    assert_true(any('Too few samples' in str(ww.message) for ww in w))
def test_cov_estimation_on_raw_segment():
    """Estimate raw on continuous recordings (typically empty room).

    BUG FIX: two assertions previously had the comparison OUTSIDE the
    ``assert_true(...)`` call, i.e. ``assert_true(x) < tol``, which compares
    the (None) return value of ``assert_true`` and never asserts the
    tolerance.  The comparisons are now inside ``assert_true``.
    """
    raw = Raw(raw_fname)
    cov = compute_raw_data_covariance(raw)
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    print(linalg.norm(cov.data - cov_mne.data, ord='fro') /
          linalg.norm(cov.data, ord='fro'))
    # relative Frobenius-norm error vs. the reference covariance
    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro') /
                linalg.norm(cov.data, ord='fro') < 1e-6)
    # test IO when computation done in Python
    cov.save('test-cov.fif')  # test saving
    cov_read = read_cov('test-cov.fif')
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') /
                 linalg.norm(cov.data, ord='fro')) < 1e-5)
    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                            ord='fro') /
                linalg.norm(cov.data, ord='fro') < 1e-6)
def test_cov_estimation_with_triggers():
    """Test estimation from raw with triggers """
    # NOTE(review): ``raw`` is not defined in this function -- presumably a
    # module-level object; confirm against the rest of the file.
    events = find_events(raw)
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)
    cov = compute_covariance(epochs, keep_sample_mean=True)
    cov_mne = read_cov(cov_km_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') /
                 linalg.norm(cov.data, ord='fro')) < 0.005)
    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert_true(np.all(cov.data != cov_tmin_tmax.data))
    assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
                 linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05)
    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    # list-of-epochs path must agree with the merged-events covariance
    assert_array_almost_equal(cov.data, cov2.data)
    assert_true(cov.ch_names == cov2.ch_names)
    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    cov_mne = read_cov(cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') /
                 linalg.norm(cov.data, ord='fro')) < 0.005)
    # test IO when computation done in Python
    cov.save('test-cov.fif')  # test saving
    cov_read = read_cov('test-cov.fif')
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') /
                 linalg.norm(cov.data, ord='fro')) < 1e-5)
    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject),
              Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=False, reject=reject)]
    # these should fail
    assert_raises(ValueError, compute_covariance, epochs)
    assert_raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
    cov = compute_covariance(epochs, projs=[])
def test_cov_estimation_on_raw_segment():
    """Test estimation from raw on continuous recordings (typically
    empty room)
    """
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov = compute_raw_data_covariance(raw)
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    # relative Frobenius-norm error vs. the reference covariance
    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro') /
                linalg.norm(cov.data, ord='fro') < 1e-4)
    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)
    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                            ord='fro') /
                linalg.norm(cov.data, ord='fro') < 1e-4)
    # make sure we get a warning with too short a segment
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_data_covariance(raw_2)
    assert_true(len(w) == 1)
def test_cov_scaling():
    """Test rescaling covs"""
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=True)
    cov_a = read_cov(cov_fname)['data']
    cov_b = read_cov(cov_fname)['data']
    assert_array_equal(cov_a, cov_b)
    keep = [evoked.ch_names[k]
            for k in pick_types(evoked.info, meg=True, eeg=True)]
    evoked.pick_channels(keep)
    by_type = _picks_by_type(evoked.info)
    scale_map = dict(mag=1e15, grad=1e13, eeg=1e6)
    # scaling both copies identically must keep them equal and push the
    # values above 1
    _apply_scaling_cov(cov_b, by_type, scalings=scale_map)
    _apply_scaling_cov(cov_a, by_type, scalings=scale_map)
    assert_array_equal(cov_a, cov_b)
    assert_true(cov_a.max() > 1)
    # undoing must restore the original small magnitudes on both copies
    _undo_scaling_cov(cov_b, by_type, scalings=scale_map)
    _undo_scaling_cov(cov_a, by_type, scalings=scale_map)
    assert_array_equal(cov_a, cov_b)
    assert_true(cov_a.max() < 1)
    # apply/undo round-trip on the data array itself is (nearly) lossless
    arr = evoked.data.copy()
    _apply_scaling_array(arr, by_type, scalings=scale_map)
    _undo_scaling_array(arr, by_type, scalings=scale_map)
    assert_allclose(arr, evoked.data, atol=1e-20)
def test_inverse_operator_channel_ordering():
    """Test MNE inverse computation is immune to channel reorderings
    """
    # These are with original ordering
    evoked = _get_evoked()
    noise_cov = read_cov(fname_cov)
    fwd_orig = make_forward_solution(evoked.info, fname_trans, src_fname,
                                     fname_bem, eeg=True, mindist=5.0)
    fwd_orig = convert_forward_solution(fwd_orig, surf_ori=True)
    inv_orig = make_inverse_operator(evoked.info, fwd_orig, noise_cov,
                                     loose=0.2, depth=0.8,
                                     limit_depth_chs=False)
    stc_1 = apply_inverse(evoked, inv_orig, lambda2, "dSPM")
    # Assume that a raw reordering applies to both evoked and noise_cov,
    # so we don't need to create those from scratch. Just reorder them,
    # then try to apply the original inverse operator
    new_order = np.arange(len(evoked.info['ch_names']))
    randomiser = np.random.RandomState(42)  # fixed seed for reproducibility
    randomiser.shuffle(new_order)
    evoked.data = evoked.data[new_order]
    evoked.info['chs'] = [evoked.info['chs'][n] for n in new_order]
    evoked.info._update_redundant()
    evoked.info._check_consistency()
    # permute the covariance rows/columns to match the shuffled channels
    cov_ch_reorder = [c for c in evoked.info['ch_names']
                      if (c in noise_cov.ch_names)]
    new_order_cov = [noise_cov.ch_names.index(name)
                     for name in cov_ch_reorder]
    noise_cov['data'] = noise_cov.data[np.ix_(new_order_cov, new_order_cov)]
    noise_cov['names'] = [noise_cov['names'][idx] for idx in new_order_cov]
    fwd_reorder = make_forward_solution(evoked.info, fname_trans, src_fname,
                                        fname_bem, eeg=True, mindist=5.0)
    fwd_reorder = convert_forward_solution(fwd_reorder, surf_ori=True)
    inv_reorder = make_inverse_operator(evoked.info, fwd_reorder, noise_cov,
                                        loose=0.2, depth=0.8,
                                        limit_depth_chs=False)
    stc_2 = apply_inverse(evoked, inv_reorder, lambda2, "dSPM")
    # source estimates must agree regardless of channel ordering
    assert_equal(stc_1.subject, stc_2.subject)
    assert_array_equal(stc_1.times, stc_2.times)
    assert_allclose(stc_1.data, stc_2.data, rtol=1e-5, atol=1e-5)
    assert_true(inv_orig['units'] == inv_reorder['units'])
    # Reload with original ordering & apply reordered inverse
    evoked = _get_evoked()
    noise_cov = read_cov(fname_cov)
    stc_3 = apply_inverse(evoked, inv_reorder, lambda2, "dSPM")
    assert_allclose(stc_1.data, stc_3.data, rtol=1e-5, atol=1e-5)
def test_io_cov():
    """Test IO for noise covariance matrices """
    out_name = 'cov.fif'
    cov = read_cov(cov_fname)
    # round-trip: save then reload, data must match
    cov.save(out_name)
    cov_reread = read_cov(out_name)
    assert_array_almost_equal(cov.data, cov_reread.data)
    # mark one channel bad and select the remaining ones
    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    n_good = len(cov['data']) - len(cov['bads'])
    assert_true(cov_sel['dim'] == n_good)
    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
    # the reduced covariance must also be saveable
    cov_sel.save(out_name)
def test_io_cov():
    """Test IO for noise covariance matrices
    """
    # NOTE(review): ``tempdir`` is not defined in this function -- presumably
    # a module-level directory; other variants call ``_TempDir()`` locally.
    # Confirm against the enclosing module.
    cov = read_cov(cov_fname)
    cov.save(op.join(tempdir, "test-cov.fif"))
    cov2 = read_cov(op.join(tempdir, "test-cov.fif"))
    assert_array_almost_equal(cov.data, cov2.data)
    # round-trip via the gzipped reference file too
    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(op.join(tempdir, "test-cov.fif.gz"))
    cov2 = read_cov(op.join(tempdir, "test-cov.fif.gz"))
    assert_array_almost_equal(cov.data, cov2.data)
    # channel selection with bads excluded
    cov["bads"] = ["EEG 039"]
    cov_sel = pick_channels_cov(cov, exclude=cov["bads"])
    assert_true(cov_sel["dim"] == (len(cov["data"]) - len(cov["bads"])))
    assert_true(cov_sel["data"].shape == (cov_sel["dim"], cov_sel["dim"]))
    cov_sel.save(op.join(tempdir, "test-cov.fif"))
    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(op.join(tempdir, "test-cov.fif.gz"))
    cov2 = read_cov(op.join(tempdir, "test-cov.fif.gz"))
    assert_array_almost_equal(cov.data, cov2.data)
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        cov_badname = op.join(tempdir, "test-bad-name.fif.gz")
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    # one warning for the write and one for the read
    assert_true(len(w) == 2)
def test_cov_estimation_on_raw():
    """Test estimation from raw (typically empty room)"""
    tempdir = _TempDir()
    raw = read_raw_fif(raw_fname, preload=True)
    cov_mne = read_cov(erm_cov_fname)
    # The pure-string uses the more efficient numpy-based method, the
    # the list gets triaged to compute_covariance (should be equivalent
    # but use more memory)
    for method in (None, ['empirical']):  # None is cast to 'empirical'
        cov = compute_raw_covariance(raw, tstep=None, method=method)
        assert_equal(cov.ch_names, cov_mne.ch_names)
        assert_equal(cov.nfree, cov_mne.nfree)
        assert_snr(cov.data, cov_mne.data, 1e4)
        cov = compute_raw_covariance(raw, method=method)  # tstep=0.2
        assert_equal(cov.nfree, cov_mne.nfree - 119)  # cutoff some samples
        assert_snr(cov.data, cov_mne.data, 1e2)
        # test IO when computation done in Python
        cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
        cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
        assert_true(cov_read.ch_names == cov.ch_names)
        assert_true(cov_read.nfree == cov.nfree)
        assert_array_almost_equal(cov.data, cov_read.data)
        # test with a subset of channels
        picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
        raw_pick = raw.copy().pick_channels(
            [raw.ch_names[pick] for pick in picks])
        raw_pick.info.normalize_proj()
        cov = compute_raw_covariance(raw_pick, picks=picks, tstep=None,
                                     method=method)
        assert_true(cov_mne.ch_names[:5] == cov.ch_names)
        assert_snr(cov.data, cov_mne.data[picks][:, picks], 1e4)
        cov = compute_raw_covariance(raw_pick, picks=picks, method=method)
        assert_snr(cov.data, cov_mne.data[picks][:, picks], 90)  # cutoff
        # make sure we get a warning with too short a segment
        raw_2 = read_raw_fif(raw_fname).crop(0, 1, copy=False)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            cov = compute_raw_covariance(raw_2, method=method)
        assert_true(any('Too few samples' in str(ww.message) for ww in w))
        # no epochs found due to rejection
        assert_raises(ValueError, compute_raw_covariance, raw, tstep=None,
                      method='empirical', reject=dict(eog=200e-6))
        # but this should work
        cov = compute_raw_covariance(raw.copy().crop(0, 10., copy=False),
                                     tstep=None, method=method,
                                     reject=dict(eog=1000e-6))
def test_io_cov():
    """Test IO for noise covariance matrices """
    tempdir = _TempDir()
    fif_name = op.join(tempdir, 'test-cov.fif')
    gz_name = op.join(tempdir, 'test-cov.fif.gz')
    cov = read_cov(cov_fname)
    # plain FIF round-trip
    cov.save(fif_name)
    cov_io = read_cov(fif_name)
    assert_array_almost_equal(cov.data, cov_io.data)
    # gzipped reference file, then gzipped round-trip
    cov_io = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov_io.data)
    cov_io.save(gz_name)
    cov_io = read_cov(gz_name)
    assert_array_almost_equal(cov.data, cov_io.data)
    # selection with a bad channel excluded
    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    assert_true(cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])))
    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
    cov_sel.save(fif_name)
    # repeat the gzip round-trip after the selection was written
    cov_io = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov_io.data)
    cov_io.save(gz_name)
    cov_io = read_cov(gz_name)
    assert_array_almost_equal(cov.data, cov_io.data)
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    assert_true(len(w) == 2)
def test_cov_estimation_on_raw(method, tmpdir):
    """Test estimation from raw (typically empty room)."""
    tempdir = str(tmpdir)
    raw = read_raw_fif(raw_fname, preload=True)
    cov_mne = read_cov(erm_cov_fname)
    # The pure-string uses the more efficient numpy-based method, the
    # the list gets triaged to compute_covariance (should be equivalent
    # but use more memory)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(raw, tstep=None, method=method,
                                     rank='full')
    assert_equal(cov.ch_names, cov_mne.ch_names)
    assert_equal(cov.nfree, cov_mne.nfree)
    assert_snr(cov.data, cov_mne.data, 1e4)
    # tstep=0.2 (default)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(raw, method=method, rank='full')
    assert_equal(cov.nfree, cov_mne.nfree - 119)  # cutoff some samples
    assert_snr(cov.data, cov_mne.data, 1e2)
    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert cov_read.ch_names == cov.ch_names
    assert cov_read.nfree == cov.nfree
    assert_array_almost_equal(cov.data, cov_read.data)
    # test with a subset of channels
    raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
    raw_pick.info.normalize_proj()
    cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
                                 rank='full')
    assert cov_mne.ch_names[:5] == cov.ch_names
    assert_snr(cov.data, cov_mne.data[:5, :5], 1e4)
    cov = compute_raw_covariance(raw_pick, method=method, rank='full')
    assert_snr(cov.data, cov_mne.data[:5, :5], 90)  # cutoff samps
    # make sure we get a warning with too short a segment
    raw_2 = read_raw_fif(raw_fname).crop(0, 1)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_raw_covariance(raw_2, method=method)
    # no epochs found due to rejection
    pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
                  method='empirical', reject=dict(eog=200e-6))
    # but this should work
    cov = compute_raw_covariance(raw.copy().crop(0, 10.), tstep=None,
                                 method=method, reject=dict(eog=1000e-6),
                                 verbose='error')
def test_cov_estimation_with_triggers():
    """Estimate raw with triggers """
    raw = Raw(raw_fname)
    events = find_events(raw)
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)
    cov = compute_covariance(epochs, keep_sample_mean=True)
    cov_mne = read_cov(cov_km_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') /
                 linalg.norm(cov.data, ord='fro')) < 0.005)
    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert_true(np.all(cov.data != cov_tmin_tmax.data))
    assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
                 linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05)
    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    # list-of-epochs path must agree with the merged-events covariance
    assert_array_almost_equal(cov.data, cov2.data)
    assert_true(cov.ch_names == cov2.ch_names)
    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    cov_mne = read_cov(cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') /
                 linalg.norm(cov.data, ord='fro')) < 0.005)
    # test IO when computation done in Python
    cov.save('test-cov.fif')  # test saving
    cov_read = read_cov('test-cov.fif')
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') /
                 linalg.norm(cov.data, ord='fro')) < 1e-5)
def test_io_cov():
    """Test IO for noise covariance matrices """
    # legacy low-level API: open the FIF file and read the cov from its tree
    fid, tree, _ = fiff_open(fname)
    cov_type = 1
    cov = mne.read_cov(fid, tree, cov_type)
    fid.close()
    # write it back out and re-read it through the same low-level path
    mne.write_cov_file('cov.fif', cov)
    fid, tree, _ = fiff_open('cov.fif')
    cov2 = mne.read_cov(fid, tree, cov_type)
    fid.close()
    # NOTE(review): Python 2 ``print`` statement printing the (None) return
    # of the assertion helper -- looks like leftover debug output; confirm.
    print assert_array_almost_equal(cov['data'], cov2['data'])
def test_ad_hoc_cov(tmpdir):
    """Test ad hoc cov creation and I/O."""
    out_fname = op.join(str(tmpdir), 'test-cov.fif')
    evoked = read_evokeds(ave_fname)[0]

    def _check_roundtrip(cov_obj):
        # save, check repr, reload, and verify the data round-trips
        cov_obj.save(out_fname)
        assert 'Covariance' in repr(cov_obj)
        reloaded = read_cov(out_fname)
        assert_array_almost_equal(cov_obj['data'], reloaded['data'])

    # default standard deviations
    _check_roundtrip(make_ad_hoc_cov(evoked.info))
    # explicit per-channel-type standard deviations
    std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
    _check_roundtrip(make_ad_hoc_cov(evoked.info, std))
def test_plot_cov():
    """Test plotting of covariances """
    raw = _get_raw()
    noise_cov = read_cov(cov_fname)
    # smoke test: plotting returns the two figures
    figs = plot_cov(noise_cov, raw.info, proj=True)
    fig1, fig2 = figs
    plt.close('all')
def test_gamma_map():
    """Test Gamma MAP inverse"""
    forward = read_forward_solution(fname_fwd, force_fixed=False,
                                    surf_ori=True)
    forward = pick_types_forward(forward, meg=False, eeg=True)
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
    evoked.resample(50)
    evoked.crop(tmin=0, tmax=0.3)
    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info)
    alpha = 0.2

    def _peak_vertex(stc_):
        # vertex with the largest total energy over time
        power = np.sum(stc_.data ** 2, axis=1)
        return np.concatenate(stc_.vertices)[np.argmax(power)]

    # free orientation, one gamma shared per location
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
                    xyz_same_gamma=True, update_mode=1, verbose=False)
    assert_true(_peak_vertex(stc) == 96397)
    # free orientation, independent gammas
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
                    xyz_same_gamma=False, update_mode=1, verbose=False)
    assert_true(_peak_vertex(stc) == 82010)
    # force fixed orientation
    stc, res = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
                         xyz_same_gamma=False, update_mode=2, loose=None,
                         return_residual=True, verbose=False)
    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
    # assert_true(np.concatenate(stc.vertices)[idx] == 83398)  # XXX FIX
    assert_array_almost_equal(evoked.times, res.times)
def test_plot_ica_overlay():
    """Test plotting of ICA cleaning."""
    import matplotlib.pyplot as plt
    raw = _get_raw(preload=True)
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    # can't use info.normalize_proj here because of how and when ICA and
    # Epochs objects do picking of Raw data
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    # don't test raw, needs preload ...
    with pytest.warns(RuntimeWarning, match='projection'):
        ecg_epochs = create_ecg_epochs(raw, picks=picks)
    ica.plot_overlay(ecg_epochs.average())
    with pytest.warns(RuntimeWarning, match='projection'):
        eog_epochs = create_eog_epochs(raw, picks=picks)
    ica.plot_overlay(eog_epochs.average())
    # passing a raw data slice (ndarray) instead of an object must fail
    pytest.raises(TypeError, ica.plot_overlay, raw[:2, :3][0])
    ica.plot_overlay(raw)
    plt.close('all')
    # smoke test for CTF
    raw = read_raw_fif(raw_ctf_fname)
    raw.apply_gradient_compensation(3)
    picks = pick_types(raw.info, meg=True, ref_meg=False)
    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
    ica.fit(raw, picks=picks)
    with pytest.warns(RuntimeWarning, match='longer than'):
        ecg_epochs = create_ecg_epochs(raw)
    ica.plot_overlay(ecg_epochs.average())
    plt.close('all')
def run_inverse(subject_id):
    """Build an inverse operator for one subject and save dSPM solutions."""
    subject = "sub%03d" % subject_id
    print("processing subject: %s" % subject)
    data_path = op.join(meg_dir, subject)
    # per-subject file names
    fname_ave = op.join(data_path, '%s-ave.fif' % subject)
    fname_cov = op.join(data_path, '%s-cov.fif' % subject)
    fname_fwd = op.join(data_path, '%s-meg-%s-fwd.fif' % (subject, spacing))
    fname_inv = op.join(data_path, '%s-meg-%s-inv.fif' % (subject, spacing))
    evokeds = mne.read_evokeds(fname_ave, condition=[0, 1, 2, 3, 4, 5])
    cov = mne.read_cov(fname_cov)
    # cov = mne.cov.regularize(cov, evokeds[0].info,
    #                          mag=0.05, grad=0.05, eeg=0.1, proj=True)
    forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
    # forward = mne.pick_types_forward(forward, meg=True, eeg=False)
    # make an M/EEG, MEG-only, and EEG-only inverse operators
    info = evokeds[0].info
    inverse_operator = make_inverse_operator(info, forward, cov,
                                             loose=0.2, depth=0.8)
    write_inverse_operator(fname_inv, inverse_operator)
    # Compute inverse solution
    snr = 3.0
    lambda2 = 1.0 / snr ** 2  # standard regularization from assumed SNR
    for evoked in evokeds:
        stc = apply_inverse(evoked, inverse_operator, lambda2, "dSPM",
                            pick_ori=None)
        stc.save(op.join(data_path, 'mne_dSPM_inverse-%s' % evoked.comment))
def test_make_inverse_operator_free():
    """Test MNE inverse computation (free orientation) """
    fwd_surf = read_forward_solution_meg(fname_fwd, surf_ori=True)
    fwd_free = read_forward_solution_meg(fname_fwd, surf_ori=False,
                                         force_fixed=False)
    fwd_fixed = read_forward_solution_meg(fname_fwd, surf_ori=False,
                                          force_fixed=True)
    evoked = _get_evoked()
    noise_cov = read_cov(fname_cov)
    # can't make free inv with fixed fwd
    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_fixed,
                  noise_cov, depth=None)
    # for free ori inv, loose=None and loose=1 should be equivalent
    inv_loose_none = make_inverse_operator(evoked.info, fwd_surf, noise_cov,
                                           loose=None)
    inv_loose_one = make_inverse_operator(evoked.info, fwd_surf, noise_cov,
                                          loose=1)
    _compare_inverses_approx(inv_loose_none, inv_loose_one, evoked, 0, 1e-2)
    # for depth=None, surf_ori of the fwd should not matter
    inv_surf_ori = make_inverse_operator(evoked.info, fwd_surf, noise_cov,
                                         depth=None, loose=None)
    inv_plain = make_inverse_operator(evoked.info, fwd_free, noise_cov,
                                      depth=None, loose=None)
    _compare_inverses_approx(inv_surf_ori, inv_plain, evoked, 0, 1e-2)
def apply_inverse_ave(fnevo, min_subject='fsaverage'):
    """Compute and save an inverse operator for each evoked file.

    NOTE(review): ``min_subject`` is only used in commented-out code --
    presumably kept for a fsaverage-based variant; confirm before removing.
    """
    from mne import make_forward_solution
    from mne.minimum_norm import write_inverse_operator
    fnlist = get_files_from_list(fnevo)
    # loop across all filenames
    for fname in fnlist:
        fn_path = os.path.split(fname)[0]
        name = os.path.basename(fname)
        # fn_inv = fname[:fname.rfind('-ave.fif')] + ',ave-inv.fif'
        subject = name.split('_')[0]  # subject ID is the filename prefix
        fn_inv = fn_path + '/%s_fibp1-45,ave-inv.fif' % subject
        subject_path = subjects_dir + '/%s' % subject
        # min_dir = subjects_dir + '/%s' % min_subject
        fn_trans = fn_path + '/%s-trans.fif' % subject
        # fn_cov = fn_path + '/%s_empty,nr,fibp1-45-cov.fif' % subject
        fn_cov = fn_path + '/%s_empty,fibp1-45-cov.fif' % subject
        fn_src = subject_path + '/bem/%s-oct-6-src.fif' % subject
        fn_bem = subject_path + '/bem/%s-5120-5120-5120-bem-sol.fif' % subject
        [evoked] = mne.read_evokeds(fname)
        evoked.pick_types(meg=True, ref_meg=False)
        noise_cov = mne.read_cov(fn_cov)
        # noise_cov = mne.cov.regularize(noise_cov, evoked.info,
        #                                mag=0.05, grad=0.05, proj=True)
        fwd = make_forward_solution(evoked.info, fn_trans, fn_src, fn_bem)
        fwd['surf_ori'] = True
        inv = mne.minimum_norm.make_inverse_operator(evoked.info, fwd,
                                                     noise_cov, loose=0.2,
                                                     depth=0.8,
                                                     limit_depth_chs=False)
        write_inverse_operator(fn_inv, inv)
def test_min_distance_fit_dipole():
    """Test dipole min_dist to inner_skull."""
    subject = 'sample'
    raw = read_raw_fif(fname_raw, preload=True)
    # select eeg data
    eeg_picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    info = pick_info(raw.info, eeg_picks)
    # use an identity matrix as the noise covariance
    cov = read_cov(fname_cov)
    cov['data'] = np.eye(cov['data'].shape[0])
    # build a simulated scalp map active on a few contiguous channels
    scalp_map = np.zeros(eeg_picks.shape[0])
    scalp_map[27:34] = 1
    scalp_map = scalp_map[:, None]
    evoked = EvokedArray(scalp_map, info, tmin=0)
    min_dist = 5.  # distance in mm
    bem = read_bem_solution(fname_bem)
    dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
                               min_dist=min_dist)
    dist = _compute_depth(dip, fname_bem, fname_trans, subject,
                          subjects_dir)
    # Constraints are not exact, so bump the minimum slightly
    assert_true(min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
    # a negative min_dist is invalid
    assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem,
                  fname_trans, -1.)
def test_make_inverse_operator_fixed():
    """Test MNE inverse computation (fixed orientation) """
    fwd_free = read_forward_solution_meg(fname_fwd, surf_ori=False,
                                         force_fixed=False)
    fwd_fixed = read_forward_solution_meg(fname_fwd, surf_ori=False,
                                          force_fixed=True)
    evoked = _get_evoked()
    noise_cov = read_cov(fname_cov)
    # can't make depth-weighted fixed inv without surf ori fwd
    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_free,
                  noise_cov, depth=0.8, loose=None, fixed=True)
    # can't make fixed inv with depth weighting without free ori fwd
    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_fixed,
                  noise_cov, depth=0.8, loose=None, fixed=True)
    # now compare to C solution
    # note that the forward solution must not be surface-oriented
    # to get equivalency (surf_ori=True changes the normals)
    inv_op = make_inverse_operator(evoked.info, fwd_fixed, noise_cov,
                                   depth=None, loose=None, fixed=True)
    inverse_operator_nodepth = read_inverse_operator(fname_inv_fixed_nodepth)
    _compare_inverses_approx(inverse_operator_nodepth, inv_op, evoked, 0,
                             1e-2)
    # Inverse has 306 channels - 6 proj = 302
    assert_true(compute_rank_inverse(inverse_operator_nodepth) == 302)
def test_plot_instance_components():
    """Test plotting of components as instances of raw and epochs."""
    import matplotlib.pyplot as plt
    raw = _get_raw()
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    fig = ica.plot_sources(raw, exclude=[0], title='Components')
    # exercise the browser's keyboard handlers
    for key in ['down', 'up', 'right', 'left', 'o', '-', '+', '=', 'pageup',
                'pagedown', 'home', 'end', 'f11', 'b']:
        fig.canvas.key_press_event(key)
    ax = fig.get_axes()[0]
    line = ax.lines[0]
    # click on the first data point, then on the y-label area
    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
    fig.canvas.key_press_event('escape')
    plt.close('all')
    # same exercise with an Epochs instance
    epochs = _get_epochs()
    fig = ica.plot_sources(epochs, exclude=[0], title='Components')
    for key in ['down', 'up', 'right', 'left', 'o', '-', '+', '=', 'pageup',
                'pagedown', 'home', 'end', 'f11', 'b']:
        fig.canvas.key_press_event(key)
    # Test a click
    ax = fig.get_axes()[0]
    line = ax.lines[0]
    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
    fig.canvas.key_press_event('escape')
    plt.close('all')
def test_plot_ica_panel():
    """Test plotting of ICA panel """
    noise_cov = read_cov(cov_fname)
    # small decomposition keeps the smoke test fast
    ica = ICA(noise_cov=noise_cov, n_components=2, max_pca_components=3,
              n_pca_components=3)
    ica.decompose_raw(raw, picks=ica_picks)
    ica.plot_sources_raw(raw)
def test_gamma_map():
    """Test Gamma MAP inverse"""
    fwd = read_forward_solution(fname_fwd, force_fixed=False, surf_ori=True)
    fwd = pick_types_forward(fwd, meg=False, eeg=True)
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # crop to nice window near samp border
    noise_cov = regularize(read_cov(fname_cov), evoked.info)
    alpha = 0.5
    # free orientation, one gamma shared per location
    stc = gamma_map(evoked, fwd, noise_cov, alpha, tol=1e-4,
                    xyz_same_gamma=True, update_mode=1)
    _check_stc(stc, evoked, 68477)
    # free orientation, independent gammas
    stc = gamma_map(evoked, fwd, noise_cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=1)
    _check_stc(stc, evoked, 82010)
    # force fixed orientation
    stc = gamma_map(evoked, fwd, noise_cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=2, loose=None,
                    return_residual=False)
    _check_stc(stc, evoked, 85739, 20)
def test_plot_cov():
    """Test plotting of covariances """
    raw = _get_raw()
    noise_cov = read_cov(cov_fname)
    # suppress the expected bad-projector warning during plotting
    with warnings.catch_warnings(record=True):  # bad proj
        fig1, fig2 = noise_cov.plot(raw.info, proj=True,
                                    exclude=raw.ch_names[6:])
def test_gamma_map():
    """Test Gamma MAP inverse"""
    forward = read_forward_solution(fname_fwd)
    forward = convert_forward_solution(forward, surf_ori=True)
    forward = pick_types_forward(forward, meg=False, eeg=True)
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # crop to window around peak
    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info)
    alpha = 0.5
    # free orientation, one gamma shared per location
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=True, update_mode=1)
    _check_stc(stc, evoked, 68477)
    # free orientation, independent gammas
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=1)
    _check_stc(stc, evoked, 82010)
    # same solution returned as a list of Dipole objects
    dips = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                     xyz_same_gamma=False, update_mode=1,
                     return_as_dipoles=True)
    assert_true(isinstance(dips[0], Dipole))
    stc_dip = make_stc_from_dipoles(dips, forward['src'])
    _check_stcs(stc, stc_dip)
    # force fixed orientation
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=2, loose=0,
                    return_residual=False)
    _check_stc(stc, evoked, 85739, 20)
def test_plot_white():
    """Test plot_white."""
    import matplotlib.pyplot as plt
    cov = read_cov(cov_fname)
    cov['method'] = 'empirical'
    cov['projs'] = []  # avoid warnings
    evoked = _get_epochs().average()
    # test rank param.
    evoked.plot_white(cov, rank={'mag': 101, 'grad': 201}, time_unit='s')
    evoked.plot_white(cov, rank={'mag': 101}, time_unit='s')  # test rank
    evoked.plot_white(cov, rank={'grad': 201}, time_unit='s')
    # mixing per-type ranks with a global 'meg' rank raises
    pytest.raises(
        ValueError, evoked.plot_white, cov,
        rank={'mag': 101, 'grad': 201, 'meg': 306}, time_unit='s')
    pytest.raises(
        ValueError, evoked.plot_white, cov, rank={'meg': 306},
        time_unit='s')
    # a list of covariances is also accepted
    evoked.plot_white([cov, cov], time_unit='s')
    plt.close('all')
    # Hack to test plotting of maxfiltered data
    evoked_sss = evoked.copy()
    sss = dict(sss_info=dict(in_order=80, components=np.arange(80)))
    evoked_sss.info['proc_history'] = [dict(max_info=sss)]
    evoked_sss.plot_white(cov, rank={'meg': 64}, time_unit='s')
    # per-type rank is invalid for SSS data
    pytest.raises(
        ValueError, evoked_sss.plot_white, cov, rank={'grad': 201},
        time_unit='s')
    evoked_sss.plot_white(cov, time_unit='s')
    plt.close('all')
def test_apply_mne_inverse_fixed_raw():
    """Test MNE with fixed-orientation inverse operator on Raw """
    raw = fiff.Raw(fname_raw)
    start, stop = 3, 10
    _, times = raw[0, start:stop]
    label_lh = read_label(fname_label % 'Aud-lh')

    # Build an inverse operator with fixed dipole orientations.
    fwd = read_forward_solution(fname_fwd, force_fixed=False, surf_ori=True)
    noise_cov = read_cov(fname_cov)
    inv_op = make_inverse_operator(raw.info, fwd, noise_cov, loose=None,
                                   depth=0.8, fixed=True)

    # Apply once without buffering and once with a small buffer size;
    # both runs must produce identical source estimates.
    stc_full, stc_buffered = [
        apply_inverse_raw(raw, inv_op, lambda2, "dSPM", label=label_lh,
                          start=start, stop=stop, nave=1, pick_ori=None,
                          buffer_size=buf)
        for buf in (None, 3)
    ]

    for this_stc in (stc_full, stc_buffered):
        assert_true(this_stc.subject == 'sample')
        assert_array_almost_equal(this_stc.times, times)
    assert_array_almost_equal(stc_full.data, stc_buffered.data)
def test_gamma_map_vol_sphere():
    """Gamma MAP with a sphere forward and volumic source space"""
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # restrict to a window around the peak

    cov = regularize(read_cov(fname_cov), evoked.info)

    info = evoked.info
    # Sphere conductor model plus a regular-grid volume source space.
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 80.0),
                                        bem=None, mindist=5.0, exclude=2.0)
    fwd = mne.make_forward_solution(info, trans=None, src=src, bem=sphere,
                                    eeg=False, meg=True)

    alpha = 0.5
    # 'loose' has no meaning for a volume source space -> must raise.
    for bad_loose in (0, 0.2):
        assert_raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
                      loose=bad_loose, return_residual=False)

    stc = gamma_map(evoked, fwd, cov, alpha, tol=1e-4, xyz_same_gamma=False,
                    update_mode=2, return_residual=False)
    assert_array_almost_equal(stc.times, evoked.times, 5)
def test_make_forward_dipole():
    """Test forward-projecting dipoles.

    Round-trips a few dipoles from the test dataset through
    ``make_forward_dipole`` + ``simulate_evoked`` + ``fit_dipole`` and
    checks that the fitted dipoles are close to the originals; also
    checks min-dist rejection with a BEM and the evenly-sampled path.
    """
    rng = np.random.RandomState(0)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    cov['projs'] = []  # avoid proj warning
    dip_c = read_dipole(fname_dip)

    # Only use magnetometers for speed!
    picks = pick_types(evoked.info, meg='mag', eeg=False)[::8]
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.info.normalize_proj()
    info = evoked.info

    # Make new Dipole object with n_test_dipoles picked from the dipoles
    # in the test dataset.
    n_test_dipoles = 3  # minimum 3 needed to get uneven sampling in time
    dipsel = np.sort(rng.permutation(np.arange(len(dip_c)))[:n_test_dipoles])
    dip_test = Dipole(times=dip_c.times[dipsel],
                      pos=dip_c.pos[dipsel],
                      amplitude=dip_c.amplitude[dipsel],
                      ori=dip_c.ori[dipsel],
                      gof=dip_c.gof[dipsel])

    sphere = make_sphere_model(head_radius=0.1)

    # Warning emitted due to uneven sampling in time
    with pytest.warns(RuntimeWarning, match='unevenly spaced'):
        fwd, stc = make_forward_dipole(dip_test, sphere, info,
                                       trans=fname_trans)

    # stc is list of VolSourceEstimate's
    assert isinstance(stc, list)
    for n_dip in range(n_test_dipoles):
        assert isinstance(stc[n_dip], VolSourceEstimate)

    # Now simulate evoked responses for each of the test dipoles,
    # and fit dipoles to them (sphere model, MEG and EEG)
    times, pos, amplitude, ori, gof = [], [], [], [], []
    nave = 100  # add a tiny amount of noise to the simulated evokeds
    for s in stc:
        evo_test = simulate_evoked(fwd, s, info, cov,
                                   nave=nave, random_state=rng)
        # evo_test.add_proj(make_eeg_average_ref_proj(evo_test.info))
        dfit, resid = fit_dipole(evo_test, cov, sphere, None)
        times += dfit.times.tolist()
        pos += dfit.pos.tolist()
        amplitude += dfit.amplitude.tolist()
        ori += dfit.ori.tolist()
        gof += dfit.gof.tolist()

    # Create a new Dipole object with the dipole fits
    dip_fit = Dipole(times, pos, amplitude, ori, gof)

    # check that true (test) dipoles and fits are "close"
    # cf. mne/tests/test_dipole.py
    diff = dip_test.pos - dip_fit.pos
    corr = np.corrcoef(dip_test.pos.ravel(), dip_fit.pos.ravel())[0, 1]
    dist = np.sqrt(np.mean(np.sum(diff * diff, axis=1)))
    gc_dist = 180 / np.pi * \
        np.mean(np.arccos(np.sum(dip_test.ori * dip_fit.ori, axis=1)))
    amp_err = np.sqrt(np.mean((dip_test.amplitude - dip_fit.amplitude) ** 2))

    # Make sure each coordinate is close to reference
    # NB tolerance should be set relative to snr of simulated evoked!
    assert_allclose(dip_fit.pos, dip_test.pos, rtol=0, atol=1e-2,
                    err_msg='position mismatch')
    assert dist < 1e-2  # within 1 cm
    assert corr > 0.985
    assert gc_dist < 20  # less than 20 degrees
    assert amp_err < 10e-9  # within 10 nAm

    # Make sure rejection works with BEM: one dipole at z=1m
    # NB _make_forward.py:_prepare_for_forward will raise a RuntimeError
    # if no points are left after min_dist exclusions, hence 2 dips here!
    dip_outside = Dipole(times=[0., 0.001],
                         pos=[[0., 0., 1.0], [0., 0., 0.040]],
                         amplitude=[100e-9, 100e-9],
                         ori=[[1., 0., 0.], [1., 0., 0.]], gof=1)
    pytest.raises(ValueError, make_forward_dipole, dip_outside, fname_bem,
                  info, fname_trans)
    # if we get this far, can safely assume the code works with BEMs too
    # -> use sphere again below for speed

    # Now make an evenly sampled set of dipoles, some simultaneous,
    # should return a VolSourceEstimate regardless
    times = [0., 0., 0., 0.001, 0.001, 0.002]
    # BUGFIX: use the seeded ``rng`` instead of the global np.random state
    # so this part of the test is reproducible as well.
    pos = rng.rand(6, 3) * 0.020 + \
        np.array([0., 0., 0.040])[np.newaxis, :]
    amplitude = rng.rand(6) * 100e-9
    ori = np.eye(6, 3) + np.eye(6, 3, -3)
    gof = np.arange(len(times)) / len(times)  # arbitrary
    dip_even_samp = Dipole(times, pos, amplitude, ori, gof)

    fwd, stc = make_forward_dipole(dip_even_samp, sphere, info,
                                   trans=fname_trans)
    assert isinstance(stc, VolSourceEstimate)
    assert_allclose(stc.times, np.arange(0., 0.003, 0.001))
def test_simulate_raw_sphere(raw_data, tmpdir):
    """Test simulation of raw data with sphere model."""
    seed = 42
    # Fixture supplies a template raw, source space, STC, trans and sphere.
    raw, src, stc, trans, sphere = raw_data
    assert len(pick_types(raw.info, meg=False, ecg=True)) == 1
    tempdir = str(tmpdir)
    # head pos
    head_pos_sim = _get_head_pos_sim(raw)

    #
    # Test raw simulation with basic parameters
    #
    raw.info.normalize_proj()
    cov = read_cov(cov_fname)
    cov['projs'] = raw.info['projs']
    raw.info['bads'] = raw.ch_names[:1]
    # Sphere model fit from the channel positions ('auto'), no fixed radius.
    sphere_norad = make_sphere_model('auto', None, raw.info)
    raw_meg = raw.copy().pick_types()
    raw_sim = simulate_raw(raw_meg.info, stc, trans, src, sphere_norad,
                           head_pos=head_pos_sim)
    # Test IO on processed data: saved/reloaded data must match in memory.
    test_outname = op.join(tempdir, 'sim_test_raw.fif')
    raw_sim.save(test_outname)

    raw_sim_loaded = read_raw_fif(test_outname, preload=True)
    assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6,
                    atol=1e-20)
    del raw_sim

    # make sure it works with EEG-only and MEG-only
    raw_sim_meg = simulate_raw(raw.copy().pick_types(meg=True, eeg=False).info,
                               stc, trans, src, sphere)
    raw_sim_eeg = simulate_raw(raw.copy().pick_types(meg=False, eeg=True).info,
                               stc, trans, src, sphere)
    raw_sim_meeg = simulate_raw(raw.copy().pick_types(meg=True, eeg=True).info,
                                stc, trans, src, sphere)
    # EOG can be added to any channel set; ECG requires MEG channels.
    for this_raw in (raw_sim_meg, raw_sim_eeg, raw_sim_meeg):
        add_eog(this_raw, random_state=seed)
    for this_raw in (raw_sim_meg, raw_sim_meeg):
        add_ecg(this_raw, random_state=seed)
    with pytest.raises(RuntimeError, match='only add ECG artifacts if MEG'):
        add_ecg(raw_sim_eeg)
    # Stacked MEG-only + EEG-only simulations must equal the combined one.
    assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
                    raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
    del raw_sim_meg, raw_sim_eeg, raw_sim_meeg

    # check that raw-as-info is supported
    n_samp = len(stc.times)
    raw_crop = raw.copy().crop(0., (n_samp - 1.) / raw.info['sfreq'])
    assert len(raw_crop.times) == len(stc.times)
    raw_sim = simulate_raw(raw_crop.info, stc, trans, src, sphere)
    with catch_logging() as log:
        raw_sim_2 = simulate_raw(raw_crop.info, stc, trans, src, sphere,
                                 verbose=True)
    log = log.getvalue()
    assert '1 STC iteration provided' in log
    assert len(raw_sim_2.times) == n_samp
    assert_allclose(raw_sim[:, :n_samp][0], raw_sim_2[:, :n_samp][0],
                    rtol=1e-5, atol=1e-30)
    del raw_sim, raw_sim_2

    # check that different interpolations are similar given small movements
    raw_sim = simulate_raw(raw.info, stc, trans, src, sphere,
                           head_pos=head_pos_sim, interp='linear')
    raw_sim_hann = simulate_raw(raw.info, stc, trans, src, sphere,
                                head_pos=head_pos_sim, interp='hann')
    assert_allclose(raw_sim[:][0], raw_sim_hann[:][0], rtol=1e-1, atol=1e-14)
    del raw_sim_hann

    # check that new Generator objects can be used
    # (np.random.default_rng only exists on NumPy >= 1.17)
    if check_version('numpy', '1.17'):
        random_state = np.random.default_rng(seed)
        add_ecg(raw_sim, random_state=random_state)
        add_eog(raw_sim, random_state=random_state)
def test_mxne_vol_sphere():
    """Test (TF-)MxNE with a sphere forward and volumic source space."""
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)
    cov = read_cov(fname_cov)

    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)

    info = evoked.info
    # Sphere conductor model plus a regular-grid volume source space.
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 80.0),
                                        bem=None, mindist=5.0,
                                        exclude=2.0)
    fwd = mne.make_forward_solution(info, trans=None, src=src,
                                    bem=sphere, eeg=False, meg=True)

    alpha = 80.
    # 'loose' has no meaning for a volume source space -> must raise.
    pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
                  loose=0.0, return_residual=False,
                  maxit=3, tol=1e-8, active_set_size=10)
    pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
                  loose=0.2, return_residual=False,
                  maxit=3, tol=1e-8, active_set_size=10)

    # irMxNE tests
    stc = mixed_norm(evoked_l21, fwd, cov, alpha,
                     n_mxne_iter=1, maxit=30, tol=1e-8,
                     active_set_size=10)
    assert isinstance(stc, VolSourceEstimate)
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)

    # Compare orientation obtained using fit_dipole and mixed_norm
    # for a simulated evoked containing a single dipole
    stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
                                vertices=stc.vertices[:1],
                                tmin=stc.tmin,
                                tstep=stc.tstep)
    evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
                                                use_cps=True)

    dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80,
                          n_mxne_iter=1, maxit=30, tol=1e-8,
                          active_set_size=10, return_as_dipoles=True)

    # Keep only the dipole with the largest peak amplitude.
    amp_max = [np.max(d.amplitude) for d in dip_mxne]
    dip_mxne = dip_mxne[np.argmax(amp_max)]
    assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices]

    dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
    # Orientations should (nearly) agree and positions lie within 4 mm.
    assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
    dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
    assert dist < 4.  # within 4 mm

    # Do with TF-MxNE for test memory savings
    alpha = 60.  # overall regularization parameter
    l1_ratio = 0.01  # temporal regularization proportion

    stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4,
                           tstep=16, wsize=32, window=0.1, alpha=alpha,
                           l1_ratio=l1_ratio, return_residual=True)
    assert isinstance(stc, VolSourceEstimate)
    assert_array_almost_equal(stc.times, evoked.times, 5)
# Load real data as templates data_path = sample.data_path() raw = Raw(data_path + '/MEG/sample/sample_audvis_raw.fif') proj = read_proj(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif') raw.info['projs'] += proj raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif' ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif' cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif' fwd = read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True) fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads']) cov = read_cov(cov_fname) condition = 'Left Auditory' evoked_template = read_evokeds(ave_fname, condition=condition, baseline=None) evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True, exclude=raw.info['bads']) label_names = ['Aud-lh', 'Aud-rh'] labels = [ read_label(data_path + '/MEG/sample/labels/%s.label' % ln) for ln in label_names ] ###############################################################################
# Build and save a depth-weighted inverse operator for one session.
# NOTE(review): relies on `session1` and the loop index `n` being defined
# in surrounding code not visible here; also assumes the working directory
# starts at the parent of 'inverse'/'forward'/'covariance'.
os.chdir('inverse')
fn = 'All_40-sss_eq_' + session1[n] + '-ave.fif'
evoked = mne.read_evokeds(fn, condition=0, baseline=(None, 0),
                          kind='average', proj=True)
info = evoked.info

os.chdir('../forward')
fn = session1[n] + '-sss-fwd.fif'
fwd = mne.read_forward_solution(fn, force_fixed=False, surf_ori=True)

# Inverse here
os.chdir('../covariance')
fn = session1[n] + '-40-sss-cov.fif'
cov = mne.read_cov(fn)

os.chdir('../inverse')
# Free: loose = 1; Loose: loose = 0.2
inv = mne.minimum_norm.make_inverse_operator(info, fwd, cov, loose=0.2,
                                             depth=0.8, use_cps=True)
fn = session1[n] + '-depth8-inv.fif'
mne.minimum_norm.write_inverse_operator(fn, inv)
def test_cov_estimation_with_triggers():
    """Test estimation from raw with triggers."""
    tempdir = _TempDir()
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)

    cov = compute_covariance(epochs, keep_sample_mean=True)
    # Compare against the reference covariance shipped with the test data.
    _assert_cov(cov, read_cov(cov_km_fname))

    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert_true(np.all(cov.data != cov_tmin_tmax.data))
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert_true(err < 0.05, msg=err)

    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    # Per-event-id list must give the same result as the merged events.
    assert_array_almost_equal(cov.data, cov2.data)
    assert_true(cov.ch_names == cov2.ch_names)

    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    _assert_cov(cov, read_cov(cov_fname), nfree=False)

    method_params = {'empirical': {'assume_centered': False}}
    # keep_sample_mean=False is incompatible with these options.
    assert_raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method_params=method_params)
    assert_raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method='factor_analysis')

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)

    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True),
              Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=False)]
    # these should fail
    assert_raises(ValueError, compute_covariance, epochs)
    assert_raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with warnings.catch_warnings(record=True) as w:  # too few samples warning
        warnings.simplefilter('always')
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
        cov = compute_covariance(epochs, projs=[])
    # One 'too few samples' warning per compute_covariance call above.
    assert_equal(len(w), 2)

    # test new dict support
    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01,
                    tmax=0, proj=True, reject=reject, preload=True)
    with warnings.catch_warnings(record=True):  # samples
        compute_covariance(epochs)

    # projs checking
    compute_covariance(epochs, projs=[])
    assert_raises(TypeError, compute_covariance, epochs, projs='foo')
    assert_raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_plot_ica_properties():
    """Test plotting of ICA properties."""
    raw = _get_raw(preload=True).crop(0, 5)
    raw.add_proj([], remove_existing=True)
    events = make_fixed_length_events(raw)
    picks = _get_picks(raw)[:6]
    pick_names = [raw.ch_names[k] for k in picks]
    raw.pick_channels(pick_names)
    reject = dict(grad=4000e-13, mag=4e-12)
    epochs = Epochs(raw, events[:3], event_id, tmin, tmax,
                    baseline=(None, 0), preload=True)

    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2, max_iter=1,
              random_state=0)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw)

    # test _create_properties_layout
    fig, ax = _create_properties_layout()
    assert_equal(len(ax), 5)

    # Low-res topomaps keep the test fast.
    topoargs = dict(topomap_args={'res': 4, 'contours': 0, "sensors": False})
    ica.plot_properties(raw, picks=0, **topoargs)
    ica.plot_properties(epochs, picks=1, dB=False, plot_std=1.5, **topoargs)
    ica.plot_properties(epochs, picks=1, image_args={'sigma': 1.5},
                        topomap_args={'res': 4, 'colorbar': True},
                        psd_args={'fmax': 65.}, plot_std=False,
                        figsize=[4.5, 4.5], reject=reject)
    plt.close('all')

    # Invalid inputs must raise with an informative message.
    with pytest.raises(TypeError, match='must be an instance'):
        ica.plot_properties(epochs, dB=list('abc'))
    with pytest.raises(TypeError, match='must be an instance'):
        ica.plot_properties(ica)
    with pytest.raises(TypeError, match='must be an instance'):
        ica.plot_properties([0.2])
    with pytest.raises(TypeError, match='must be an instance'):
        plot_ica_properties(epochs, epochs)
    with pytest.raises(TypeError, match='must be an instance'):
        ica.plot_properties(epochs, psd_args='not dict')
    with pytest.raises(TypeError, match='must be an instance'):
        ica.plot_properties(epochs, plot_std=[])

    # Passing user-supplied axes (one fewer than created above).
    fig, ax = plt.subplots(2, 3)
    ax = ax.ravel()[:-1]
    ica.plot_properties(epochs, picks=1, axes=ax, **topoargs)
    pytest.raises(TypeError, plot_ica_properties, epochs, ica, picks=[0, 1],
                  axes=ax)
    pytest.raises(ValueError, ica.plot_properties, epochs, axes='not axes')
    plt.close('all')

    # Test merging grads.
    pick_names = raw.ch_names[:15:2] + raw.ch_names[1:15:2]
    raw = _get_raw(preload=True).pick_channels(pick_names).crop(0, 5)
    raw.info.normalize_proj()
    ica = ICA(random_state=0, max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw)
    ica.plot_properties(raw)
    plt.close('all')

    # Test handling of zeros
    ica = ICA(random_state=0, max_iter=1)
    epochs.pick_channels(pick_names)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(epochs)
    epochs._data[0] = 0
    with pytest.warns(None):  # Usually UserWarning: Infinite value .* for epo
        ica.plot_properties(epochs, **topoargs)
    plt.close('all')

    # Test Raw with annotations
    annot = Annotations(onset=[1], duration=[1], description=['BAD'])
    raw_annot = _get_raw(preload=True).set_annotations(annot).crop(0, 8)
    raw_annot.pick(np.arange(10))
    raw_annot.del_proj()
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw_annot)
    # drop bad data segments
    fig = ica.plot_properties(raw_annot, picks=[0, 1], **topoargs)
    assert_equal(len(fig), 2)
    # don't drop
    ica.plot_properties(raw_annot, reject_by_annotation=False, **topoargs)
    plt.close('all')
# Per-subject source-analysis setup: load forward model, empty-room noise
# covariance, and FieldTrip CSP epochs for each subject.
# NOTE(review): `PATHfrom` and `SUBJECTS` are defined elsewhere (not visible
# in this snippet).
myPATH = '/net/server/data/Archive/aut_gamma/orekhova/KI/Scripts_bkp/Shishkina/KI/'
subjects_dir = PATHfrom + 'freesurfersubjects'
subjects_dir_case = PATHfrom + 'freesurfersubjects/Case'

for subject in SUBJECTS:
    subjpath = PATHfrom + 'SUBJECTS/' + subject + '/ICA_nonotch_crop/epochs/'
    savepath = myPATH + 'Results_Alpha_and_Gamma/'
    raw_fname = PATHfrom + 'SUBJECTS/' + subject + '/ICA_nonotch_crop/' + subject + '_rings_ICA_raw.fif'

    # load forward model
    fwd = mne.read_forward_solution(savepath + subject + '/' + subject + '_fwd', verbose=None)

    # load noise covariance matrix from empty room data
    noise_cov = mne.read_cov(savepath + subject + '/' + subject + 'noise_cov_10_17Hz', verbose=None)

    original_data = mne.io.read_raw_fif(raw_fname, preload=False)
    original_info = original_data.info
    # NOTE(review): overriding 'sfreq' in-place — presumably to match the
    # FieldTrip epochs' sampling rate; confirm against the .mat file.
    original_info['sfreq'] = 500

    # for the first 3 CSP components and the second 3 CSP components (commented)
    diff = []
    CSP = ['1', '2', '3', '4', '5', '6', '97', '98', '99', '100', '101', '102']
    for num in CSP:
        # load csp data for fast from fieldtrip
        ftname = savepath + subject + '/' + subject + '_fieldtrip_csp_1_6_and_97_102_old_to_mne.mat'
        # NOTE(review): the snippet is truncated here — the call below is
        # left open in the visible source.
        fast_epo = mne.read_epochs_fieldtrip(ftname, original_info,
def test_rank():
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and
                ch.startswith('EEG')]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert_equal(cov['eig'][0], 0.)  # avg projector should set this to zero
    assert_true((cov['eig'][1:] > 0).all())  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)

    raw_sss = read_raw_fif(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    # Covariances with and without the projectors applied, for both the
    # plain 'sample' recording and the maxfiltered (SSS) one.
    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)]))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:
            this_very_info = pick_info(this_info, picks)

            # compute subset of projs: active projectors that touch at
            # least one of the picked channels reduce the rank.
            this_projs = [c['active'] and
                          len(set(c['data']['col_names']).
                              intersection(set(this_very_info['ch_names']))) > 0
                          for c in cov['projs']]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [channel_type(this_very_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            # The average-EEG-reference projector accounts for one
            # dimension whenever EEG channels are included.
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if len(this_very_info['proc_history']) > 0:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
def test_simulate_evoked():
    """Test simulation of evoked data from sparse label activations.

    Builds two Morlet-wavelet time courses, places them in the auditory
    labels, generates a noisy evoked from the forward model, and checks
    the output dimensions plus the bad-vertex error path.
    """
    raw = mne.fiff.Raw(raw_fname)
    fwd = read_forward_solution(fwd_fname, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
    cov = mne.read_cov(cov_fname)
    label_names = ['Aud-lh', 'Aud-rh']
    labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
                                 '%s.label' % label))
              for label in label_names]

    evoked_template = mne.fiff.read_evoked(ave_fname, setno=0, baseline=None)
    evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True,
                                        exclude=raw.info['bads'])

    snr = 6  # dB
    tmin = -0.1
    sfreq = 1000.  # Hz
    tstep = 1. / sfreq
    n_samples = 600
    # BUGFIX: np.linspace(tmin, tmin + n_samples * tstep, n_samples) includes
    # the endpoint, so its step is n_samples * tstep / (n_samples - 1), not
    # tstep; build the sample grid explicitly instead.
    times = tmin + np.arange(n_samples) * tstep

    # Generate times series from 2 Morlet wavelets
    stc_data = np.zeros((len(labels), len(times)))
    Ws = morlet(sfreq, [3, 10], n_cycles=[1, 1.5])
    stc_data[0][:len(Ws[0])] = np.real(Ws[0])
    stc_data[1][:len(Ws[1])] = np.real(Ws[1])
    stc_data *= 100 * 1e-9  # use nAm as unit

    # time translation
    stc_data[1] = np.roll(stc_data[1], 80)
    stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
                              random_state=0)

    # Generate noisy evoked data
    iir_filter = [1, -0.9]  # AR(1) noise model
    evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
                             tmin=0.0, tmax=0.2, iir_filter=iir_filter)
    assert_array_almost_equal(evoked.times, stc.times)
    assert_true(len(evoked.data) == len(fwd['sol']['data']))

    # make a vertex that doesn't exist in fwd, should throw error
    stc_bad = stc.copy()
    mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
    stc_bad.vertno[0][0] = mv + 1
    assert_raises(RuntimeError, generate_evoked, fwd, stc_bad,
                  evoked_template, cov, snr, tmin=0.0, tmax=0.2)
def run_inverse(subject_id):
    """Compute inverse solutions for one subject (both recording days).

    For each day, reads the evoked responses (AV, FB and run-level IT),
    builds dSPM/MNE inverse solutions, and writes per-condition label
    time courses (csv) and fsaverage-morphed STCs next to the input MEG
    data. Interface and outputs match the original implementation; the
    three duplicated apply/save loops are factored into one helper.
    """
    tasks = ['AVLearn', 'AVLearn']  # same task on both days
    days = [100, 200]
    for task, day in zip(tasks, days):
        subject = group_name + "%d" % subject_id
        print("processing subject: %s" % subject)
        fname = op.join(MEG_data_path, subject,
                        task + '_%d' % (day + subject_id) + '_tsss_mc.fif')

        # Day-specific condition lists; the IT evokeds come from a
        # separate '*evoked_RG.fif' file.
        if day == 100:
            evokeds_AV = mne.read_evokeds(fname.replace("_tsss_mc", "-ave"),
                                          cond_day1_AV)
            evokeds_FB = mne.read_evokeds(fname.replace("_tsss_mc", "-ave"),
                                          cond_day1_FB)
            evokeds_IT = mne.read_evokeds(fname.replace('tsss_mc',
                                                        'evoked_RG'))
        elif day == 200:
            evokeds_AV = mne.read_evokeds(fname.replace("_tsss_mc", "-ave"),
                                          cond_day2_AV)
            evokeds_FB = mne.read_evokeds(fname.replace("_tsss_mc", "-ave"),
                                          cond_day2_FB)
            evokeds_IT = mne.read_evokeds(fname.replace('tsss_mc',
                                                        'evoked_RG'))

        # Average number of epochs per condition (dict: comment -> nave).
        epo_Nave_avg = np.load(fname.replace("tsss_mc.fif",
                                             'epo_Nave_avg.npy'),
                               allow_pickle=True).item()
        epo_Nave_avg_RG = np.load(fname.replace("tsss_mc.fif",
                                                'epo_Nave_avg_RG.npy'),
                                  allow_pickle=True).item()

        # only for nave>15
        evokeds_AV = [evk for evk in evokeds_AV
                      if (epo_Nave_avg[evk.comment] > 15)]
        evokeds_FB = [evk for evk in evokeds_FB
                      if (epo_Nave_avg[evk.comment] > 15)]

        cov_AV = mne.read_cov(fname.replace('_tsss_mc.fif', '_AV-cov.fif'))
        cov_FB = mne.read_cov(fname.replace('_tsss_mc.fif', '_FB-cov.fif'))
        forward = mne.read_forward_solution(
            fname.replace('_tsss_mc.fif', '-meg-ico5-fwd.fif'))

        inverse_operator_AV = make_inverse_operator(evokeds_AV[0].info,
                                                    forward, cov_AV,
                                                    loose=1, depth=0.8)
        inverse_operator_FB = make_inverse_operator(evokeds_FB[0].info,
                                                    forward, cov_FB,
                                                    loose=1, depth=0.8)
        # IT reuses the AV noise covariance (as in the original pipeline).
        inverse_operator_IT = make_inverse_operator(evokeds_IT[0].info,
                                                    forward, cov_AV,
                                                    loose=1, depth=0.8)

        labels = mne.read_labels_from_annot(
            subject=task + '_' + str(day + subject_id), parc='aparc',
            subjects_dir=MRI_data_path)
        label_names = [labels[indx].name for indx in range(len(labels))]
        # NOTE: assumes 'aparc' ordering — labels[0]/labels[60] are
        # bankssts-lh/superiortemporal-lh, labels[1]/labels[61] their
        # rh counterparts; verify against the annot file.
        STC_L = labels[0] + labels[60]  # 'bankssts-lh + superiortemporal-lh'
        STC_R = labels[1] + labels[61]  # 'bankssts-rh + superiortemporal-rh'
        labels.append(STC_L)
        labels.append(STC_R)
        label_names.append(STC_L.name)
        label_names.append(STC_R.name)

        snr = 3.0
        lambda2 = 1.0 / snr ** 2
        methods = ['dSPM', 'MNE']
        pick_ori = None
        mode = 'mean'

        def _apply_and_save(evokeds, inv_op, nave_avg, method):
            """Apply `inv_op` to each evoked; save label time courses
            (csv) and fsaverage-morphed STCs. dSPM amplitudes are
            rescaled from the actual to the average number of epochs."""
            for evoked in evokeds:
                stc = apply_inverse(
                    evoked.apply_baseline(baseline=(None, 0)), inv_op,
                    lambda2, method=method, pick_ori=pick_ori)
                label_ts = mne.extract_label_time_course(
                    stc, labels, inv_op['src'], allow_empty=True, mode=mode)
                if method == 'dSPM':
                    scale = (math.sqrt(nave_avg[evoked.comment]) /
                             math.sqrt(evoked.nave))
                    label_ts = label_ts * scale
                out_stem = evoked.comment.replace('/', '_') + '-' + method
                pd.DataFrame(label_ts.T, index=stc.times,
                             columns=label_names).to_csv(
                    fname.replace('tsss_mc.fif', out_stem + '.csv'))
                stc_fsaverage = mne.compute_source_morph(
                    stc, subjects_dir=MRI_data_path).apply(stc)
                if method == 'dSPM':
                    stc_fsaverage = stc_fsaverage * scale
                stc_fsaverage.save(fname.replace('tsss_mc.fif', out_stem))

        # The three evoked families differ only in inverse operator and
        # nave bookkeeping, so share one helper.
        for method in methods:
            _apply_and_save(evokeds_AV, inverse_operator_AV,
                            epo_Nave_avg, method)
            _apply_and_save(evokeds_FB, inverse_operator_FB,
                            epo_Nave_avg, method)
            _apply_and_save(evokeds_IT, inverse_operator_IT,
                            epo_Nave_avg_RG, method)
fname_evoked, fname_trans, src, fname_bem, mindist=5.0, # ignore sources<=5mm from innerskull meg=True, eeg=False, n_jobs=1) leadfield = fwd['sol']['data'] print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape) src_fwd = fwd['src'] n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd))) print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n)) # Load data condition = 'Left Auditory' evoked = mne.read_evokeds(fname_evoked, condition=condition, baseline=(None, 0)) noise_cov = mne.read_cov(fname_cov) ############################################################################### # Compute inverse solution # ------------------------ snr = 3.0 # use smaller SNR for raw data inv_method = 'dSPM' # sLORETA, MNE, dSPM parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s' loose = dict(surface=0.2, volume=1.) lambda2 = 1.0 / snr ** 2 inverse_operator = make_inverse_operator( evoked.info, fwd, noise_cov, depth=None, loose=loose, verbose=True) stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
def noise_cov_io():
    """Get noise-covariance (from mne.io.tests.data)."""
    noise_cov = mne.read_cov(fname_cov_io)
    return noise_cov
def get_estimate_baseline(M, ROI_list, n_ROI_valid, fwd_path, evoked_path,
                          noise_cov_path, out_name, method="dSPM",
                          lambda2=1.0, prior_Q0=None, prior_Q=None,
                          prior_sigma_J_list=None, prior_A=None,
                          depth=0.8, MaxIter0=100, MaxIter=50,
                          tol0=1E-4, tol=1E-2, verbose0=True, verbose=False,
                          flag_A_time_vary=False, flag_sign_flip=False,
                          force_fixed=True):
    """Estimate a baseline ROI state-space model from a two-step inverse.

    Parameters
    ----------
    M : ndarray, shape (q, n_channels, n_times)
        Sensor data, q trials.
    ROI_list : list of ndarray
        Dipole-index arrays, one per ROI; by construction the last entry
        holds the dipoles outside the valid ROIs.
    n_ROI_valid : int
        Number of valid ROIs (p); ``ROI_list`` may have one extra entry.
    fwd_path : str
        Full path of the forward solution.
    evoked_path : str
        Full path of the evoked template (only info/times are used).
    noise_cov_path : str
        Full path of the noise covariance.
    out_name : str
        Full path of the .mat file to save results to.
    method : str
        Source estimation method: 'MNE', 'dSPM', 'sLORETA' or 'LCMV'.
    lambda2 : float
        Regularization for the minimum-norm inverse.
    prior_Q0, prior_Q, prior_sigma_J_list :
        Not implemented; may become inverse-gamma or gamma priors.
    prior_A : dict | None
        E.g. ``dict(lambda0=0.0, lambda1=1.0)``.
    depth : float
        Forward depth-weighting parameter.
    MaxIter0, MaxIter, tol0, tol, verbose0, verbose :
        Optimization controls forwarded to ``get_param_given_u``.
    flag_A_time_vary : bool
        If True, fit a time-varying transition matrix A.
    flag_sign_flip : bool
        If True, sign-flip dipoles within each ROI using the first
        right-singular vector of the dipole normals.
    force_fixed : bool
        Passed to ``mne.read_forward_solution``.

    Notes
    -----
    Saves ``Q0_hat, A_hat, Q_hat, method, lambda2, u_array_hat,
    Sigma_J_list_hat`` into ``out_name`` (MATLAB format); returns None.
    """
    q, _, T0 = M.shape
    T = T0 - 1
    # mne.read_evokeds returns a list; take the first element
    evoked = mne.read_evokeds(evoked_path)[0]
    # depth weighting, TO BE MODIFIED
    print(force_fixed)  # was a Python-2 print statement; fixed for Python 3
    fwd0 = mne.read_forward_solution(fwd_path, force_fixed=force_fixed,
                                     surf_ori=True)
    fwd = copy.deepcopy(fwd0)
    noise_cov = mne.read_cov(noise_cov_path)
    # orientation of dipoles
    ind0 = fwd['src'][0]['inuse']
    ind1 = fwd['src'][1]['inuse']
    # positions of dipoles (both hemispheres), normalized by max squared norm
    rr = np.vstack([fwd['src'][0]['rr'][ind0 == 1, :],
                    fwd['src'][1]['rr'][ind1 == 1, :]])
    rr = rr / np.max(np.sum(rr ** 2, axis=1))
    nn = np.vstack([fwd['src'][0]['nn'][ind0 == 1, :],
                    fwd['src'][1]['nn'][ind1 == 1, :]])
    ch_names = evoked.info['ch_names']
    # embed the (good-channel) data into the full channel set; bads stay zero
    M_all = np.zeros([q, len(ch_names), T0])
    valid_channel_ind = [i for i in range(len(ch_names))
                         if ch_names[i] not in evoked.info['bads']]
    M_all[:, valid_channel_ind, :] = M.copy()
    # dummy events; np.int is a removed NumPy alias, use builtin int
    events = np.ones([M.shape[0], 3], dtype=int)
    epochs = mne.EpochsArray(data=M_all, info=evoked.info, events=events,
                             tmin=evoked.times[0], event_id=None, reject=None)
    if method in ['MNE', 'dSPM', 'sLORETA']:
        # minimum-norm family: fixed-orientation inverse on the epochs
        inv_op = mne.minimum_norm.make_inverse_operator(
            evoked.info, fwd, noise_cov, loose=0.0, depth=depth, fixed=True)
        stcs = mne.minimum_norm.apply_inverse_epochs(
            epochs, inv_op, lambda2=lambda2, method=method)
    elif method in ['LCMV']:
        # why this time window?
        data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
                                          method='shrunk')
        stcs = list()
        for r in range(q):
            tmp_evoked = epochs[r].average()
            stcs.append(mne.beamformer.lcmv(tmp_evoked, fwd, noise_cov,
                                            data_cov, reg=0.01,
                                            pick_ori='normal'))
    p = n_ROI_valid
    ROI_U = np.zeros([q, p, T0])
    Sigma_J_list_hat = np.zeros(len(ROI_list))
    for i in range(p):
        tmp_ind = ROI_list[i]
        # SVD of the dipole normals; optionally sign-flip by the first
        # right-singular vector so opposing dipoles do not cancel
        tmp_nn = nn[tmp_ind, :]
        tmpu, tmpd, tmpv = np.linalg.svd(tmp_nn)
        signs = (np.sign(np.dot(tmp_nn, tmpv[0, :])) if flag_sign_flip
                 else np.ones(len(tmp_ind)))
        for r in range(q):
            ROI_U[r, i, :] = np.sum(stcs[r].data[tmp_ind].T * signs, axis=1)
            Sigma_J_list_hat[i] += np.var(
                (stcs[r].data[tmp_ind].T * signs).T - ROI_U[r, i, :])
        Sigma_J_list_hat[i] /= float(q)  # np.float removed; use builtin float
    # variance of the non-ROI dipoles (last entry of ROI_list)
    for r in range(q):
        tmp = stcs[r].data[ROI_list[-1]]
        Sigma_J_list_hat[-1] += np.var(tmp)
    Sigma_J_list_hat[-1] /= float(q)
    # fit the state-space parameters on the ROI time courses
    u_array = ROI_U.transpose([0, 2, 1])
    Gamma0_0 = np.eye(p)
    A_0 = np.zeros([T, p, p])
    Gamma_0 = np.eye(p)
    # first run the non-prior version to get a global solution
    Gamma0_1, A_1, Gamma_1 = get_param_given_u(
        u_array, Gamma0_0, A_0, Gamma_0,
        flag_A_time_vary=flag_A_time_vary,
        prior_Q0=None, prior_A=None, prior_Q=None,
        MaxIter0=MaxIter0, tol0=tol0, verbose0=verbose0,
        MaxIter=MaxIter, tol=tol, verbose=verbose)
    # then compute Q0, A, Q with priors, warm-started from the global fit
    Gamma0_hat, A_hat, Gamma_hat = get_param_given_u(
        u_array, Gamma0_1, A_1, Gamma_1,
        flag_A_time_vary=flag_A_time_vary,
        prior_Q0=prior_Q0, prior_A=prior_A, prior_Q=prior_Q,
        MaxIter0=MaxIter0, tol0=tol0, verbose0=verbose0,
        MaxIter=MaxIter, tol=tol, verbose=verbose)
    Q0_hat = Gamma0_hat.dot(Gamma0_hat.T)
    Q_hat = Gamma_hat.dot(Gamma_hat.T)
    result = dict(Q0_hat=Q0_hat, A_hat=A_hat, Q_hat=Q_hat, method=method,
                  lambda2=lambda2, u_array_hat=u_array,
                  Sigma_J_list_hat=Sigma_J_list_hat)
    scipy.io.savemat(out_name, result)
def test_cov_estimation_with_triggers(rank, tmpdir):
    """Test estimation from raw with triggers."""
    raw = read_raw_fif(raw_fname)
    # reference projection must be active so results match precomputed covs
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)
    cov = compute_covariance(epochs, keep_sample_mean=True)
    cov_km = read_cov(cov_km_fname)
    # adjust for nfree bug
    cov_km['nfree'] -= 1
    _assert_cov(cov, cov_km)
    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert np.all(cov.data != cov_tmin_tmax.data)
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert err < 0.05
    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    # list input must give the same result as the merged-events epochs
    assert_array_almost_equal(cov.data, cov2.data)
    assert cov.ch_names == cov2.ch_names
    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    assert cov_km.nfree == cov.nfree
    _assert_cov(cov, read_cov(cov_fname), nfree=False)
    # these parameter combinations are invalid with keep_sample_mean=False
    method_params = {'empirical': {'assume_centered': False}}
    pytest.raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method_params=method_params)
    pytest.raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method='shrunk', rank=rank)
    # test IO when computation done in Python
    cov.save(tmpdir.join('test-cov.fif'))  # test saving
    cov_read = read_cov(tmpdir.join('test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)
    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True),
              Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=False)]
    # these should fail
    pytest.raises(ValueError, compute_covariance, epochs)
    pytest.raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_covariance(epochs, projs=[])
    # test new dict support
    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01,
                    tmax=0, proj=True, reject=reject, preload=True)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        compute_covariance(epochs)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        compute_covariance(epochs, projs=[])
    # projs must be a list of Projection instances (or None)
    pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
    pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
              epochs_preload=True, data_cov=True, proj=True):
    """Read in data used in tests.

    Returns the tuple ``(raw, epochs, evoked, data_cov, noise_cov, label,
    forward, forward_surf_ori, forward_fixed, forward_vol)``; the three
    extra forwards are None unless ``all_forward``, and epochs/evoked/
    data_cov may be None depending on the flags.
    """
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    forward = mne.read_forward_solution(fname_fwd)
    if all_forward:
        forward_surf_ori = _read_forward_solution_meg(fname_fwd,
                                                      surf_ori=True)
        forward_fixed = _read_forward_solution_meg(fname_fwd,
                                                   force_fixed=True,
                                                   surf_ori=True,
                                                   use_cps=False)
        forward_vol = _read_forward_solution_meg(fname_fwd_vol)
    else:
        forward_surf_ori = None
        forward_fixed = None
        forward_vol = None

    event_id, tmin, tmax = 1, tmin, tmax

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, selection=left_temporal_channels)
    picks = picks[::2]  # decimate for speed
    # add a couple channels we will consider bad
    bad_picks = [100, 101]
    bads = [raw.ch_names[pick] for pick in bad_picks]
    assert not any(pick in picks for pick in bad_picks)
    picks = np.concatenate([picks, bad_picks])
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    del picks

    raw.info['bads'] = bads  # add more bads
    if proj:
        raw.info.normalize_proj()  # avoid projection warnings
    else:
        raw.del_proj()

    if epochs:
        # Read epochs
        epochs = mne.Epochs(
            raw, events, event_id, tmin, tmax, proj=True,
            baseline=(None, 0), preload=epochs_preload,
            reject=reject)
        if epochs_preload:
            epochs.resample(200, npad=0)
        epochs.crop(0, None)
        evoked = epochs.average()
        info = evoked.info
    else:
        epochs = None
        evoked = None
        info = raw.info

    noise_cov = mne.read_cov(fname_cov)
    noise_cov['projs'] = []  # avoid warning
    noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,
                                   eeg=0.1, proj=True, rank=None)
    if data_cov:
        data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145)
    else:
        data_cov = None

    return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol
def test_cov_rank_estimation(rank_method, proj, meg):
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and
                ch.startswith('EEG')]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert cov['eig'][0] <= 1e-25  # avg projector should set this to zero
    assert (cov['eig'][1:] > 1e-16).all()  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)
    assert not _has_eeg_average_ref_proj(raw_sample.info['projs'])

    raw_sss = read_raw_fif(hp_fif_fname)
    assert not _has_eeg_average_ref_proj(raw_sss.info['projs'])
    raw_sss.add_proj(compute_proj_raw(raw_sss, meg=meg))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    # build (label, picks) stacks per channel type for both recordings
    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)],
    ))

    for (cov, picks_list, iter_info), scalings in iter_tests:
        rank = compute_rank(cov, rank_method, scalings, iter_info,
                            proj=proj)
        rank['all'] = sum(rank.values())
        for ch_type, picks in picks_list:

            this_info = pick_info(iter_info, picks)

            # compute subset of projs, active and inactive
            n_projs_applied = sum(proj['active'] and
                                  len(set(proj['data']['col_names']) &
                                      set(this_info['ch_names'])) > 0
                                  for proj in cov['projs'])
            n_projs_info = sum(len(set(proj['data']['col_names']) &
                                   set(this_info['ch_names'])) > 0
                               for proj in this_info['projs'])

            # count channel types
            ch_types = [channel_type(this_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            # an SSS (Maxwell-filtered) recording has reduced MEG rank
            has_sss = (n_meg > 0 and len(this_info['proc_history']) > 0)
            if has_sss:
                n_meg = _get_rank_sss(this_info)

            expected_rank = n_meg + n_eeg
            if rank_method is None:
                if meg == 'combined' or not has_sss:
                    if proj:
                        expected_rank -= n_projs_info
                    else:
                        expected_rank -= n_projs_applied
            else:
                # XXX for now it just uses the total count
                assert rank_method == 'info'
                if proj:
                    expected_rank -= n_projs_info

            assert rank[ch_type] == expected_rank
def noise_cov():
    """Get a noise cov from the testing dataset."""
    cov = mne.read_cov(fname_cov)
    return cov
def test_cov_order():
    """Test covariance ordering.

    Builds a randomly-permuted copy of the covariance and checks that
    regularize / prepare_noise_cov / compute_whitener / whiten_evoked give
    the same result regardless of channel ordering.
    """
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True)
    info = raw.info
    # add MEG channel with low enough index number to affect EEG if
    # order is incorrect
    info['bads'] += ['MEG 0113']
    ch_names = [info['ch_names'][pick]
                for pick in pick_types(info, meg=False, eeg=True)]
    cov = read_cov(cov_fname)
    # no avg ref present warning
    prepare_noise_cov(cov, info, ch_names, verbose='error')
    # big reordering
    cov_reorder = cov.copy()
    order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
    cov_reorder['names'] = [cov['names'][ii] for ii in order]
    cov_reorder['data'] = cov['data'][order][:, order]
    # Make sure we did this properly
    _assert_reorder(cov_reorder, cov, order)
    # Now check some functions that should get the same result for both
    # regularize
    with pytest.raises(ValueError, match='rank, if str'):
        regularize(cov, info, rank='foo')
    with pytest.raises(TypeError, match='rank must be'):
        regularize(cov, info, rank=False)
    with pytest.raises(TypeError, match='rank must be'):
        regularize(cov, info, rank=1.)
    cov_reg = regularize(cov, info, rank='full')
    cov_reg_reorder = regularize(cov_reorder, info, rank='full')
    _assert_reorder(cov_reg_reorder, cov_reg, order)
    # prepare_noise_cov
    cov_prep = prepare_noise_cov(cov, info, ch_names)
    # BUG FIX: previously `cov` was passed here too, making the comparison
    # below a tautology; the reordered covariance must be used so the test
    # actually exercises order independence (prepare_noise_cov re-sorts to
    # ch_names, so both results should come out in identical order).
    cov_prep_reorder = prepare_noise_cov(cov_reorder, info, ch_names)
    _assert_reorder(cov_prep, cov_prep_reorder,
                    order=np.arange(len(cov_prep['names'])))
    # compute_whitener
    whitener, w_ch_names, n_nzero = compute_whitener(
        cov, info, return_rank=True)
    assert whitener.shape[0] == whitener.shape[1]
    whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
        cov_reorder, info, return_rank=True)
    assert_array_equal(w_ch_names_2, w_ch_names)
    assert_allclose(whitener_2, whitener)
    assert n_nzero == n_nzero_2
    # with pca
    assert n_nzero < whitener.shape[0]
    whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
        cov, info, pca=True, return_rank=True)
    assert_array_equal(w_ch_names_pca, w_ch_names)
    assert n_nzero_pca == n_nzero
    assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
    # whiten_evoked
    evoked = read_evokeds(ave_fname)[0]
    evoked_white = whiten_evoked(evoked, cov)
    evoked_white_2 = whiten_evoked(evoked, cov_reorder)
    assert_allclose(evoked_white_2.data, evoked_white.data)
def test_cov_estimation_on_raw(method, tmpdir):
    """Test estimation from raw (typically empty room)."""
    if method == 'shrunk':
        try:
            import sklearn  # noqa: F401
        except Exception as exp:
            pytest.skip('sklearn is required, got %s' % (exp,))
    raw = read_raw_fif(raw_fname, preload=True)
    cov_mne = read_cov(erm_cov_fname)
    method_params = dict(shrunk=dict(shrinkage=[0]))

    # The pure-string uses the more efficient numpy-based method, the
    # list gets triaged to compute_covariance (should be equivalent
    # but use more memory)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(raw, tstep=None, method=method,
                                     rank='full',
                                     method_params=method_params)
    assert_equal(cov.ch_names, cov_mne.ch_names)
    assert_equal(cov.nfree, cov_mne.nfree)
    assert_snr(cov.data, cov_mne.data, 1e6)

    # test equivalence with np.cov
    cov_np = np.cov(raw.copy().pick_channels(cov['names']).get_data(),
                    ddof=1)
    if method != 'shrunk':  # can check all
        off_diag = np.triu_indices(cov_np.shape[0])
    else:
        # We explicitly zero out off-diag entries between channel types,
        # so let's just check MEG off-diag entries
        off_diag = np.triu_indices(len(pick_types(raw.info, meg=True,
                                                  exclude=())))
    for other in (cov_mne, cov):
        assert_allclose(np.diag(cov_np), np.diag(other.data), rtol=5e-6)
        assert_allclose(cov_np[off_diag], other.data[off_diag], rtol=4e-3)
        assert_snr(cov.data, other.data, 1e6)

    # tstep=0.2 (default)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(raw, method=method, rank='full',
                                     method_params=method_params)
    assert_equal(cov.nfree, cov_mne.nfree - 120)  # cutoff some samples
    assert_snr(cov.data, cov_mne.data, 170)

    # test IO when computation done in Python
    cov.save(tmpdir.join('test-cov.fif'))  # test saving
    cov_read = read_cov(tmpdir.join('test-cov.fif'))
    assert cov_read.ch_names == cov.ch_names
    assert cov_read.nfree == cov.nfree
    assert_array_almost_equal(cov.data, cov_read.data)

    # test with a subset of channels
    raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
    raw_pick.info.normalize_proj()
    cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
                                 rank='full', method_params=method_params)
    assert cov_mne.ch_names[:5] == cov.ch_names
    assert_snr(cov.data, cov_mne.data[:5, :5], 5e6)
    cov = compute_raw_covariance(raw_pick, method=method, rank='full',
                                 method_params=method_params)
    assert_snr(cov.data, cov_mne.data[:5, :5], 90)  # cutoff samps

    # make sure we get a warning with too short a segment
    raw_2 = read_raw_fif(raw_fname).crop(0, 1)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_raw_covariance(raw_2, method=method,
                                     method_params=method_params)
    # no epochs found due to rejection
    pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
                  method='empirical', reject=dict(eog=200e-6))
    # but this should work
    with pytest.warns(None):  # sklearn
        cov = compute_raw_covariance(raw.copy().crop(0, 10.), tstep=None,
                                     method=method,
                                     reject=dict(eog=1000e-6),
                                     method_params=method_params,
                                     verbose='error')
def test_apply_inverse_operator():
    """Test MNE inverse computation (precomputed and non-precomputed)."""
    inverse_operator = read_inverse_operator(fname_inv)
    evoked = _get_evoked()
    noise_cov = read_cov(fname_cov)

    # Test old version of inverse computation starting from forward operator
    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
                                      loose=0.2, depth=0.8,
                                      limit_depth_chs=False)
    _compare_io(my_inv_op)
    assert_true(inverse_operator['units'] == 'Am')
    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 2,
                             check_depth=False)
    # Inverse has 306 channels - 4 proj = 302
    assert_true(compute_rank_inverse(inverse_operator) == 302)

    # Test MNE inverse computation starting from forward operator
    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
                                      loose=0.2, depth=0.8)
    _compare_io(my_inv_op)
    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 2)
    # Inverse has 306 channels - 4 proj = 302
    assert_true(compute_rank_inverse(inverse_operator) == 302)

    # sanity bounds on the MNE amplitude estimates
    stc = apply_inverse(evoked, inverse_operator, lambda2, "MNE")
    assert_true(stc.subject == 'sample')
    assert_true(stc.data.min() > 0)
    assert_true(stc.data.max() < 10e-10)
    assert_true(stc.data.mean() > 1e-11)

    # test if using prepared and not prepared inverse operator give the same
    # result
    inv_op = prepare_inverse_operator(inverse_operator, nave=evoked.nave,
                                      lambda2=lambda2, method="MNE")
    stc2 = apply_inverse(evoked, inv_op, lambda2, "MNE")
    assert_array_almost_equal(stc.data, stc2.data)
    assert_array_almost_equal(stc.times, stc2.times)

    # sanity bounds for the (unitless) sLORETA estimates
    stc = apply_inverse(evoked, inverse_operator, lambda2, "sLORETA")
    assert_true(stc.subject == 'sample')
    assert_true(stc.data.min() > 0)
    assert_true(stc.data.max() < 10.0)
    assert_true(stc.data.mean() > 0.1)

    # sanity bounds for the (unitless) dSPM estimates
    stc = apply_inverse(evoked, inverse_operator, lambda2, "dSPM")
    assert_true(stc.subject == 'sample')
    assert_true(stc.data.min() > 0)
    assert_true(stc.data.max() < 35)
    assert_true(stc.data.mean() > 0.1)

    # the Python-built operator should reproduce the precomputed dSPM result
    my_stc = apply_inverse(evoked, my_inv_op, lambda2, "dSPM")
    assert_true('dev_head_t' in my_inv_op['info'])
    assert_true('mri_head_t' in my_inv_op)
    assert_true(my_stc.subject == 'sample')
    assert_equal(stc.times, my_stc.times)
    assert_array_almost_equal(stc.data, my_stc.data, 2)
def test_apply_mne_inverse_fixed_raw():
    """Test MNE with fixed-orientation inverse operator on Raw."""
    raw = Raw(fname_raw)
    start = 3
    stop = 10
    _, times = raw[0, start:stop]
    label_lh = read_label(fname_label % 'Aud-lh')

    # create a fixed-orientation inverse operator
    fwd = read_forward_solution(fname_fwd, force_fixed=False,
                                surf_ori=True)
    noise_cov = read_cov(fname_cov)
    inv_op = make_inverse_operator(raw.info, fwd, noise_cov,
                                   loose=None, depth=0.8, fixed=True)

    inv_op2 = prepare_inverse_operator(inv_op, nave=1,
                                       lambda2=lambda2, method="dSPM")
    # apply with a prepared operator and no buffering
    stc = apply_inverse_raw(raw, inv_op2, lambda2, "dSPM",
                            label=label_lh, start=start, stop=stop, nave=1,
                            pick_ori=None, buffer_size=None, prepared=True)

    # same, but processing in buffers of 3 samples — must be identical
    stc2 = apply_inverse_raw(raw, inv_op2, lambda2, "dSPM",
                             label=label_lh, start=start, stop=stop, nave=1,
                             pick_ori=None, buffer_size=3, prepared=True)

    # same, letting apply_inverse_raw prepare the operator itself
    stc3 = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
                             label=label_lh, start=start, stop=stop, nave=1,
                             pick_ori=None, buffer_size=None)

    assert_true(stc.subject == 'sample')
    assert_true(stc2.subject == 'sample')
    assert_array_almost_equal(stc.times, times)
    assert_array_almost_equal(stc2.times, times)
    assert_array_almost_equal(stc3.times, times)
    assert_array_almost_equal(stc.data, stc2.data)
    assert_array_almost_equal(stc.data, stc3.data)
def test_localization_bias():
    """Test inverse localization bias for minimum-norm solvers."""
    # Identity input
    evoked = _get_evoked()
    evoked.pick_types(meg=True, eeg=True, exclude=())
    evoked = EvokedArray(np.eye(len(evoked.data)), evoked.info)
    noise_cov = read_cov(fname_cov)
    # restrict to limited set of verts (small src here) and one hemi for speed
    fwd_orig = read_forward_solution(fname_fwd)
    vertices = [fwd_orig['src'][0]['vertno'].copy(), []]
    stc = SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),
                         vertices, 0., 1.)
    fwd_orig = restrict_forward_to_stc(fwd_orig, stc)

    #
    # Fixed orientation (not very different)
    #
    fwd = fwd_orig.copy()
    inv_fixed = make_inverse_operator(evoked.info, fwd, noise_cov,
                                      loose=0., depth=0.8)
    fwd = convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1])
    for method, lower, upper in (('MNE', 83, 87),
                                 ('dSPM', 96, 98),
                                 ('sLORETA', 100, 100),
                                 ('eLORETA', 100, 100)):
        inv_op = apply_inverse(evoked, inv_fixed, lambda2, method).data
        loc = np.abs(np.dot(inv_op, fwd))
        # Compute the percentage of sources for which there is no
        # localization bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method

    #
    # Loose orientation
    #
    fwd = fwd_orig.copy()
    inv_free = make_inverse_operator(evoked.info, fwd, noise_cov,
                                     loose=0.2, depth=0.8)
    fwd = fwd['sol']['data']
    # three orientations per source location
    want = np.arange(fwd.shape[1]) // 3
    for method, lower, upper in (('MNE', 25, 35),
                                 ('dSPM', 25, 35),
                                 ('sLORETA', 35, 40),
                                 ('eLORETA', 40, 45)):
        inv_op = apply_inverse(evoked, inv_free, lambda2, method,
                               pick_ori='vector').data
        loc = np.linalg.norm(np.einsum('vos,sx->vxo', inv_op, fwd), axis=-1)
        # Compute the percentage of sources for which there is no
        # localization bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method

    #
    # Free orientation
    #
    fwd = fwd_orig.copy()
    inv_free = make_inverse_operator(evoked.info, fwd, noise_cov,
                                     loose=1., depth=0.8)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1]) // 3
    force_kwargs = dict(method_params=dict(force_equal=True))
    for method, lower, upper, kwargs in (('MNE', 45, 55, {}),
                                         ('dSPM', 40, 45, {}),
                                         ('sLORETA', 90, 95, {}),
                                         ('eLORETA', 90, 95, force_kwargs),
                                         ('eLORETA', 100, 100, {}),
                                         ):
        inv_op = apply_inverse(evoked, inv_free, lambda2, method,
                               pick_ori='vector', **kwargs).data
        loc = np.linalg.norm(np.einsum('vos,sx->vxo', inv_op, fwd), axis=-1)
        # Compute the percentage of sources for which there is no
        # localization bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method
def get_mne_sample(tmin=-0.1, tmax=0.4, baseline=(None, 0), sns=False,
                   src=None, sub="modality=='A'", ori='free', snr=2,
                   method='dSPM', rm=False, stc=False, hpf=0):
    """Load events and epochs from the MNE sample data

    Parameters
    ----------
    tmin : scalar
        Relative time of the first sample of the epoch.
    tmax : scalar
        Relative time of the last sample of the epoch.
    baseline : {None, tuple of 2 {scalar, None}}
        Period for baseline correction.
    sns : bool | str
        Add sensor space data as NDVar as ``ds['meg']`` (default ``False``).
        Set to ``'grad'`` to load gradiometer data.
    src : False | 'ico' | 'vol'
        Add source space data as NDVar as ``ds['src']`` (default ``False``).
    sub : str | list | None
        Expression for subset of events to load. For a very small dataset
        use e.g. ``[0,1]``.
    ori : 'free' | 'fixed' | 'vector'
        Orientation of sources.
    snr : scalar
        MNE inverse parameter.
    method : str
        MNE inverse parameter.
    rm : bool
        Pretend to be a repeated measures dataset (adds 'subject' variable).
    stc : bool
        Add mne SourceEstimate for source space data as ``ds['stc']``
        (default ``False``).
    hpf : scalar
        High pass filter cutoff.

    Returns
    -------
    ds : Dataset
        Dataset with epochs from the MNE sample dataset in ``ds['epochs']``.
    """
    # translate the requested orientation into MNE inverse parameters
    if ori == 'free':
        loose = 1
        fixed = False
        pick_ori = None
    elif ori == 'fixed':
        loose = 0
        fixed = True
        pick_ori = None
    elif ori == 'vector':
        if LooseVersion(mne.__version__) < LooseVersion('0.17'):
            raise RuntimeError(f'mne version {mne.__version__}; vector source estimates require mne 0.17')
        loose = 1
        fixed = False
        pick_ori = 'vector'
    else:
        raise ValueError(f"ori={ori!r}")

    data_dir = mne.datasets.sample.data_path()
    meg_dir = os.path.join(data_dir, 'MEG', 'sample')
    raw_file = os.path.join(meg_dir, 'sample_audvis_filt-0-40_raw.fif')
    event_file = os.path.join(meg_dir, 'sample_audvis_filt-0-40-eve.fif')
    subjects_dir = os.path.join(data_dir, 'subjects')
    subject = 'sample'
    label_path = os.path.join(subjects_dir, subject, 'label', '%s.label')
    # generate the event file once and cache it on disk
    if not os.path.exists(event_file):
        raw = mne.io.Raw(raw_file)
        events = mne.find_events(raw, stim_channel='STI 014')
        mne.write_events(event_file, events)
    ds = load.fiff.events(raw_file, events=event_file)
    if hpf:
        ds.info['raw'].load_data()
        ds.info['raw'].filter(hpf, None)
    ds.index()
    ds.info['subjects_dir'] = subjects_dir
    ds.info['subject'] = subject
    ds.info['label'] = label_path

    # get the trigger variable from the dataset for easier access
    trigger = ds['trigger']
    # use trigger to add various labels to the dataset
    ds['condition'] = Factor(trigger, labels={1: 'LA', 2: 'RA', 3: 'LV',
                                              4: 'RV', 5: 'smiley',
                                              32: 'button'})
    ds['side'] = Factor(trigger, labels={1: 'L', 2: 'R', 3: 'L', 4: 'R',
                                         5: 'None', 32: 'None'})
    ds['modality'] = Factor(trigger, labels={1: 'A', 2: 'A', 3: 'V', 4: 'V',
                                             5: 'None', 32: 'None'})

    if rm:
        # fake a repeated-measures design by enumerating balanced cells
        ds = ds.sub('trigger < 5')
        ds = ds.equalize_counts('side % modality')
        subject_f = ds.eval('side % modality').enumerate_cells()
        ds['subject'] = subject_f.as_factor('s%r', random=True)

    if sub:
        ds = ds.sub(sub)

    load.fiff.add_mne_epochs(ds, tmin, tmax, baseline)
    if sns:
        ds['meg'] = load.fiff.epochs_ndvar(
            ds['epochs'], data='mag' if sns is True else sns,
            sysname='neuromag')

    if not src:
        return ds
    elif src == 'ico':
        src_tag = 'ico-4'
    elif src == 'vol':
        src_tag = 'vol-10'
    else:
        raise ValueError(f"src={src!r}")
    epochs = ds['epochs']

    # get inverse operator (cached on disk next to the sample data)
    inv_file = os.path.join(meg_dir, f'sample_eelbrain_{src_tag}-inv.fif')
    if os.path.exists(inv_file):
        inv = mne.minimum_norm.read_inverse_operator(inv_file)
    else:
        fwd_file = os.path.join(meg_dir, 'sample-%s-fwd.fif' % src_tag)
        bem_dir = os.path.join(subjects_dir, subject, 'bem')
        bem_file = os.path.join(bem_dir,
                                'sample-5120-5120-5120-bem-sol.fif')
        trans_file = os.path.join(meg_dir, 'sample_audvis_raw-trans.fif')

        if os.path.exists(fwd_file):
            fwd = mne.read_forward_solution(fwd_file)
        else:
            src_ = _mne_source_space(subject, src_tag, subjects_dir)
            fwd = mne.make_forward_solution(epochs.info, trans_file, src_,
                                            bem_file)
            mne.write_forward_solution(fwd_file, fwd)

        cov_file = os.path.join(meg_dir, 'sample_audvis-cov.fif')
        cov = mne.read_cov(cov_file)
        inv = mn.make_inverse_operator(epochs.info, fwd, cov, loose=loose,
                                       depth=None, fixed=fixed)
        mne.minimum_norm.write_inverse_operator(inv_file, inv)
    ds.info['inv'] = inv
    stcs = mn.apply_inverse_epochs(epochs, inv, 1. / (snr ** 2), method,
                                   pick_ori=pick_ori)
    ds['src'] = load.fiff.stc_ndvar(stcs, subject, src_tag, subjects_dir,
                                    method, fixed)
    if stc:
        ds['stc'] = stcs
    return ds
def test_simulate_raw_sphere():
    """Test simulation of raw data with sphere model.

    Covers: reproducibility across equivalent argument forms, round-trip IO,
    artifact-only channels differing when noise cov is absent, MEG/EEG
    channel subsets, head-position interpolation modes, and a battery of
    degenerate inputs that must raise.
    """
    seed = 42  # fixed RNG seed so the two simulate_raw calls below match
    raw, src, stc, trans, sphere = _get_data()
    # fixture is expected to contain exactly one ECG channel
    assert_true(len(pick_types(raw.info, meg=False, ecg=True)) == 1)

    # head pos
    head_pos_sim = dict()
    # these will be at 1., 2., ... sec shifts
    shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]
    for time_key, shift in enumerate(shifts):
        # Create 4x4 matrix transform and normalize
        temp_trans = deepcopy(raw.info['dev_head_t'])
        temp_trans['trans'][:3, 3] += shift
        head_pos_sim[time_key + 1.] = temp_trans['trans']

    #
    # Test raw simulation with basic parameters
    #
    raw_sim = simulate_raw(raw, stc, trans, src, sphere, read_cov(cov_fname),
                           head_pos=head_pos_sim, blink=True, ecg=True,
                           random_state=seed)
    # same simulation, but passing filenames instead of objects -- with the
    # same seed the output must be bitwise identical
    raw_sim_2 = simulate_raw(raw, stc, trans_fname, src_fname, sphere,
                             cov_fname, head_pos=head_pos_sim, blink=True,
                             ecg=True, random_state=seed)
    assert_array_equal(raw_sim_2[:][0], raw_sim[:][0])
    # Test IO on processed data
    tempdir = _TempDir()
    test_outname = op.join(tempdir, 'sim_test_raw.fif')
    raw_sim.save(test_outname)
    raw_sim_loaded = read_raw_fif(test_outname, preload=True)
    assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6,
                    atol=1e-20)
    del raw_sim, raw_sim_2

    # with no cov (no noise) but with artifacts, most time periods should
    # match but the EOG/ECG channels should not
    for ecg, eog in ((True, False), (False, True), (True, True)):
        raw_sim_3 = simulate_raw(raw, stc, trans, src, sphere, cov=None,
                                 head_pos=head_pos_sim, blink=eog, ecg=ecg,
                                 random_state=seed)
        raw_sim_4 = simulate_raw(raw, stc, trans, src, sphere, cov=None,
                                 head_pos=head_pos_sim, blink=False,
                                 ecg=False, random_state=seed)
        picks = np.arange(len(raw.ch_names))
        # channels carrying the simulated artifact in this iteration
        diff_picks = pick_types(raw.info, meg=False, ecg=ecg, eog=eog)
        these_picks = np.setdiff1d(picks, diff_picks)
        close = np.isclose(raw_sim_3[these_picks][0],
                           raw_sim_4[these_picks][0], atol=1e-20)
        assert_true(np.mean(close) > 0.7)
        far = ~np.isclose(raw_sim_3[diff_picks][0],
                          raw_sim_4[diff_picks][0], atol=1e-20)
        assert_true(np.mean(far) > 0.99)
    del raw_sim_3, raw_sim_4

    # make sure it works with EEG-only and MEG-only
    raw_sim_meg = simulate_raw(raw.copy().pick_types(meg=True, eeg=False),
                               stc, trans, src, sphere, cov=None, ecg=True,
                               blink=True, random_state=seed)
    raw_sim_eeg = simulate_raw(raw.copy().pick_types(meg=False, eeg=True),
                               stc, trans, src, sphere, cov=None, ecg=True,
                               blink=True, random_state=seed)
    raw_sim_meeg = simulate_raw(raw.copy().pick_types(meg=True, eeg=True),
                                stc, trans, src, sphere, cov=None, ecg=True,
                                blink=True, random_state=seed)
    # MEG-only + EEG-only stacked must equal the combined simulation
    assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
                    raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
    del raw_sim_meg, raw_sim_eeg, raw_sim_meeg

    # check that different interpolations are similar given small movements
    raw_sim = simulate_raw(raw, stc, trans, src, sphere, cov=None,
                           head_pos=head_pos_sim, interp='linear')
    raw_sim_hann = simulate_raw(raw, stc, trans, src, sphere, cov=None,
                                head_pos=head_pos_sim, interp='hann')
    assert_allclose(raw_sim[:][0], raw_sim_hann[:][0], rtol=1e-1, atol=1e-14)
    del raw_sim, raw_sim_hann

    # Make impossible transform (translate up into helmet) and ensure failure
    head_pos_sim_err = deepcopy(head_pos_sim)
    head_pos_sim_err[1.][2, 3] -= 0.1  # z trans upward 10cm
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  ecg=False, blink=False, head_pos=head_pos_sim_err)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src,
                  bem_fname, ecg=False, blink=False,
                  head_pos=head_pos_sim_err)
    # other degenerate conditions
    assert_raises(TypeError, simulate_raw, 'foo', stc, trans, src, sphere)
    assert_raises(TypeError, simulate_raw, raw, 'foo', trans, src, sphere)
    assert_raises(ValueError, simulate_raw, raw, stc.copy().crop(0, 0),
                  trans, src, sphere)
    stc_bad = stc.copy()
    stc_bad.tstep += 0.1  # inconsistent sample period must be rejected
    assert_raises(ValueError, simulate_raw, raw, stc_bad, trans, src, sphere)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  chpi=True)  # no cHPI info
    assert_raises(ValueError, simulate_raw, raw, stc, trans, src, sphere,
                  interp='foo')
    assert_raises(TypeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=1.)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=pos_fname)  # ends up with t>t_end
    head_pos_sim_err = deepcopy(head_pos_sim)
    head_pos_sim_err[-1.] = head_pos_sim_err[1.]  # negative time
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=head_pos_sim_err)
    raw_bad = raw.copy()
    raw_bad.info['dig'] = None  # blink simulation needs digitization points
    assert_raises(RuntimeError, simulate_raw, raw_bad, stc, trans, src,
                  sphere, blink=True)
parameters["cov_mx_pre"], "-epo.fif", wp=True)[2] epochs_files.sort() fwd_solution = files.get_files(subject_meg, "", "-fwd.fif")[2] fwd_solution.sort() noise_cov = files.get_files(subject_meg, parameters["cov_mx_pre"], "-cov.fif")[2] noise_cov.sort() all_files = list(zip(noise_cov, fwd_solution, epochs_files)) for cov_path, fwd_path, epo_path in all_files: epochs = mne.read_epochs(epo_path) cov = mne.read_cov(cov_path) fwd = mne.read_forward_solution(fwd_path) info = epochs.info inverse_operator = mne.minimum_norm.make_inverse_operator(info, fwd, cov, loose=0.2, depth=0.8) inv_path = op.join( subject_meg, "{0}-{1}-inv.fif".format(parameters["cov_mx_pre"], cov_path.split("/")[-1].split("-")[-2])) mne.minimum_norm.write_inverse_operator(inv_path, inverse_operator) named_tuple = time.localtime() # get struct_time
def test_resolution_metrics():
    """Test resolution metrics.

    Checks PSF/CTF-based localisation-error, spatial-extent and amplitude
    metrics for MNE and sLORETA, plus resolution-matrix rectification.
    """
    # Forward operator, restricted to fixed source orientations.
    fwd = mne.read_forward_solution(fname_fwd)
    fwd = mne.convert_forward_solution(fwd, surf_ori=True,
                                       force_fixed=True, copy=False)
    noise_cov = mne.read_cov(fname_cov)  # noise covariance matrix
    evoked = mne.read_evokeds(fname_evoked, 0)  # only needed for its info

    # Inverse operator with fixed source orientation.
    inv = mne.minimum_norm.make_inverse_operator(
        info=evoked.info, forward=fwd, noise_cov=noise_cov, loose=0.,
        depth=None, fixed=True)

    # Regularisation parameter based on SNR.
    snr = 3.0
    lambda2 = 1.0 / snr ** 2

    # Resolution matrices for fixed source orientation.
    res_mne = make_inverse_resolution_matrix(fwd, inv, method='MNE',
                                             lambda2=lambda2)
    # A very smooth MNE solution, for spatial-extent comparisons below.
    res_mne_smooth = make_inverse_resolution_matrix(fwd, inv, method='MNE',
                                                    lambda2=100.)
    res_lor = make_inverse_resolution_matrix(fwd, inv, method='sLORETA',
                                             lambda2=lambda2)

    def compute(res_mat, function, metric, **kwargs):
        # Shorthand: every metric below runs on the same source space.
        return resolution_metrics(res_mat, fwd['src'], function=function,
                                  metric=metric, **kwargs)

    # Localisation error (STCs): peak error.
    ple_psf = compute(res_mne, 'psf', 'peak_err')
    ple_ctf = compute(res_mne, 'ctf', 'peak_err')
    ple_lor = compute(res_lor, 'psf', 'peak_err')
    # Localisation error: centre of gravity.
    cog_psf = compute(res_mne, 'psf', 'cog_err')
    cog_ctf = compute(res_mne, 'ctf', 'cog_err')

    # Spatial spread (STCs): standard deviation.
    sd_psf = compute(res_mne, 'psf', 'sd_ext')
    sd_psf_smooth = compute(res_mne_smooth, 'psf', 'sd_ext')
    sd_ctf = compute(res_mne, 'ctf', 'sd_ext')
    sd_ctf_lor = compute(res_lor, 'ctf', 'sd_ext')
    # Spatial spread: maximum radius at a fixed threshold.
    mr_psf = compute(res_mne, 'psf', 'maxrad_ext', threshold=0.6)
    mr_psf_smooth = compute(res_mne_smooth, 'psf', 'maxrad_ext',
                            threshold=0.6)
    mr_ctf = compute(res_mne, 'ctf', 'maxrad_ext', threshold=0.6)
    mr_ctf_lor = compute(res_lor, 'ctf', 'maxrad_ext', threshold=0.6)
    # Lower threshold -> larger spatial extent.
    mr_psf_lo = compute(res_mne, 'psf', 'maxrad_ext', threshold=0.)
    mr_psf_hi = compute(res_mne, 'psf', 'maxrad_ext', threshold=0.9)

    # Relative amplitude (STCs).
    amp_psf = compute(res_mne, 'psf', 'peak_amp')
    amp_ctf = compute(res_mne, 'ctf', 'peak_amp')

    # Invalid arguments must raise.
    with pytest.raises(ValueError, match='is not a recognized metric'):
        compute(res_mne, 'psf', 'foo')
    with pytest.raises(ValueError, match='a recognised resolution function'):
        compute(res_mne, 'foo', 'peak_err')

    # For MNE, PSF- and CTF-based metrics must agree.
    assert_array_almost_equal(ple_psf.data, ple_ctf.data)
    assert_array_almost_equal(cog_psf.data, cog_ctf.data)
    assert_array_almost_equal(sd_psf.data, sd_ctf.data)
    assert_array_almost_equal(mr_psf.data, mr_ctf.data)
    assert_((mr_psf_lo.data > mr_psf_hi.data).all())
    assert_array_almost_equal(amp_psf.data, amp_ctf.data)

    # sLORETA is constructed to have zero peak localisation error.
    assert_((ple_lor.data == 0.).all())

    # CTF-based spread is identical for MNE and sLORETA.
    assert_array_almost_equal(sd_ctf.data, sd_ctf_lor.data)
    assert_array_almost_equal(mr_ctf.data, mr_ctf_lor.data)

    # The smoother solution must have the larger spatial extent.
    assert_(np.sum(sd_psf_smooth.data) > np.sum(sd_psf.data))
    assert_(np.sum(mr_psf_smooth.data) > np.sum(mr_psf.data))

    # "Rectification" of a resolution matrix.
    rect_in = np.ones([8, 4])
    rect_out = _rectify_resolution_matrix(rect_in)
    assert_array_equal(rect_out, np.sqrt(2) * np.ones((4, 4)))
def test_mxne_inverse_standard():
    """Test (TF-)MxNE inverse computation.

    Compares the 'prox', 'cd' and 'bcd' solvers against each other, checks
    dipole output, residual return, irMxNE iterations, and TF-MxNE
    parameter validation.
    """
    # Local import: used to silence solver warnings. The previous
    # ``pytest.warns(None)`` idiom is deprecated and removed in pytest >= 7.
    import warnings

    # Read noise covariance matrix
    cov = read_cov(fname_cov)

    # Handling average file
    loose = 0.0
    depth = 0.9

    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)

    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)
    label = read_label(fname_label)
    assert label.hemi == 'rh'

    forward = read_forward_solution(fname_fwd)
    forward = convert_forward_solution(forward, surf_ori=True)

    # Reduce source space to make test computation faster
    inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
                                             loose=loose, depth=depth,
                                             fixed=True, use_cps=True)
    stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
                             method='dSPM')
    # Binarize the dSPM map to use it as a weighting prior
    stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
    stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
    weights_min = 0.5

    # MxNE tests
    alpha = 70  # spatial regularization parameter

    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                          depth=depth, maxit=300, tol=1e-8,
                          active_set_size=10, weights=stc_dspm,
                          weights_min=weights_min, solver='prox')
    with warnings.catch_warnings(record=True):  # CD solver can warn
        warnings.simplefilter('always')
        stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                            depth=depth, maxit=300, tol=1e-8,
                            active_set_size=10, weights=stc_dspm,
                            weights_min=weights_min, solver='cd',
                            pca=False)  # pca=False deprecated, doesn't matter
    stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                         depth=depth, maxit=300, tol=1e-8,
                         active_set_size=10, weights=stc_dspm,
                         weights_min=weights_min, solver='bcd')

    # All three solvers must agree on times and (approximately) on data
    assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
    assert_allclose(stc_prox.data, stc_cd.data, rtol=1e-3, atol=0.0)
    assert_allclose(stc_prox.data, stc_bcd.data, rtol=1e-3, atol=0.0)
    assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)

    assert stc_prox.vertices[1][0] in label.vertices
    assert stc_cd.vertices[1][0] in label.vertices
    assert stc_bcd.vertices[1][0] in label.vertices

    # Dipole output
    with warnings.catch_warnings(record=True):  # CD solver can warn
        warnings.simplefilter('always')
        dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                          depth=depth, maxit=300, tol=1e-8,
                          active_set_size=10, weights=stc_dspm,
                          weights_min=weights_min, solver='cd',
                          return_as_dipoles=True)
    stc_dip = make_stc_from_dipoles(dips, forward['src'])
    assert isinstance(dips[0], Dipole)
    assert stc_dip.subject == "sample"
    _check_stcs(stc_cd, stc_dip)

    # Residual return
    with warnings.catch_warnings(record=True):  # CD solver can warn
        warnings.simplefilter('always')
        stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                            depth=depth, maxit=300, tol=1e-8,
                            active_set_size=10, return_residual=True,
                            solver='cd')
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert stc.vertices[1][0] in label.vertices

    # irMxNE tests
    with warnings.catch_warnings(record=True):  # CD solver can warn
        warnings.simplefilter('always')
        stc = mixed_norm(evoked_l21, forward, cov, alpha, n_mxne_iter=5,
                         loose=loose, depth=depth, maxit=300, tol=1e-8,
                         active_set_size=10, solver='cd')
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert stc.vertices[1][0] in label.vertices
    assert stc.vertices == [[63152], [79017]]

    # Do with TF-MxNE for test memory savings
    alpha = 60.  # overall regularization parameter
    l1_ratio = 0.01  # temporal regularization proportion

    stc, _ = tf_mixed_norm(evoked, forward, cov,
                           loose=loose, depth=depth, maxit=100, tol=1e-4,
                           tstep=4, wsize=16, window=0.1, weights=stc_dspm,
                           weights_min=weights_min, return_residual=True,
                           alpha=alpha, l1_ratio=l1_ratio)
    assert_array_almost_equal(stc.times, evoked.times, 5)
    assert stc.vertices[1][0] in label.vertices

    # Out-of-range regularization parameters must raise
    pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
                  alpha=101, l1_ratio=0.03)
    pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
                  alpha=50., l1_ratio=1.01)
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif' evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif' cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif' # Read the evoked response and crop it condition = 'Left visual' evoked = mne.read_evokeds(evoked_fname, condition=condition, baseline=(None, 0)) evoked.crop(tmin=-50e-3, tmax=300e-3) # Read the forward solution forward = mne.read_forward_solution(fwd_fname) # Read noise noise covariance matrix and regularize it cov = mne.read_cov(cov_fname) cov = mne.cov.regularize(cov, evoked.info, rank=None) # Run the Gamma-MAP method with dipole output alpha = 0.5 dipoles, residual = gamma_map(evoked, forward, cov, alpha, xyz_same_gamma=True, return_residual=True, return_as_dipoles=True) # %% # Plot dipole activations plot_dipole_amplitudes(dipoles)
def test_apply_inverse_operator(evoked, inv, min_, max_):
    """Test MNE inverse application.

    Parameters
    ----------
    evoked : Evoked
        Fixture-provided evoked data.
    inv : str
        Path to the inverse operator file to read.
    min_, max_ : float
        Expected bounds for the MNE solution amplitudes.
    """
    # use fname_inv as it will be faster than fname_full (fewer verts and chs)
    inverse_operator = read_inverse_operator(inv)

    # Inverse has 306 channels - 4 proj = 302
    # (the original file asserted this twice; the duplicate was removed)
    assert (compute_rank_inverse(inverse_operator) == 302)

    stc = apply_inverse(evoked, inverse_operator, lambda2, "MNE")
    assert stc.subject == 'sample'
    assert stc.data.min() > min_
    assert stc.data.max() < max_
    assert abs(stc).data.mean() > 1e-11

    # test if using prepared and not prepared inverse operator give the same
    # result
    inv_op = prepare_inverse_operator(inverse_operator, nave=evoked.nave,
                                      lambda2=lambda2, method="MNE")
    stc2 = apply_inverse(evoked, inv_op, lambda2, "MNE")
    assert_array_almost_equal(stc.data, stc2.data)
    assert_array_almost_equal(stc.times, stc2.times)

    # This is little more than a smoke test...
    stc = apply_inverse(evoked, inverse_operator, lambda2, "sLORETA")
    assert stc.subject == 'sample'
    assert abs(stc).data.min() > 0
    assert 2 < stc.data.max() < 7
    assert abs(stc).data.mean() > 0.1

    stc = apply_inverse(evoked, inverse_operator, lambda2, "eLORETA")
    assert stc.subject == 'sample'
    assert abs(stc).data.min() > min_
    assert stc.data.max() < max_ * 2
    assert abs(stc).data.mean() > 1e-11

    stc = apply_inverse(evoked, inverse_operator, lambda2, "dSPM")
    assert stc.subject == 'sample'
    assert abs(stc).data.min() > 0
    assert 7.5 < stc.data.max() < 15
    assert abs(stc).data.mean() > 0.1

    # test without using a label (so delayed computation is used)
    label = read_label(fname_label % 'Aud-lh')
    for method in INVERSE_METHODS:
        stc = apply_inverse(evoked, inv_op, lambda2, method)
        stc_label = apply_inverse(evoked, inv_op, lambda2, method,
                                  label=label)
        assert_equal(stc_label.subject, 'sample')
        label_stc = stc.in_label(label)
        assert label_stc.subject == 'sample'
        assert_allclose(stc_label.data, label_stc.data)

    # Test that no errors are raised with loose inverse ops and picking
    # normals
    noise_cov = read_cov(fname_cov)
    fwd = read_forward_solution_meg(fname_fwd)
    inv_op_meg = make_inverse_operator(
        evoked.info, fwd, noise_cov, loose=1, fixed='auto', depth=None)
    apply_inverse(evoked, inv_op_meg, 1 / 9., method='MNE', pick_ori='normal')

    # Test we get errors when using custom ref or no average proj is present
    evoked.info['custom_ref_applied'] = True
    pytest.raises(ValueError, apply_inverse, evoked, inv_op, lambda2, "MNE")
    evoked.info['custom_ref_applied'] = False
    evoked.info['projs'] = []  # remove EEG proj
    pytest.raises(ValueError, apply_inverse, evoked, inv_op, lambda2, "MNE")

    # But test that we do not get EEG-related errors on MEG-only inv (gh-4650)
    apply_inverse(evoked, inv_op_meg, 1. / 9.)