def test_cov_ctf():
    """Test basic cov computation on ctf data with/without compensation."""
    raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
    events = make_fixed_length_events(raw, 99999)
    assert len(events) == 2
    ch_names = [raw.info['ch_names'][pick]
                for pick in pick_types(raw.info, meg=True, eeg=False,
                                       ref_meg=False)]

    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        with pytest.warns(RuntimeWarning, match='Too few samples'):
            noise_cov = compute_covariance(epochs, tmax=0.,
                                           method=['empirical'])
        prepare_noise_cov(noise_cov, raw.info, ch_names)

    raw.apply_gradient_compensation(0)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
    raw.apply_gradient_compensation(1)

    # TODO: This next call in principle should fail.
    prepare_noise_cov(noise_cov, raw.info, ch_names)

    # make sure the comps matrices were not removed from raw
    assert raw.info['comps'], 'Comps matrices removed'
def _get_data():
    """Read in data used in tests."""
    # read forward model
    forward = mne.read_forward_solution(fname_fwd)
    # read data
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    events = mne.read_events(fname_event)
    event_id, tmin, tmax = 1, -0.1, 0.15

    # decimate for speed
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, selection=left_temporal_channels)
    picks = picks[::2]
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    del picks

    raw.info.normalize_proj()  # avoid projection warnings

    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0.), preload=True, reject=reject)

    noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
    data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15)

    return epochs, data_cov, noise_cov, forward
def test_compute_covariance_auto_reg(): """Test automated regularization""" raw = Raw(raw_fname, preload=False) events = find_events(raw, stim_channel='STI 014') event_ids = [1, 2, 3, 4] reject = dict(mag=4e-12) # cov with merged events and keep_sample_mean=True events_merged = merge_events(events, event_ids, 1234) picks = pick_types(raw.info, meg='mag', eeg=False) epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0, picks=picks[:5], baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True) epochs.crop(None, 0)[:10] method_params = dict(factor_analysis=dict(iter_n_components=[30]), pca=dict(iter_n_components=[30])) with warnings.catch_warnings(record=True) as w: covs = compute_covariance(epochs, method='auto', method_params=method_params, projs=True, return_estimators=True) warnings.simplefilter('always') assert_equal(len(w), 1) logliks = [c['loglik'] for c in covs] assert_true(np.diff(logliks).max() <= 0) # descending order methods = ['empirical', 'factor_analysis', 'ledoit_wolf', # 'pca', XXX FAILS ] with warnings.catch_warnings(record=True) as w: cov3 = compute_covariance(epochs, method=methods, method_params=method_params, projs=False, return_estimators=True) warnings.simplefilter('always') assert_equal(len(w), 1) assert_equal(set([c['method'] for c in cov3]), set(methods)) # projs not allowed with FA or PCA assert_raises(ValueError, compute_covariance, epochs, method='pca', projs=True) # invalid prespecified method assert_raises(ValueError, compute_covariance, epochs, method='pizza') # invalid scalings assert_raises(ValueError, compute_covariance, epochs, method='shrunk', scalings=dict(misc=123))
def test_cov_mismatch(): """Test estimation with MEG<->Head mismatch.""" raw = read_raw_fif(raw_fname).crop(0, 5).load_data() events = find_events(raw, stim_channel='STI 014') raw.pick_channels(raw.ch_names[:5]) raw.add_proj([], remove_existing=True) epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True) for kind in ('shift', 'None'): epochs_2 = epochs.copy() # This should be fine with warnings.catch_warnings(record=True) as w: compute_covariance([epochs, epochs_2]) assert_equal(len(w), 0) if kind == 'shift': epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001 else: # None epochs_2.info['dev_head_t'] = None assert_raises(ValueError, compute_covariance, [epochs, epochs_2]) assert_equal(len(w), 0) compute_covariance([epochs, epochs_2], on_mismatch='ignore') assert_equal(len(w), 0) compute_covariance([epochs, epochs_2], on_mismatch='warn') assert_raises(ValueError, compute_covariance, epochs, on_mismatch='x') assert_true(any('transform mismatch' in str(ww.message) for ww in w)) # This should work epochs.info['dev_head_t'] = None epochs_2.info['dev_head_t'] = None compute_covariance([epochs, epochs_2], method=None)
def test_cov_mismatch(): """Test estimation with MEG<->Head mismatch.""" raw = read_raw_fif(raw_fname, add_eeg_ref=False).crop(0, 5).load_data() events = find_events(raw, stim_channel="STI 014") raw.pick_channels(raw.ch_names[:5]) raw.add_proj([], remove_existing=True) epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0.0, preload=True, add_eeg_ref=False) for kind in ("shift", "None"): epochs_2 = epochs.copy() # This should be fine with warnings.catch_warnings(record=True) as w: compute_covariance([epochs, epochs_2]) assert_equal(len(w), 0) if kind == "shift": epochs_2.info["dev_head_t"]["trans"][:3, 3] += 0.001 else: # None epochs_2.info["dev_head_t"] = None assert_raises(ValueError, compute_covariance, [epochs, epochs_2]) assert_equal(len(w), 0) compute_covariance([epochs, epochs_2], on_mismatch="ignore") assert_equal(len(w), 0) compute_covariance([epochs, epochs_2], on_mismatch="warn") assert_raises(ValueError, compute_covariance, epochs, on_mismatch="x") assert_true(any("transform mismatch" in str(ww.message) for ww in w)) # This should work epochs.info["dev_head_t"] = None epochs_2.info["dev_head_t"] = None compute_covariance([epochs, epochs_2], method=None)
def test_cov_mismatch(): """Test estimation with MEG<->Head mismatch.""" raw = read_raw_fif(raw_fname).crop(0, 5).load_data() events = find_events(raw, stim_channel='STI 014') raw.pick_channels(raw.ch_names[:5]) raw.add_proj([], remove_existing=True) epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True) for kind in ('shift', 'None'): epochs_2 = epochs.copy() # This should be fine compute_covariance([epochs, epochs_2]) if kind == 'shift': epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001 else: # None epochs_2.info['dev_head_t'] = None pytest.raises(ValueError, compute_covariance, [epochs, epochs_2]) compute_covariance([epochs, epochs_2], on_mismatch='ignore') with pytest.raises(RuntimeWarning, match='transform mismatch'): compute_covariance([epochs, epochs_2], on_mismatch='warn') pytest.raises(ValueError, compute_covariance, epochs, on_mismatch='x') # This should work epochs.info['dev_head_t'] = None epochs_2.info['dev_head_t'] = None compute_covariance([epochs, epochs_2], method=None)
def test_cov_estimation_with_triggers(): """Estimate raw with triggers """ raw = Raw(raw_fname) events = find_events(raw) event_ids = [1, 2, 3, 4] reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6) # cov with merged events and keep_sample_mean=True events_merged = merge_events(events, event_ids, 1234) epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True) cov = compute_covariance(epochs, keep_sample_mean=True) cov_mne = read_cov(cov_km_fname) assert_true(cov_mne.ch_names == cov.ch_names) assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 0.005) # Test with tmin and tmax (different but not too much) cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01) assert_true(np.all(cov.data != cov_tmin_tmax.data)) assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') / linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05) # cov using a list of epochs and keep_sample_mean=True epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject) for ev_id in event_ids] cov2 = compute_covariance(epochs, keep_sample_mean=True) assert_array_almost_equal(cov.data, cov2.data) assert_true(cov.ch_names == cov2.ch_names) # cov with keep_sample_mean=False using a list of epochs cov = compute_covariance(epochs, keep_sample_mean=False) cov_mne = read_cov(cov_fname) assert_true(cov_mne.ch_names == cov.ch_names) assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 0.005) # test IO when computation done in Python cov.save('test-cov.fif') # test saving cov_read = read_cov('test-cov.fif') assert_true(cov_read.ch_names == cov.ch_names) assert_true(cov_read.nfree == cov.nfree) assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 1e-5)
def test_compute_covariance_auto_reg(): """Test automated regularization""" raw = read_raw_fif(raw_fname, preload=True) raw.resample(100, npad='auto') # much faster estimation events = find_events(raw, stim_channel='STI 014') event_ids = [1, 2, 3, 4] reject = dict(mag=4e-12) # cov with merged events and keep_sample_mean=True events_merged = merge_events(events, event_ids, 1234) # we need a few channels for numerical reasons in PCA/FA picks = pick_types(raw.info, meg='mag', eeg=False)[:10] raw.pick_channels([raw.ch_names[pick] for pick in picks]) raw.info.normalize_proj() epochs = Epochs( raw, events_merged, 1234, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True) epochs = epochs.crop(None, 0)[:10] method_params = dict(factor_analysis=dict(iter_n_components=[3]), pca=dict(iter_n_components=[3])) covs = compute_covariance(epochs, method='auto', method_params=method_params, projs=True, return_estimators=True) logliks = [c['loglik'] for c in covs] assert_true(np.diff(logliks).max() <= 0) # descending order methods = ['empirical', 'factor_analysis', 'ledoit_wolf', 'pca'] cov3 = compute_covariance(epochs, method=methods, method_params=method_params, projs=None, return_estimators=True) assert_equal(set([c['method'] for c in cov3]), set(methods)) # invalid prespecified method assert_raises(ValueError, compute_covariance, epochs, method='pizza') # invalid scalings assert_raises(ValueError, compute_covariance, epochs, method='shrunk', scalings=dict(misc=123))
def _get_bf_data(save_fieldtrip=False):
    raw, epochs, evoked, data_cov, _, _, _, _, _, fwd = _get_data(proj=False)

    if save_fieldtrip is True:
        # raw needs to be saved with all channels and picked in FieldTrip
        raw.save(op.join(ft_data_path, 'raw.fif'), overwrite=True)

        # src (tris are not available in fwd['src'] once imported into MATLAB)
        src = fwd['src'].copy()
        mne.write_source_spaces(op.join(ft_data_path, 'src.fif'), src)

    # pick gradiometers only:
    epochs.pick_types(meg='grad')
    evoked.pick_types(meg='grad')

    # compute covariance matrix
    data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145,
                                      method='empirical')

    if save_fieldtrip is True:
        # if the covariance matrix and epochs need resaving:
        # data covariance:
        cov_savepath = op.join(ft_data_path, 'sample_cov')
        sample_cov = {'sample_cov': data_cov['data']}
        savemat(cov_savepath, sample_cov)
        # evoked data:
        ev_savepath = op.join(ft_data_path, 'sample_evoked')
        data_ev = {'sample_evoked': evoked.data}
        savemat(ev_savepath, data_ev)

    return evoked, data_cov, fwd
def test_lcmv(): """Test LCMV """ event_id, tmin, tmax = 1, -0.2, 0.2 # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels # Set up pick list: EEG + MEG - bad channels (modify to your needs) left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, exclude=raw.info['bads'], selection=left_temporal_channels) # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=True, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6)) evoked = epochs.average() noise_cov = mne.read_cov(fname_cov) noise_cov = mne.cov.regularize(noise_cov, evoked.info, mag=0.05, grad=0.05, eeg=0.1, proj=True) data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15) stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01) stc_pow = np.sum(stc.data, axis=1) idx = np.argmax(stc_pow) max_stc = stc.data[idx] tmax = stc.times[np.argmax(max_stc)] assert_true(0.09 < tmax < 0.1) assert_true(2. < np.max(max_stc) < 3.)
def run_evoked(subject_id):
    subject = "sub%03d" % subject_id
    print("processing subject: %s" % subject)
    data_path = op.join(meg_dir, subject)
    epochs = mne.read_epochs(op.join(data_path, '%s-epo.fif' % subject),
                             preload=False)
    evoked_famous = epochs['face/famous'].average()
    evoked_scrambled = epochs['scrambled'].average()
    evoked_unfamiliar = epochs['face/unfamiliar'].average()

    # Simplify comment
    evoked_famous.comment = 'famous'
    evoked_scrambled.comment = 'scrambled'
    evoked_unfamiliar.comment = 'unfamiliar'

    contrast = mne.combine_evoked([evoked_famous, evoked_unfamiliar,
                                   evoked_scrambled],
                                  weights=[0.5, 0.5, -1.])
    contrast.comment = 'contrast'
    faces = mne.combine_evoked([evoked_famous, evoked_unfamiliar], 'nave')
    faces.comment = 'faces'

    mne.evoked.write_evokeds(op.join(data_path, '%s-ave.fif' % subject),
                             [evoked_famous, evoked_scrambled,
                              evoked_unfamiliar, contrast, faces])

    # take care of noise cov
    cov = mne.compute_covariance(epochs, tmax=0, method='shrunk')
    cov.save(op.join(data_path, '%s-cov.fif' % subject))
def test_lcmv_reg_proj(proj):
    """Test LCMV with and without proj."""
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    events = mne.find_events(raw)
    raw.pick_types()
    assert len(raw.ch_names) == 305
    epochs = mne.Epochs(raw, events, None, preload=True, proj=proj)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        noise_cov = mne.compute_covariance(epochs, tmax=0)
        data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
    forward = mne.read_forward_solution(fname_fwd)
    filters = make_lcmv(epochs.info, forward, data_cov, reg=0.05,
                        noise_cov=noise_cov, pick_ori='max-power',
                        weight_norm='nai', rank=None, verbose=True)
    want_rank = 302  # 305 good channels - 3 MEG projs
    assert filters['rank'] == want_rank
def test_low_rank_methods(rank, raw_epochs_events): """Test low-rank covariance matrix estimation.""" epochs = raw_epochs_events[1] sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj n_ch = 366 methods = ('empirical', 'diagonal_fixed', 'oas') bounds = { 'None': dict(empirical=(-15000, -5000), diagonal_fixed=(-1500, -500), oas=(-700, -600)), 'full': dict(empirical=(-18000, -8000), diagonal_fixed=(-2000, -1600), oas=(-1600, -1000)), 'info': dict(empirical=(-15000, -5000), diagonal_fixed=(-700, -600), oas=(-700, -600)), } with pytest.warns(RuntimeWarning, match='Too few samples'): covs = compute_covariance( epochs, method=methods, return_estimators=True, rank=rank, verbose=True) for cov in covs: method = cov['method'] these_bounds = bounds[str(rank)][method] this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full')) if rank == 'full' and method != 'empirical': assert this_rank == n_ch else: assert this_rank == sss_proj_rank assert these_bounds[0] < cov['loglik'] < these_bounds[1], \ (rank, method)
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, epochs_preload=True, data_cov=True): """Read in data used in tests.""" label = mne.read_label(fname_label) events = mne.read_events(fname_event) raw = mne.io.read_raw_fif(fname_raw, preload=True) forward = mne.read_forward_solution(fname_fwd) if all_forward: forward_surf_ori = _read_forward_solution_meg( fname_fwd, surf_ori=True) forward_fixed = _read_forward_solution_meg( fname_fwd, force_fixed=True, surf_ori=True, use_cps=False) forward_vol = _read_forward_solution_meg(fname_fwd_vol) else: forward_surf_ori = None forward_fixed = None forward_vol = None event_id, tmin, tmax = 1, tmin, tmax # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels # Set up pick list: MEG - bad channels left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, ref_meg=False, exclude='bads', selection=left_temporal_channels) raw.pick_channels([raw.ch_names[ii] for ii in picks]) raw.info.normalize_proj() # avoid projection warnings if epochs: # Read epochs epochs = mne.Epochs( raw, events, event_id, tmin, tmax, proj=True, baseline=(None, 0), preload=epochs_preload, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6)) if epochs_preload: epochs.resample(200, npad=0, n_jobs=2) epochs.crop(0, None) evoked = epochs.average() info = evoked.info else: epochs = None evoked = None info = raw.info noise_cov = mne.read_cov(fname_cov) noise_cov['projs'] = [] # avoid warning with warnings.catch_warnings(record=True): # bad proj noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05, eeg=0.1, proj=True) if data_cov: with warnings.catch_warnings(record=True): # too few samples data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145) else: data_cov = None return raw, epochs, evoked, data_cov, noise_cov, label, forward,\ forward_surf_ori, forward_fixed, forward_vol
def calc_inverse_operator(events_id, epochs_fn, fwd_sub_fn, inv_fn,
                          min_crop_t=None, max_crop_t=0):
    for cond in events_id.keys():
        epochs = mne.read_epochs(epochs_fn.format(cond=cond))
        noise_cov = mne.compute_covariance(
            epochs.crop(min_crop_t, max_crop_t, copy=True))
        forward_sub = mne.read_forward_solution(fwd_sub_fn.format(cond=cond))
        inverse_operator_sub = make_inverse_operator(
            epochs.info, forward_sub, noise_cov, loose=None, depth=None)
        write_inverse_operator(inv_fn.format(cond=cond),
                               inverse_operator_sub)
def compute_epochs_cov_evokeds(subject): """Epoch, compute noise covariance and average. params: subject : str the subject id to be loaded """ raw = Raw(save_folder + "%s_filtered_ica_mc_raw_tsss.fif" % subject, preload=True) # Select events to extract epochs from. event_id = {'ent_left': 1, 'ent_right': 2, 'ctl_left': 4, 'ctl_right': 8} # Setup for reading the raw data events = mne.find_events(raw, min_duration=0.01) picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=False, include=include, exclude='bads') # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=reject, preload=True) epochs.save(epochs_folder + "%s_filtered_ica_mc_tsss-epo.fif" % subject) # Plot epochs. # epochs.plot(trellis=False) # Look at channels that caused dropped events, showing that the subject's # blinks were likely to blame for most epochs being dropped epochs.drop_bad_epochs() fig = epochs.plot_drop_log(subject=subject, show=False) fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject) # Make noise cov cov = compute_covariance(epochs, tmin=None, tmax=0, method="auto") mne.write_cov(epochs_folder + "%s-cov.fif" % subject, cov) # Average epochs and get evoked data corresponding to the left stimulation ########################################################################### # Save evoked responses for different conditions to disk # average epochs and get Evoked datasets evokeds = [epochs[cond].average() for cond in ['ent_left', 'ent_right', 'ctl_left', 'ctl_right']] evokeds = [epochs[cond].average() for cond in epochs.event_id.keys()] # save evoked data to disk mne.write_evokeds(epochs_folder + '%s_filtered_ica_mc_raw_tsss-ave.fif' % subject, evokeds) plt.close("all")
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, epochs_preload=True, data_cov=True): """Read in data used in tests """ label = mne.read_label(fname_label) events = mne.read_events(fname_event) raw = mne.fiff.Raw(fname_raw, preload=True) forward = mne.read_forward_solution(fname_fwd) if all_forward: forward_surf_ori = mne.read_forward_solution(fname_fwd, surf_ori=True) forward_fixed = mne.read_forward_solution(fname_fwd, force_fixed=True, surf_ori=True) forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True) else: forward_surf_ori = None forward_fixed = None forward_vol = None event_id, tmin, tmax = 1, tmin, tmax # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels if epochs: # Set up pick list: MEG - bad channels left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, ref_meg=False, exclude='bads', selection=left_temporal_channels) # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=epochs_preload, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6)) if epochs_preload: epochs.resample(200, npad=0, n_jobs=2) evoked = epochs.average() info = evoked.info else: epochs = None evoked = None info = raw.info noise_cov = mne.read_cov(fname_cov) noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05, eeg=0.1, proj=True) if data_cov: data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15) else: data_cov = None return raw, epochs, evoked, data_cov, noise_cov, label, forward,\ forward_surf_ori, forward_fixed, forward_vol
def test_lcmv(): """Test LCMV with evoked data and single trials """ event_id, tmin, tmax = 1, -0.1, 0.15 # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels # Set up pick list: EEG + MEG - bad channels (modify to your needs) left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, exclude='bads', selection=left_temporal_channels) # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=True, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6)) epochs.resample(200, npad=0, n_jobs=2) evoked = epochs.average() noise_cov = mne.read_cov(fname_cov) noise_cov = mne.cov.regularize(noise_cov, evoked.info, mag=0.05, grad=0.05, eeg=0.1, proj=True) data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15) stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01) stc_pow = np.sum(stc.data, axis=1) idx = np.argmax(stc_pow) max_stc = stc.data[idx] tmax = stc.times[np.argmax(max_stc)] assert_true(0.09 < tmax < 0.1) assert_true(2. < np.max(max_stc) < 3.) # Now test single trial using fixed orientation forward solution # so we can compare it to the evoked solution forward_fixed = mne.read_forward_solution(fname_fwd, force_fixed=True, surf_ori=True) stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01) epochs.drop_bad_epochs() assert_true(len(epochs.events) == len(stcs)) # average the single trial estimates stc_avg = np.zeros_like(stc.data) for this_stc in stcs: stc_avg += this_stc.data stc_avg /= len(stcs) # compare it to the solution using evoked with fixed orientation stc_fixed = lcmv(evoked, forward_fixed, noise_cov, data_cov, reg=0.01) assert_array_almost_equal(stc_avg, stc_fixed.data)
def test_cov_estimation_with_triggers(): """Test estimation from raw with triggers """ events = find_events(raw) event_ids = [1, 2, 3, 4] reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6) # cov with merged events and keep_sample_mean=True events_merged = merge_events(events, event_ids, 1234) epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True) cov = compute_covariance(epochs, keep_sample_mean=True) cov_mne = read_cov(cov_km_fname) assert_true(cov_mne.ch_names == cov.ch_names) assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 0.005) # Test with tmin and tmax (different but not too much) cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01) assert_true(np.all(cov.data != cov_tmin_tmax.data)) assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') / linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05) # cov using a list of epochs and keep_sample_mean=True epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject) for ev_id in event_ids] cov2 = compute_covariance(epochs, keep_sample_mean=True) assert_array_almost_equal(cov.data, cov2.data) assert_true(cov.ch_names == cov2.ch_names) # cov with keep_sample_mean=False using a list of epochs cov = compute_covariance(epochs, keep_sample_mean=False) cov_mne = read_cov(cov_fname) assert_true(cov_mne.ch_names == cov.ch_names) assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 0.005) # test IO when computation done in Python cov.save('test-cov.fif') # test saving cov_read = read_cov('test-cov.fif') assert_true(cov_read.ch_names == cov.ch_names) assert_true(cov_read.nfree == cov.nfree) assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 1e-5) # cov with list of epochs with different projectors epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject), Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=False, reject=reject)] # these should fail assert_raises(ValueError, compute_covariance, epochs) assert_raises(ValueError, compute_covariance, epochs, projs=None) # these should work, but won't be equal to above cov = compute_covariance(epochs, projs=epochs[0].info['projs']) cov = compute_covariance(epochs, projs=[])
def localize_epochs(epochs, fwd, reg=0):
    """Return a list of SourceEstimates, one per Epoch."""
    cov = mne.compute_covariance(epochs)
    weights = calculate_weights(fwd, cov, reg=reg)
    stcs = []

    print('Multiplying data by beamformer weights...')
    for epoch in epochs:
        sol = np.dot(weights, epoch)
        src = mne.SourceEstimate(sol, [fwd['src'][0]['vertno'],
                                       fwd['src'][1]['vertno']],
                                 epochs.tmin,
                                 epochs.times[1] - epochs.times[0])
        stcs.append(src)

    return stcs
def test_cov_estimation_with_triggers():
    """Estimate raw with triggers."""
    raw = Raw(raw_fname)
    events = mne.find_events(raw)
    event_ids = [1, 2, 3, 4]
    cov = mne.compute_covariance(raw, events, event_ids, tmin=-0.2, tmax=0,
                                 reject=dict(grad=10000e-13, mag=4e-12,
                                             eeg=80e-6, eog=150e-6),
                                 keep_sample_mean=True)
    cov_mne = mne.Covariance(cov_fname)
    assert cov_mne.ch_names == cov.ch_names
    assert (linalg.norm(cov.data - cov_mne.data, ord='fro') /
            linalg.norm(cov.data, ord='fro')) < 0.05
def compute_bias(raw):
    events = find_events(raw, 'STI201', verbose=False)
    events = events[1:]  # first one has an artifact
    tmin, tmax = -0.2, 0.1
    epochs = mne.Epochs(raw, events, dipole_number, tmin, tmax,
                        baseline=(None, -0.01), preload=True, verbose=False)
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None,
                                   verbose=False)
    cov = mne.compute_covariance(epochs, tmax=0, method='oas',
                                 rank=None, verbose=False)
    idx = epochs.time_as_index(0.036)[0]
    data = epochs.get_data()[:, :, idx].T
    evoked = mne.EvokedArray(data, epochs.info, tmin=0.)
    dip = fit_dipole(evoked, cov, sphere, n_jobs=1, verbose=False)[0]
    actual_pos = mne.dipole.get_phantom_dipoles()[0][dipole_number - 1]
    misses = 1000 * np.linalg.norm(dip.pos - actual_pos, axis=-1)
    return misses
def test_add_noise(): """Test noise addition.""" rng = np.random.RandomState(0) data_path = testing.data_path() raw = read_raw_fif(data_path + '/MEG/sample/sample_audvis_trunc_raw.fif') raw.del_proj() picks = pick_types(raw.info, eeg=True, exclude=()) cov = compute_raw_covariance(raw, picks=picks) with pytest.raises(RuntimeError, match='to be loaded'): add_noise(raw, cov) raw.crop(0, 1).load_data() with pytest.raises(TypeError, match='Raw, Epochs, or Evoked'): add_noise(0., cov) with pytest.raises(TypeError, match='Covariance'): add_noise(raw, 0.) # test a no-op (data preserved) orig_data = raw[:][0] zero_cov = cov.copy() zero_cov['data'].fill(0) add_noise(raw, zero_cov) new_data = raw[:][0] assert_allclose(orig_data, new_data, atol=1e-30) # set to zero to make comparisons easier raw._data[:] = 0. epochs = EpochsArray(np.zeros((1, len(raw.ch_names), 100)), raw.info.copy()) epochs.info['bads'] = [] evoked = epochs.average(picks=np.arange(len(raw.ch_names))) for inst in (raw, epochs, evoked): with catch_logging() as log: add_noise(inst, cov, random_state=rng, verbose=True) log = log.getvalue() want = ('to {0}/{1} channels ({0}' .format(len(cov['names']), len(raw.ch_names))) assert want in log if inst is evoked: inst = EpochsArray(inst.data[np.newaxis], inst.info) if inst is raw: cov_new = compute_raw_covariance(inst, picks=picks, verbose='error') # samples else: cov_new = compute_covariance(inst, verbose='error') # avg ref assert cov['names'] == cov_new['names'] r = np.corrcoef(cov['data'].ravel(), cov_new['data'].ravel())[0, 1] assert r > 0.99
def _run_interface(self, runtime):
    raw_filename = self.inputs.raw_filename
    cov_fname_in = self.inputs.cov_fname_in
    is_epoched = self.inputs.is_epoched
    is_evoked = self.inputs.is_evoked
    events_id = self.inputs.events_id
    t_min = self.inputs.t_min
    t_max = self.inputs.t_max

    if cov_fname_in == '' or not op.exists(cov_fname_in):
        if is_epoched and is_evoked:
            raw = Raw(raw_filename)
            events = find_events(raw)

            data_path, basename, ext = split_f(raw.info['filename'])
            self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)

            if not op.exists(self.cov_fname_out):
                print('\n*** COMPUTE COV FROM EPOCHS ***\n' +
                      self.cov_fname_out)

                reject = create_reject_dict(raw.info)
                picks = pick_types(raw.info, meg=True, ref_meg=False,
                                   exclude='bads')

                epochs = Epochs(raw, events, events_id, t_min, t_max,
                                picks=picks, baseline=(None, 0),
                                reject=reject)

                # TODO: method='auto'? too long!!!
                noise_cov = compute_covariance(epochs, tmax=0,
                                               method='diagonal_fixed')
                write_cov(self.cov_fname_out, noise_cov)
            else:
                print('\n *** NOISE cov file %s exists!!! \n'
                      % self.cov_fname_out)
        else:
            print('\n *** NO EPOCH DATA \n')
    else:
        print('\n *** NOISE cov file %s exists!!! \n' % cov_fname_in)
        self.cov_fname_out = cov_fname_in

    return runtime
def test_lcmv_ctf_comp():
    """Test interpolation with compensated CTF data."""
    ctf_dir = op.join(testing.data_path(download=False), 'CTF')
    raw_fname = op.join(ctf_dir, 'somMDYO-18av.ds')
    raw = mne.io.read_raw_ctf(raw_fname, preload=True)
    events = mne.make_fixed_length_events(raw, duration=0.2)[:2]
    epochs = mne.Epochs(raw, events, tmin=0., tmax=0.2)
    evoked = epochs.average()
    with pytest.warns(RuntimeWarning,
                      match='Too few samples .* estimate may be unreliable'):
        data_cov = mne.compute_covariance(epochs)
    fwd = mne.make_forward_solution(evoked.info, None,
                                    mne.setup_volume_source_space(pos=15.0),
                                    mne.make_sphere_model())
    filters = mne.beamformer.make_lcmv(evoked.info, fwd, data_cov)
    assert 'weights' in filters
def test_plot_evoked_cov():
    """Test plot_evoked with noise_cov."""
    evoked = _get_epochs().average()
    cov = read_cov(cov_fname)
    cov['projs'] = []  # avoid warnings
    evoked.plot(noise_cov=cov, time_unit='s')
    with pytest.raises(TypeError, match='Covariance'):
        evoked.plot(noise_cov=1., time_unit='s')
    with pytest.raises(IOError, match='No such file'):
        evoked.plot(noise_cov='nonexistent-cov.fif', time_unit='s')
    raw = read_raw_fif(raw_sss_fname)
    events = make_fixed_length_events(raw)
    epochs = Epochs(raw, events, picks=picks)
    cov = compute_covariance(epochs)
    evoked_sss = epochs.average()
    with pytest.warns(RuntimeWarning, match='relative scaling'):
        evoked_sss.plot(noise_cov=cov, time_unit='s')
    plt.close('all')
def test_plot_evoked_cov(): """Test plot_evoked with noise_cov.""" import matplotlib.pyplot as plt evoked = _get_epochs().average() cov = read_cov(cov_fname) cov['projs'] = [] # avoid warnings evoked.plot(noise_cov=cov, time_unit='s') with pytest.raises(TypeError, match='Covariance'): evoked.plot(noise_cov=1., time_unit='s') with pytest.raises(IOError, match='No such file'): evoked.plot(noise_cov='nonexistent-cov.fif', time_unit='s') raw = read_raw_fif(raw_sss_fname) events = make_fixed_length_events(raw) epochs = Epochs(raw, events) cov = compute_covariance(epochs) evoked_sss = epochs.average() with warnings.catch_warnings(record=True) as w: evoked_sss.plot(noise_cov=cov, time_unit='s') plt.close('all') assert any('relative scal' in str(ww.message) for ww in w)
def test_rank_deficiency():
    """Test adding noise from M/EEG float32 (I/O) cov with projectors."""
    # See gh-5940
    evoked = read_evokeds(ave_fname, 0, baseline=(None, 0))
    evoked.info['bads'] = ['MEG 2443']
    evoked.info['lowpass'] = 20  # fake for decim
    picks = pick_types(evoked.info, meg=True, eeg=False)
    picks = picks[::16]
    evoked.pick_channels([evoked.ch_names[pick] for pick in picks])
    evoked.info.normalize_proj()
    cov = read_cov(cov_fname)
    cov['projs'] = []
    cov = regularize(cov, evoked.info, rank=None)
    cov = pick_channels_cov(cov, evoked.ch_names)
    evoked.data[:] = 0
    add_noise(evoked, cov)
    cov_new = compute_covariance(
        EpochsArray(evoked.data[np.newaxis], evoked.info), verbose='error')
    assert cov['names'] == cov_new['names']
    r = np.corrcoef(cov['data'].ravel(), cov_new['data'].ravel())[0, 1]
    assert r > 0.98
def _calc_inverse(params):
    subject, epochs, overwrite = params
    epo = op.join(REMOTE_ROOT_DIR, 'ave',
                  '{}_ecr_nTSSS_conflict-epo.fif'.format(subject))
    fwd = op.join(REMOTE_ROOT_DIR, 'fwd', '{}_ecr-fwd.fif'.format(subject))
    local_inv_file_name = op.join(
        LOCAL_ROOT_DIR, 'inv',
        '{}_ecr_nTSSS_conflict-inv.fif'.format(subject))
    if os.path.isfile(local_inv_file_name) and not overwrite:
        inverse_operator = read_inverse_operator(local_inv_file_name)
        print('inv already calculated for {}'.format(subject))
    else:
        if epochs is None:
            epochs = mne.read_epochs(epo)
        noise_cov = mne.compute_covariance(epochs.crop(None, 0, copy=True))
        inverse_operator = None
        if not os.path.isfile(fwd):
            print('no fwd for {}'.format(subject))
        else:
            forward = mne.read_forward_solution(fwd)
            inverse_operator = make_inverse_operator(
                epochs.info, forward, noise_cov, loose=None, depth=None)
            write_inverse_operator(local_inv_file_name, inverse_operator)
    return inverse_operator
true_ori = fwd_disc_true['src'][0]['nn'][config.vertex]
# del info, fwd_disc_true, er_raw

epochs = create_epochs(raw)

###############################################################################
# Sensor-level analysis
###############################################################################

epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)

# Make cov matrices
cov = mne.compute_covariance(epochs, tmin=0, tmax=1, method='empirical')
noise_cov = mne.compute_covariance(epochs, tmin=-1, tmax=0,
                                   method='empirical')

# Compute evokeds
evoked_grad = epochs_grad.average()
evoked_mag = epochs_mag.average()
evoked_joint = epochs_joint.average()

###############################################################################
# Compute LCMV beamformer results
###############################################################################

# Read in forward solution
fwd_disc_man = mne.read_forward_solution(fname.fwd_discrete_man)

dists = []
                    event_id, tmin, tmax, baseline=(None, 0), preload=True,
                    proj=True,
                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()

forward = mne.read_forward_solution(fname_fwd)
forward = mne.convert_forward_solution(forward, surf_ori=True)

# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
                                   method='shrunk', rank=None)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
                                  method='shrunk', rank=None)
evoked.plot(time_unit='s')

###############################################################################
# Run beamformers and look at maximum outputs

pick_oris = [None, 'normal', 'max-power', None]
descriptions = ['Free', 'Normal', 'Max-power', 'Fixed']
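###############################################################################
# The loop over beamformer variants is not included in this excerpt. Below is
# a minimal, hedged sketch of what such a loop could look like, assuming
# ``make_lcmv``/``apply_lcmv`` from ``mne.beamformer`` together with the
# ``forward``, ``data_cov``, ``noise_cov``, ``pick_oris`` and ``descriptions``
# objects defined above; it is illustrative only, not the original script.
from mne.beamformer import make_lcmv, apply_lcmv

for pick_ori, desc in zip(pick_oris, descriptions):
    # The 'Fixed' case would normally use a fixed-orientation forward
    # solution rather than a second pick_ori=None entry.
    filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
                        noise_cov=noise_cov, pick_ori=pick_ori,
                        weight_norm='unit-noise-gain', rank=None)
    stc = apply_lcmv(evoked, filters)
    print('%s orientation: peak |amplitude| = %0.2f'
          % (desc, abs(stc.data).max()))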
def test_low_rank(): """Test low-rank covariance matrix estimation.""" raw = read_raw_fif(raw_fname).set_eeg_reference(projection=True).crop(0, 3) raw = maxwell_filter(raw, regularize=None) # heavily reduce the rank sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj n_ch = 366 proj_rank = 365 # one EEG proj events = make_fixed_length_events(raw) methods = ('empirical', 'diagonal_fixed', 'oas') epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True) bounds = { 'None': dict(empirical=(-6000, -5000), diagonal_fixed=(-1500, -500), oas=(-700, -600)), 'full': dict(empirical=(-9000, -8000), diagonal_fixed=(-2000, -1600), oas=(-1600, -1000)), } for rank in ('full', None): covs = compute_covariance(epochs, method=methods, return_estimators=True, verbose='error', rank=rank) for cov in covs: method = cov['method'] these_bounds = bounds[str(rank)][method] this_rank = _cov_rank(cov, epochs.info) if rank is None or method == 'empirical': assert this_rank == sss_proj_rank else: assert this_rank == proj_rank assert these_bounds[0] < cov['loglik'] < these_bounds[1], \ (rank, method) if method == 'empirical': emp_cov = cov # save for later, rank param does not matter # Test equivalence with mne.cov.regularize subspace with pytest.raises(ValueError, match='are dependent.*must equal'): regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2) assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full') assert _cov_rank(reg_cov, epochs.info) == proj_rank del reg_cov with catch_logging() as log: reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None, verbose=True) log = log.getvalue() assert 'jointly' in log assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None) assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank assert_allclose(reg_r_only_cov['data'], reg_r_cov['data']) del reg_r_only_cov, reg_r_cov # test that rank=306 is same as rank='full' epochs_meg = epochs.copy().pick_types() assert len(epochs_meg.ch_names) == 306 epochs_meg.info.update(bads=[], projs=[]) cov_full = compute_covariance(epochs_meg, method='oas', rank='full', verbose='error') assert _cov_rank(cov_full, epochs_meg.info) == 306 cov_dict = compute_covariance(epochs_meg, method='oas', rank=306, verbose='error') assert _cov_rank(cov_dict, epochs_meg.info) == 306 assert_allclose(cov_full['data'], cov_dict['data']) # Work with just EEG data to simplify projection / rank reduction raw.pick_types(meg=False, eeg=True) n_proj = 2 raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj)) n_ch = len(raw.ch_names) rank = n_ch - n_proj - 1 # plus avg proj assert len(raw.info['projs']) == 3 epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True) assert len(raw.ch_names) == n_ch emp_cov = compute_covariance(epochs, rank='full', verbose='error') assert _cov_rank(emp_cov, epochs.info) == rank reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full') assert _cov_rank(reg_cov, epochs.info) == rank reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None) assert _cov_rank(reg_r_cov, epochs.info) == rank dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed', verbose='error') assert _cov_rank(dia_cov, epochs.info) == rank assert_allclose(dia_cov['data'], reg_cov['data']) # test our deprecation: can simply remove later epochs.pick_channels(epochs.ch_names[:103]) # degenerate with pytest.raises(ValueError, match='can.*only be used with rank="full"'): 
compute_covariance(epochs, rank=None, method='pca') with pytest.raises(ValueError, match='can.*only be used with rank="full"'): compute_covariance(epochs, rank=None, method='factor_analysis')
def test_compute_covariance_auto_reg(rank): """Test automated regularization.""" raw = read_raw_fif(raw_fname, preload=True) raw.resample(100, npad='auto') # much faster estimation events = find_events(raw, stim_channel='STI 014') event_ids = [1, 2, 3, 4] reject = dict(mag=4e-12) # cov with merged events and keep_sample_mean=True events_merged = merge_events(events, event_ids, 1234) # we need a few channels for numerical reasons in PCA/FA picks = pick_types(raw.info, meg='mag', eeg=False)[:10] raw.pick_channels([raw.ch_names[pick] for pick in picks]) raw.info.normalize_proj() epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True) epochs = epochs.crop(None, 0)[:5] method_params = dict(factor_analysis=dict(iter_n_components=[3]), pca=dict(iter_n_components=[3])) covs = compute_covariance(epochs, method='auto', method_params=method_params, return_estimators=True, rank=rank) # make sure regularization produces structured differencess diag_mask = np.eye(len(epochs.ch_names)).astype(bool) off_diag_mask = np.invert(diag_mask) for cov_a, cov_b in itt.combinations(covs, 2): if (cov_a['method'] == 'diagonal_fixed' and # here we have diagnoal or no regularization. cov_b['method'] == 'empirical' and rank == 'full'): assert not np.any( cov_a['data'][diag_mask] == cov_b['data'][diag_mask]) # but the rest is the same assert_array_equal(cov_a['data'][off_diag_mask], cov_b['data'][off_diag_mask]) else: # and here we have shrinkage everywhere. assert not np.any( cov_a['data'][diag_mask] == cov_b['data'][diag_mask]) assert not np.any( cov_a['data'][diag_mask] == cov_b['data'][diag_mask]) logliks = [c['loglik'] for c in covs] assert np.diff(logliks).max() <= 0 # descending order methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage'] if rank == 'full': methods.extend(['factor_analysis', 'pca']) cov3 = compute_covariance(epochs, method=methods, method_params=method_params, projs=None, return_estimators=True, rank=rank) method_names = [cov['method'] for cov in cov3] best_bounds = [-45, -35] bounds = [-55, -45] if rank == 'full' else best_bounds for method in set(methods) - set(['empirical', 'shrunk']): this_lik = cov3[method_names.index(method)]['loglik'] assert bounds[0] < this_lik < bounds[1] this_lik = cov3[method_names.index('shrunk')]['loglik'] assert best_bounds[0] < this_lik < best_bounds[1] this_lik = cov3[method_names.index('empirical')]['loglik'] bounds = [-110, -100] if rank == 'full' else best_bounds assert bounds[0] < this_lik < bounds[1] assert_equal(set([c['method'] for c in cov3]), set(methods)) cov4 = compute_covariance(epochs, method=methods, method_params=method_params, projs=None, return_estimators=False, rank=rank) assert cov3[0]['method'] == cov4['method'] # ordering # invalid prespecified method pytest.raises(ValueError, compute_covariance, epochs, method='pizza') # invalid scalings pytest.raises(ValueError, compute_covariance, epochs, method='shrunk', scalings=dict(misc=123))
events = mne.make_fixed_length_events(raw, id=1, duration=.250)

# Epoch length is 1.5 second
meg_epochs = Epochs(raw, events, tmin=0., tmax=1.500, baseline=None,
                    detrend=1, decim=1, preload=True)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.500, baseline=None)

# Prepare data
X = np.array([
    mne.compute_covariance(meg_epochs[ii], method='oas')['data'][None]
    for ii in range(len(meg_epochs))
])
y = emg_epochs.get_data().var(axis=2)[:, 0]  # target is EMG power

n_sub, n_fb, n_ch, _ = X.shape

# Define models
identity = ProjIdentitySpace()
lw = ProjLWSpace(shrink=shrink)
commoneucl = ProjCommonSpace(scale=scale, n_compo=n_compo, reg=reg)
spoc = ProjSPoCSpace(shrink=shrink, scale=scale, n_compo=n_compo, reg=reg)

logdiag = LogDiag()
naivevec = NaiveVec(method='upper')
def test_tf_lcmv(): """Test TF beamforming based on LCMV.""" label = mne.read_label(fname_label) events = mne.read_events(fname_event) raw = mne.io.read_raw_fif(fname_raw, preload=True) forward = mne.read_forward_solution(fname_fwd) event_id, tmin, tmax = 1, -0.2, 0.2 # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels # Set up pick list: MEG - bad channels left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.pick_types(raw.info, selection=left_temporal_channels) picks = picks[::2] # decimate for speed raw.pick_channels([raw.ch_names[ii] for ii in picks]) raw.info.normalize_proj() # avoid projection warnings del picks # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, baseline=None, preload=False, reject=reject) epochs.load_data() freq_bins = [(4, 12), (15, 40)] time_windows = [(-0.1, 0.1), (0.0, 0.2)] win_lengths = [0.2, 0.2] tstep = 0.1 reg = 0.05 source_power = [] noise_covs = [] for (l_freq, h_freq), win_length in zip(freq_bins, win_lengths): raw_band = raw.copy() raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1, iir_params=dict(output='ba')) epochs_band = mne.Epochs(raw_band, epochs.events, epochs.event_id, tmin=tmin, tmax=tmax, baseline=None, proj=True) noise_cov = mne.compute_covariance(epochs_band, tmin=tmin, tmax=tmin + win_length) noise_cov = mne.cov.regularize(noise_cov, epochs_band.info, mag=reg, grad=reg, eeg=reg, proj=True, rank=None) noise_covs.append(noise_cov) del raw_band # to save memory # Manually calculating source power in on frequency band and several # time windows to compare to tf_lcmv results and test overlapping if (l_freq, h_freq) == freq_bins[0]: for time_window in time_windows: data_cov = mne.compute_covariance(epochs_band, tmin=time_window[0], tmax=time_window[1]) stc_source_power = _lcmv_source_power( epochs.info, forward, noise_cov, data_cov, reg=reg, label=label, weight_norm='unit-noise-gain') source_power.append(stc_source_power.data) pytest.raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths, freq_bins, reg=reg, label=label) stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths, freq_bins, reg=reg, label=label, raw=raw) assert (len(stcs) == len(freq_bins)) assert (stcs[0].shape[1] == 4) # Averaging all time windows that overlap the time period 0 to 100 ms source_power = np.mean(source_power, axis=0) # Selecting the first frequency bin in tf_lcmv results stc = stcs[0] # Comparing tf_lcmv results with _lcmv_source_power results assert_array_almost_equal(stc.data[:, 2], source_power[:, 0]) # Test if using unsupported max-power orientation is detected pytest.raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths, freq_bins=freq_bins, pick_ori='max-power') # Test if incorrect number of noise CSDs is detected # Test if incorrect number of noise covariances is detected pytest.raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]], tmin, tmax, tstep, win_lengths, freq_bins) # Test if freq_bins and win_lengths incompatibility is detected pytest.raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins) # Test if time step exceeding window lengths is detected pytest.raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax, tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins) # Test if missing of noise covariance matrix is detected when more than # one channel type is present in the data 
pytest.raises(ValueError, tf_lcmv, epochs, forward, noise_covs=None, tmin=tmin, tmax=tmax, tstep=tstep, win_lengths=win_lengths, freq_bins=freq_bins) # Test if unsupported weight normalization specification is detected pytest.raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths, freq_bins, weight_norm='nai') # Test unsupported pick_ori (vector not supported here) with pytest.raises(ValueError, match='pick_ori must be one of'): tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths, freq_bins, pick_ori='vector') # Test correct detection of preloaded epochs objects that do not contain # the underlying raw object epochs_preloaded = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, baseline=(None, 0), preload=True) epochs_preloaded._raw = None pytest.raises(ValueError, tf_lcmv, epochs_preloaded, forward, noise_covs, tmin, tmax, tstep, win_lengths, freq_bins) # Pass only one epoch to test if subtracting evoked # responses yields zeros with pytest.warns(RuntimeWarning, match='Too few samples .* estimate may be unreliable'): stcs = tf_lcmv(epochs[0], forward, noise_covs, tmin, tmax, tstep, win_lengths, freq_bins, subtract_evoked=True, reg=reg, label=label, raw=raw) assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
def test_channel_name_limit(tmpdir, monkeypatch, fname): """Test that our remapping works properly.""" # # raw # if fname.endswith('fif'): raw = read_raw_fif(fname) raw.pick_channels(raw.ch_names[:3]) ref_names = [] data_names = raw.ch_names else: assert fname.endswith('.ds') raw = read_raw_ctf(fname) ref_names = [ raw.ch_names[pick] for pick in pick_types(raw.info, meg=False, ref_meg=True) ] data_names = raw.ch_names[32:35] proj = dict(data=np.ones((1, len(data_names))), col_names=data_names[:2].copy(), row_names=None, nrow=1) proj = Projection(data=proj, active=False, desc='test', kind=0, explained_var=0.) raw.add_proj(proj, remove_existing=True) raw.info.normalize_proj() raw.pick_channels(data_names + ref_names).crop(0, 2) long_names = ['123456789abcdefg' + name for name in raw.ch_names] fname = tmpdir.join('test-raw.fif') with catch_logging() as log: raw.save(fname) log = log.getvalue() assert 'truncated' not in log rename = dict(zip(raw.ch_names, long_names)) long_data_names = [rename[name] for name in data_names] long_proj_names = long_data_names[:2] raw.rename_channels(rename) for comp in raw.info['comps']: for key in ('row_names', 'col_names'): for name in comp['data'][key]: assert name in raw.ch_names if raw.info['comps']: assert raw.compensation_grade == 0 raw.apply_gradient_compensation(3) assert raw.compensation_grade == 3 assert len(raw.info['projs']) == 1 assert raw.info['projs'][0]['data']['col_names'] == long_proj_names raw.info['bads'] = bads = long_data_names[2:3] good_long_data_names = [ name for name in long_data_names if name not in bads ] with catch_logging() as log: raw.save(fname, overwrite=True, verbose=True) log = log.getvalue() assert 'truncated to 15' in log for name in raw.ch_names: assert len(name) > 15 # first read the full way with catch_logging() as log: raw_read = read_raw_fif(fname, verbose=True) log = log.getvalue() assert 'Reading extended channel information' in log for ra in (raw, raw_read): assert ra.ch_names == long_names assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names del raw_read # next read as if no longer names could be read monkeypatch.setattr(meas_info, '_read_extended_ch_info', lambda x, y, z: None) with catch_logging() as log: raw_read = read_raw_fif(fname, verbose=True) log = log.getvalue() assert 'extended' not in log if raw.info['comps']: assert raw_read.compensation_grade == 3 raw_read.apply_gradient_compensation(0) assert raw_read.compensation_grade == 0 monkeypatch.setattr( # restore meas_info, '_read_extended_ch_info', _read_extended_ch_info) short_proj_names = [ f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}' for ni, name in enumerate(long_data_names[:2]) ] assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names # # epochs # epochs = Epochs(raw, make_fixed_length_events(raw)) fname = tmpdir.join('test-epo.fif') epochs.save(fname) epochs_read = read_epochs(fname) for ep in (epochs, epochs_read): assert ep.info['ch_names'] == long_names assert ep.ch_names == long_names del raw, epochs_read # cov epochs.info['bads'] = [] cov = compute_covariance(epochs, verbose='error') fname = tmpdir.join('test-cov.fif') write_cov(fname, cov) cov_read = read_cov(fname) for co in (cov, cov_read): assert co['names'] == long_data_names assert co['bads'] == [] del cov_read # # evoked # evoked = epochs.average() evoked.info['bads'] = bads assert evoked.nave == 1 fname = tmpdir.join('test-ave.fif') evoked.save(fname) evoked_read = read_evokeds(fname)[0] for ev in (evoked, evoked_read): assert ev.ch_names 
== long_names assert ev.info['bads'] == bads del evoked_read, epochs # # forward # with pytest.warns(None): # not enough points for CTF sphere = make_sphere_model('auto', 'auto', evoked.info) src = setup_volume_source_space( pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]])) fwd = make_forward_solution(evoked.info, None, src, sphere) fname = tmpdir.join('temp-fwd.fif') write_forward_solution(fname, fwd) fwd_read = read_forward_solution(fname) for fw in (fwd, fwd_read): assert fw['sol']['row_names'] == long_data_names assert fw['info']['ch_names'] == long_data_names assert fw['info']['bads'] == bads del fwd_read # # inv # inv = make_inverse_operator(evoked.info, fwd, cov) fname = tmpdir.join('test-inv.fif') write_inverse_operator(fname, inv) inv_read = read_inverse_operator(fname) for iv in (inv, inv_read): assert iv['info']['ch_names'] == good_long_data_names apply_inverse(evoked, inv) # smoke test
def get_data(base_path, dipole_idx, dipole_amplitude, use_maxwell_filter, bads=[], show=False): if "phantom_aston" in base_path: data_path = base_path + '/tSSS mc Data' data_path = data_path + '/Amp%d_IASoff_movement/' % dipole_amplitude fname = 'Amp%d_Dip%d_IASoff_movement_tsss_mc.fif' % (dipole_amplitude, dipole_idx) # fname = 'Amp%d_Dip%d_IASoff.fif' % (dipole_amplitude, dipole_idx) stim_channel = 'SYS201' # assert use_maxwell_filter in ['mne', False] else: data_path = base_path + '/%dnAm/' % dipole_amplitude if use_maxwell_filter is True: fname = 'dip%02d_%dnAm_sss.fif' % (dipole_idx, dipole_amplitude) else: fname = 'dip%02d_%dnAm.fif' % (dipole_idx, dipole_amplitude) stim_channel = 'STI201' raw_fname = op.join(data_path, fname) raw = mne.io.read_raw_fif(raw_fname, preload=True, verbose='error') raw.info['bads'] = bads if "phantom_aston" in base_path: raw.crop(20, None) events = mne.find_events(raw, stim_channel=stim_channel) if show: raw.plot(events=events) if show: raw.plot_psd(tmax=np.inf, fmax=60, average=False) raw.fix_mag_coil_types() if use_maxwell_filter == 'mne': # Use Maxwell filtering from MNE raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.)) if show: raw.plot(events=events) ####################################################################### # We know our phantom produces sinusoidal bursts below 25 Hz, so let's # filter. raw.filter(None, 40., h_trans_bandwidth='auto', filter_length='auto', phase='zero') if show: raw.plot(events=events) ####################################################################### # Now we epoch our data, average it tmin, tmax = -0.15, 0.1 event_id = events[0, 2] epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.05), preload=True) evoked = epochs.average() if show: evoked.plot(spatial_colors=True) if show: evoked.plot_joint() evoked.crop(0, None) sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08) cov = mne.compute_covariance(epochs, tmax=-0.05) print(fname + " nave=%d" % evoked.nave, end='') return epochs, evoked, cov, sphere
def run_evoked(subject_id): subject = "sub%03d" % subject_id print("processing subject: %s" % subject) in_path = op.join(data_path, "EEG_Process") evo_path = op.join(data_path, "EEG_Evoked") for run in range(1, 2): fname = op.join(in_path, 'sub_%03d_raw-epo.fif' % (subject_id, )) epochs = mne.read_epochs(fname, preload=True) #compute covariance for later on (inverse solution) fname_cov = op.join(evo_path, "sub_%03d_LSF_HSF-cov.fif" % (subject_id, )) cv = KFold(3, random_state=97) # make sure cv is deterministic cov = mne.compute_covariance(epochs, tmax=-0.01, method='shrunk', cv=cv) cov.save(fname_cov) mne.viz.plot_cov(cov, epochs.info) #general: HSF vs LSF evoked_LSF = epochs['LSF'].average() evoked_HSF = epochs['HSF'].average() contrast = mne.combine_evoked([evoked_HSF, evoked_LSF], weights=[1, -1]) #name the conditions # Simplify comment evoked_LSF.comment = 'evoked_LSF' evoked_HSF.comment = 'evoked_HSF' contrast.comment = 'contrast' #contrast.plot(picks=('Oz'), window_title='CONTRAST') #plot #evoked_LSF.plot(picks=['Oz'], window_title='evoked, condition LSF, electrode Oz') #evoked_HSF.plot(picks=['Oz'], window_title='evoked, condition HSF, electrode Oz') fname_evo = op.join(evo_path, "sub_%03d_LSF_HSF-ave.fif" % (subject_id, )) mne.evoked.write_evokeds(fname_evo, [evoked_LSF, evoked_HSF, contrast]) #compute forward solution for later on (inverse solution) fname_fwd = op.join(evo_path, "sub_%03d_LSF_HSF-fwd.fif" % (subject_id, )) info = mne.io.read_info(fname_evo) fwd = mne.make_forward_solution(info=info, trans=trans, src=src, bem=bem, eeg=True, mindist=5.0, n_jobs=1) print(fwd) leadfield = fwd['sol']['data'] print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape) mne.write_forward_solution(fname_fwd, fwd, overwrite=True) # for illustration purposes use fwd to compute the sensitivity map eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed') eeg_map.plot(time_label='EEG sensitivity LSF', clim=dict(lims=[5, 50, 100]))
                          n_dipoles=n_dipoles, times=times,
                          data_fun=data_fun, random_state=rng)

# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
mne.viz.utils.plt_show()

##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw.info, [stc] * 10, forward=fwd, cov=None,
                       verbose=True)
cov = make_ad_hoc_cov(raw_sim.info)
add_noise(raw_sim, cov, iir_filter=[0.2, -0.2, 0.04], random_state=rng)
add_ecg(raw_sim, random_state=rng)
add_eog(raw_sim, random_state=rng)
raw_sim.plot()

##############################################################################
# Plot evoked data
events = find_events(raw_sim)  # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, tmin=-0.2, tmax=epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical',
                         verbose='error')  # quick calc
evoked = epochs.average()
evoked.plot_white(cov, time_unit='s')
# should use baseline correction when constructing the epochs. Otherwise the
# covariance matrix will be inaccurate. In MNE this is done by default, but
# just to be sure, we define it here manually.
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
                    baseline=(-0.2, 0.0), decim=3)  # we'll decimate for speed

###############################################################################
# Note that this method also attenuates any activity in your source estimates
# that resembles the baseline, whether you like it or not.
noise_cov_baseline = mne.compute_covariance(epochs, tmax=0)

###############################################################################
# Plot the covariance matrices
# ----------------------------
#
# Try setting proj to False to see the effect. Notice that the projectors in
# epochs are already applied, so the ``proj`` parameter has no effect.
noise_cov.plot(raw_empty_room.info, proj=True)
noise_cov_baseline.plot(epochs.info, proj=True)

###############################################################################
# How should I regularize the covariance matrix?
# ----------------------------------------------
#
# The estimated covariance can be numerically
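###############################################################################
# A minimal sketch of two common ways to obtain a regularized estimate for
# the epochs above (the variable names and regularization values here are
# illustrative assumptions, not tuned recommendations): regularize an
# existing covariance with mne.cov.regularize, or let cross-validation pick
# a shrinkage estimator directly in compute_covariance.
noise_cov_reg = mne.cov.regularize(noise_cov_baseline, epochs.info,
                                   mag=0.1, grad=0.1, eeg=0.1, rank=None)
noise_cov_shrunk = mne.compute_covariance(epochs, tmax=0.,
                                          method=['shrunk', 'empirical'],
                                          rank=None)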
epochs = mne.read_epochs(fname.epochs) trans = mne.transforms.read_trans(fname.trans) fwd = mne.read_forward_solution(fname.fwd) ############################################################################### # Sensor-level analysis for beamformer ############################################################################### epochs_grad = epochs.copy().pick_types(meg='grad') epochs_mag = epochs.copy().pick_types(meg='mag') epochs_joint = epochs.copy().pick_types(meg=True) # Make cov matrices noise_cov = mne.compute_covariance(epochs, tmin=-0.2, tmax=0, method='shrunk', rank='info') data_cov = mne.compute_covariance(epochs, tmin=0, tmax=0.4, method='empirical', rank='info') # Compute evokeds tmin = 0.03 tmax = 0.05 evoked_grad = epochs_grad.average().crop(tmin, tmax) evoked_mag = epochs_mag.average().crop(tmin, tmax) evoked_joint = epochs_joint.average().crop(tmin, tmax)
events = mne.find_events(raw, stim_channel='STI 014') event_id, tmin, tmax = 1, -.2, .15 baseline = None epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6), preload=True) evoked = epochs.average() fwd = mne.read_forward_solution(fwd_fname) cov = mne.compute_covariance(epochs) inv = make_inverse_operator(epochs.info, fwd, cov) method = "MNE" snr = 3. lambda2 = 1. / snr**2 stc = apply_inverse(evoked, inv, lambda2, method=method, pick_ori="normal", return_residual=False, verbose=True) pick_vertex = np.argmax(np.linalg.norm(stc.data, axis=1))
par['actiwin'] = [0.010, 0.100]
dfname_stimcat = dfname + '_' + stimcat
epochs_stimcat = epochs[stimcat]

# %% Find trial variance, index outliers, and remove trials beyond the
# plow/phigh percentiles
badtrls, plow, phigh = [], 2.0, 98.0
bad_trials = my_var_cut_fn(epochs_stimcat, plow, phigh, to_plot=False)
print('\n%d trials to remove from total %d trials...'
      '\nNo. of remaining trials = %d\n'
      % (len(bad_trials), len(epochs_stimcat),
         len(epochs_stimcat) - len(bad_trials)))
epochs_stimcat.drop(bad_trials, reason='variance based rejection',
                    verbose=True)
bad_trials = []

# Compute covariance
noise_cov = mne.compute_covariance(epochs_stimcat, tmin=par['ctrlwin'][0],
                                   tmax=par['ctrlwin'][1],
                                   method='empirical', verbose=True)
data_cov = mne.compute_covariance(epochs_stimcat, tmin=par['actiwin'][0],
                                  tmax=par['actiwin'][1],
                                  method='empirical', verbose=True)

evoked = epochs_stimcat.average()
evoked = evoked.crop(par['actiwin'][0], par['actiwin'][1])

# Pull rank from data preprocessing history (Maxwell filter info), if any
cov_rank = (None if not epochs_stimcat.info['proc_history'] else
            int(epochs_stimcat.info['proc_history'][0]['max_info']
                ['sss_info']['nfree']))

# Compute SNR
inverse_operator = mne.minimum_norm.make_inverse_operator(
    evoked.info, fwd, noise_cov, rank=cov_rank, loose=1, depth=0.199,
    verbose=True)
snr, _ = mne.minimum_norm.estimate_snr(evoked, inverse_operator, verbose=True)
peak_ch, peak_time = evoked.get_peak(ch_type='grad')
tstep = 1.0 / evoked.info['sfreq']  # sample period in seconds
tp = int(peak_time // tstep - evoked.times[0] // tstep)
SNR = snr[tp]
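# A minimal sketch of an alternative way to obtain the rank, assuming a
# recent MNE version that provides mne.compute_rank (this helper is not used
# in the pipeline above; the variable names below are illustrative).
rank_from_info = mne.compute_rank(epochs_stimcat, rank='info')  # proc_history / projs
rank_from_cov = mne.compute_rank(noise_cov, info=epochs_stimcat.info)  # from the estimate
print(rank_from_info, rank_from_cov)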
raw.set_eeg_reference(projection=True)
events = mne.find_events(raw)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)
epochs.info['bads'] = []
evoked = epochs.average()

labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)
label_names = [label.name for label in labels]
n_labels = len(labels)

# %%
# Estimate the background noise covariance from the baseline period
# -----------------------------------------------------------------
cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)

# %%
# Generate sinusoids in two spatially distant labels
# --------------------------------------------------
# The known signal is all zeros outside the two labels of interest
signal = np.zeros((n_labels, T))
idx = label_names.index('inferiorparietal-lh')
signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)
idx = label_names.index('rostralmiddlefrontal-rh')
signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)

# %%
# Find the center vertices in source space of each label
# ------------------------------------------------------
# check for M100 evoked.plot() # set metadata: allows you to specify more complex info about events, # can use pandas-style queries to access subsets of data epochs.metadata = trial_info epochs.save(epochs_fname) # SANITY CHECK!!: assert (len(epochs.events) == len(trial_info)) #------------------------------------------------------------------------------- # step 7- make noise covariance matrix if not op.isfile(cov_fname): noise_cov = compute_covariance(epochs, tmax=0., method=['shrunk']) write_cov(cov_fname, noise_cov) else: noise_cov = read_cov(cov_fname) # if using native MRI, need to make_bem_model if not op.isfile(bem_fname): surfaces = make_bem_model(subject, ico=4, conductivity=(0.3, ), subjects_dir=mri_dir, verbose=None) bem = make_bem_solution(surfaces) write_bem_solution(bem_fname, bem) # step 8- make forward solution
tmin, tmax = -.2, .25 # epoch duration epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax, picks=picks, baseline=(-.2, 0.), preload=True) del raw # covariance matrix for pre-stimulus interval tmin, tmax = -.2, 0. cov_pre = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, method='empirical') # covariance matrix for post-stimulus interval (around main evoked responses) tmin, tmax = 0.05, .25 cov_post = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, method='empirical') info = epochs.info del epochs # read forward solution forward = mne.read_forward_solution(fname_fwd) # use forward operator with fixed source orientations mne.convert_forward_solution(forward,
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, epochs_preload=True, data_cov=True): """Read in data used in tests.""" label = mne.read_label(fname_label) events = mne.read_events(fname_event) raw = mne.io.read_raw_fif(fname_raw, preload=True) forward = mne.read_forward_solution(fname_fwd) if all_forward: forward_surf_ori = _read_forward_solution_meg(fname_fwd, surf_ori=True) forward_fixed = _read_forward_solution_meg(fname_fwd, force_fixed=True, surf_ori=True, use_cps=False) forward_vol = _read_forward_solution_meg(fname_fwd_vol) else: forward_surf_ori = None forward_fixed = None forward_vol = None event_id, tmin, tmax = 1, tmin, tmax # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels # Set up pick list: MEG - bad channels left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.pick_types(raw.info, selection=left_temporal_channels) picks = picks[::2] # decimate for speed raw.pick_channels([raw.ch_names[ii] for ii in picks]) del picks raw.info.normalize_proj() # avoid projection warnings if epochs: # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, baseline=(None, 0), preload=epochs_preload, reject=reject) if epochs_preload: epochs.resample(200, npad=0, n_jobs=2) epochs.crop(0, None) evoked = epochs.average() info = evoked.info else: epochs = None evoked = None info = raw.info noise_cov = mne.read_cov(fname_cov) noise_cov['projs'] = [] # avoid warning noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05, eeg=0.1, proj=True, rank=None) if data_cov: data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145) else: data_cov = None return raw, epochs, evoked, data_cov, noise_cov, label, forward,\ forward_surf_ori, forward_fixed, forward_vol
###############################################################################
# Power mapping
# -------------
# With our simulated dataset ready, we can now pretend to be researchers who
# have just recorded this from a real subject and are going to study what
# parts of the brain communicate with each other.
#
# First, we'll create a source estimate of the MEG data. We'll use both a
# straightforward MNE-dSPM inverse solution and the DICS beamformer, which is
# specifically designed to work with oscillatory data.

###############################################################################
# Computing the inverse using MNE-dSPM:

# Estimate the noise covariance on the trial that only contains noise.
cov = mne.compute_covariance(epochs['noise'])
inv = make_inverse_operator(epochs.info, fwd, cov)

# Apply the inverse model to the trial that also contains the signal.
s = apply_inverse(epochs['signal'].average(), inv)

# Take the root-mean-square along the time dimension and plot the result.
s_rms = (s ** 2).mean().sqrt()
brain = s_rms.plot('sample', subjects_dir=subjects_dir, hemi='both', figure=1,
                   size=400)

# Indicate the true locations of the source activity on the plot.
brain.add_foci(source_vert1, coords_as_verts=True, hemi='lh')
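###############################################################################
# A minimal sketch of the DICS computation mentioned above, reusing
# ``epochs`` and ``fwd`` from this example; the 10 Hz frequency of interest
# is an assumption chosen for illustration.
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd

csd = csd_morlet(epochs['signal'], frequencies=[10.])
filters = make_dics(epochs.info, fwd, csd, reg=0.05)
stc_dics, freqs = apply_dics_csd(csd, filters)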
if isMEG:
    epochs1 = mne.epochs.concatenate_epochs([epochs, copy.deepcopy(epochs)])
    epochs = epochs1[0:n_im]
    epochs._data = data[:, :, 0:n_times1].copy()  # overwrite data in place (private attribute)
    del epochs1
else:
    epochs = epochs[0:n_im]
    epochs._data = data[:, :, 0:n_times1].copy()

# interpolate bad channels
epochs.interpolate_bads(reset_bads=True)
evoked = epochs.average()
evoked.save(evoked_path)

t_cov_baseline = -0.05
noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=t_cov_baseline)
noise_cov.save(noise_cov_path)

# offset = 0.04 if isMEG else 0.00
time_ind = np.all(np.vstack([times >= -0.05, times <= 0.9]), axis=0)
M = data[:, :, time_ind]
# times_in_ms = (times[time_ind] - offset) * 1000.0
# print len(times_in_ms)

# ========= actual computation including bootstrap ============================
prior_Q0, prior_Q, prior_sigma_J_list = None, None, None
prior_A = dict(lambda0=0.0, lambda1=1.0)
depth = None
force_fixed = True if depth is None else False
MaxIter0, MaxIter = 100, 30
tol0, tol = 1E-4, 2E-2
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, epochs_preload=True, data_cov=True, proj=True): """Read in data used in tests.""" label = mne.read_label(fname_label) events = mne.read_events(fname_event) raw = mne.io.read_raw_fif(fname_raw, preload=True) forward = mne.read_forward_solution(fname_fwd) if all_forward: forward_surf_ori = _read_forward_solution_meg( fname_fwd, surf_ori=True) forward_fixed = _read_forward_solution_meg( fname_fwd, force_fixed=True, surf_ori=True, use_cps=False) forward_vol = _read_forward_solution_meg(fname_fwd_vol) else: forward_surf_ori = None forward_fixed = None forward_vol = None event_id, tmin, tmax = 1, tmin, tmax # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels # Set up pick list: MEG - bad channels left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.pick_types(raw.info, meg=True, selection=left_temporal_channels) picks = picks[::2] # decimate for speed # add a couple channels we will consider bad bad_picks = [100, 101] bads = [raw.ch_names[pick] for pick in bad_picks] assert not any(pick in picks for pick in bad_picks) picks = np.concatenate([picks, bad_picks]) raw.pick_channels([raw.ch_names[ii] for ii in picks]) del picks raw.info['bads'] = bads # add more bads if proj: raw.info.normalize_proj() # avoid projection warnings else: raw.del_proj() if epochs: # Read epochs epochs = mne.Epochs( raw, events, event_id, tmin, tmax, proj=True, baseline=(None, 0), preload=epochs_preload, reject=reject) if epochs_preload: epochs.resample(200, npad=0) with pytest.warns(RuntimeWarning, match='baseline = None'): epochs.crop(0, None) evoked = epochs.average() info = evoked.info else: epochs = None evoked = None info = raw.info noise_cov = mne.read_cov(fname_cov) noise_cov['projs'] = [] # avoid warning noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05, eeg=0.1, proj=True, rank=None) if data_cov: data_cov = mne.compute_covariance( epochs, tmin=0.04, tmax=0.145, verbose='error') # baseline warning else: data_cov = None return raw, epochs, evoked, data_cov, noise_cov, label, forward,\ forward_surf_ori, forward_fixed, forward_vol
# Set up pick list: EEG + MEG - bad channels (modify to your needs) left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, exclude='bads', selection=left_temporal_channels) # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=True, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6)) evoked = epochs.average() forward = mne.read_forward_solution(fname_fwd, surf_ori=True) # Read regularized noise covariance and compute regularized data covariance noise_cov = mne.read_cov(fname_cov) data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15, method='shrunk') plt.close('all') pick_oris = [None, 'normal', 'max-power'] names = ['free', 'normal', 'max-power'] descriptions = ['Free orientation', 'Normal orientation', 'Max-power ' 'orientation'] colors = ['b', 'k', 'r'] for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors): stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, pick_ori=pick_ori) # View activation time-series label = mne.read_label(fname_label)
tmin = -0.2 # start of each epoch (200ms before the trigger) tmax = 0.5 # end of each epoch (500ms after the trigger) raw.info['bads'] = ['MEG 2443', 'EEG 053'] baseline = (None, 0) # means from the first instant to t = 0 reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=('meg', 'eog'), baseline=baseline, reject=reject) ############################################################################### # Compute regularized noise covariance # ------------------------------------ # # For more details see :ref:`tut_compute_covariance`. noise_cov = mne.compute_covariance( epochs, tmax=0., method=['shrunk', 'empirical'], rank=None, verbose=True) fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info) ############################################################################### # Compute the evoked response # --------------------------- # Let's just use the MEG channels for simplicity. evoked = epochs.average().pick('meg') evoked.plot(time_unit='s') evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag', time_unit='s') ############################################################################### # It's also a good idea to look at whitened data:
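###############################################################################
# A minimal sketch of that check, using the ``noise_cov`` and MEG ``evoked``
# computed above: with a well-estimated covariance, the whitened baseline
# should look like unit-variance noise and the global field power should stay
# near 1 before stimulus onset.
evoked.plot_white(noise_cov, time_unit='s')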
def test_low_rank_cov(raw_epochs_events): """Test additional properties of low rank computations.""" raw, epochs, events = raw_epochs_events sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj n_ch = 366 proj_rank = 365 # one EEG proj with pytest.warns(RuntimeWarning, match='Too few samples'): emp_cov = compute_covariance(epochs) # Test equivalence with mne.cov.regularize subspace with pytest.raises(ValueError, match='are dependent.*must equal'): regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2) assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full') assert _cov_rank(reg_cov, epochs.info) == proj_rank with pytest.warns(RuntimeWarning, match='exceeds the theoretical'): _compute_rank_int(reg_cov, info=epochs.info) del reg_cov with catch_logging() as log: reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None, verbose=True) log = log.getvalue() assert 'jointly' in log assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None) assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank assert_allclose(reg_r_only_cov['data'], reg_r_cov['data']) del reg_r_only_cov, reg_r_cov # test that rank=306 is same as rank='full' epochs_meg = epochs.copy().pick_types(meg=True) assert len(epochs_meg.ch_names) == 306 with epochs_meg.info._unlock(): epochs_meg.info.update(bads=[], projs=[]) cov_full = compute_covariance(epochs_meg, method='oas', rank='full', verbose='error') assert _cov_rank(cov_full, epochs_meg.info) == 306 with pytest.warns(RuntimeWarning, match='few samples'): cov_dict = compute_covariance(epochs_meg, method='oas', rank=dict(meg=306)) assert _cov_rank(cov_dict, epochs_meg.info) == 306 assert_allclose(cov_full['data'], cov_dict['data']) cov_dict = compute_covariance(epochs_meg, method='oas', rank=dict(meg=306), verbose='error') assert _cov_rank(cov_dict, epochs_meg.info) == 306 assert_allclose(cov_full['data'], cov_dict['data']) # Work with just EEG data to simplify projection / rank reduction raw = raw.copy().pick_types(meg=False, eeg=True) n_proj = 2 raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj)) n_ch = len(raw.ch_names) rank = n_ch - n_proj - 1 # plus avg proj assert len(raw.info['projs']) == 3 epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True) assert len(raw.ch_names) == n_ch emp_cov = compute_covariance(epochs, rank='full', verbose='error') assert _cov_rank(emp_cov, epochs.info) == rank reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full') assert _cov_rank(reg_cov, epochs.info) == rank reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None) assert _cov_rank(reg_r_cov, epochs.info) == rank dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed', verbose='error') assert _cov_rank(dia_cov, epochs.info) == rank assert_allclose(dia_cov['data'], reg_cov['data']) epochs.pick_channels(epochs.ch_names[:103]) # degenerate with pytest.raises(ValueError, match='can.*only be used with rank="full"'): compute_covariance(epochs, rank=None, method='pca') with pytest.raises(ValueError, match='can.*only be used with rank="full"'): compute_covariance(epochs, rank=None, method='factor_analysis')
# Create epochs ############################################################################### title = 'Simulated evoked for two signal vertices' epochs = create_epochs(raw2, title=title, fn_simulated_epochs=None, fn_report_h5=fn_report_h5) epochs_grad = epochs.copy().pick_types(meg='grad') epochs_mag = epochs.copy().pick_types(meg='mag') epochs_joint = epochs.copy().pick_types(meg=True) # Make cov matrix data_cov = mne.compute_covariance(epochs, tmin=0, tmax=None, method='empirical') noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0, method='empirical') evoked_grad = epochs_grad.average() evoked_mag = epochs_mag.average() evoked_joint = epochs_joint.average() ############################################################################### # Compute LCMV beamformer results ############################################################################### # Speed things up by restricting the forward solution to only the two
# Also, because we want to combine different channel types (magnetometers and
# gradiometers), we need to account for the different amplitude scales of
# these channel types. To do this we will supply a noise covariance matrix to
# the beamformer, which will be used for whitening.
# The data covariance matrix should be estimated from a time window that
# includes the brain signal of interest, and it should incorporate enough
# samples for a stable estimate. A rule of thumb is to use more samples than
# there are channels in the data set; see :footcite:`BrookesEtAl2008` for more
# detailed advice on covariance estimation for beamformers. Here, we use a
# time window incorporating the expected auditory response at around 100 ms
# post stimulus and extend the period to account for the low number of trials
# (72) and the low sampling rate of 150 Hz.
data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.25,
                                  method='empirical')
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
                                   method='empirical')
data_cov.plot(epochs.info)
del epochs

# %%
# When looking at the covariance matrix plots, we can see that our data are
# slightly rank-deficient, as the rank is not equal to the number of channels.
# Thus, we will have to regularize the covariance matrix before inverting it
# in the beamformer calculation. This can be achieved by setting the parameter
# ``reg=0.05`` when calculating the spatial filter with
# :func:`~mne.beamformer.make_lcmv`. This corresponds to loading the diagonal
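# %%
# A minimal sketch of the regularized spatial filter described above,
# assuming a forward solution ``fwd`` and an ``evoked`` response exist
# elsewhere in this tutorial (they are not defined in this excerpt).
from mne.beamformer import make_lcmv, apply_lcmv

filters = make_lcmv(evoked.info, fwd, data_cov, reg=0.05,
                    noise_cov=noise_cov, pick_ori='max-power',
                    weight_norm='unit-noise-gain', rank='info')
stc = apply_lcmv(evoked, filters)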
def test_cov_estimation_with_triggers(rank): """Test estimation from raw with triggers.""" tempdir = _TempDir() raw = read_raw_fif(raw_fname) raw.set_eeg_reference(projection=True).load_data() events = find_events(raw, stim_channel='STI 014') event_ids = [1, 2, 3, 4] reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6) # cov with merged events and keep_sample_mean=True events_merged = merge_events(events, event_ids, 1234) epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True) cov = compute_covariance(epochs, keep_sample_mean=True) _assert_cov(cov, read_cov(cov_km_fname)) # Test with tmin and tmax (different but not too much) cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01) assert np.all(cov.data != cov_tmin_tmax.data) err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') / linalg.norm(cov_tmin_tmax.data, ord='fro')) assert err < 0.05 # cov using a list of epochs and keep_sample_mean=True epochs = [ Epochs(raw, events, ev_id, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject) for ev_id in event_ids ] cov2 = compute_covariance(epochs, keep_sample_mean=True) assert_array_almost_equal(cov.data, cov2.data) assert cov.ch_names == cov2.ch_names # cov with keep_sample_mean=False using a list of epochs cov = compute_covariance(epochs, keep_sample_mean=False) _assert_cov(cov, read_cov(cov_fname), nfree=False) method_params = {'empirical': {'assume_centered': False}} pytest.raises(ValueError, compute_covariance, epochs, keep_sample_mean=False, method_params=method_params) pytest.raises(ValueError, compute_covariance, epochs, keep_sample_mean=False, method='shrunk', rank=rank) # test IO when computation done in Python cov.save(op.join(tempdir, 'test-cov.fif')) # test saving cov_read = read_cov(op.join(tempdir, 'test-cov.fif')) _assert_cov(cov, cov_read, 1e-5) # cov with list of epochs with different projectors epochs = [ Epochs(raw, events[:1], None, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True), Epochs(raw, events[:1], None, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=False) ] # these should fail pytest.raises(ValueError, compute_covariance, epochs) pytest.raises(ValueError, compute_covariance, epochs, projs=None) # these should work, but won't be equal to above with pytest.warns(RuntimeWarning, match='Too few samples'): cov = compute_covariance(epochs, projs=epochs[0].info['projs']) with pytest.warns(RuntimeWarning, match='Too few samples'): cov = compute_covariance(epochs, projs=[]) # test new dict support epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0, proj=True, reject=reject, preload=True) with pytest.warns(RuntimeWarning, match='Too few samples'): compute_covariance(epochs) with pytest.warns(RuntimeWarning, match='Too few samples'): compute_covariance(epochs, projs=[]) pytest.raises(TypeError, compute_covariance, epochs, projs='foo') pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_lcmv_reg_proj(proj, weight_norm): """Test LCMV with and without proj.""" raw = mne.io.read_raw_fif(fname_raw, preload=True) events = mne.find_events(raw) raw.pick_types(meg=True) assert len(raw.ch_names) == 305 epochs = mne.Epochs(raw, events, None, preload=True, proj=proj) with pytest.warns(RuntimeWarning, match='Too few samples'): noise_cov = mne.compute_covariance(epochs, tmax=0) data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15) forward = mne.read_forward_solution(fname_fwd) filters = make_lcmv(epochs.info, forward, data_cov, reg=0.05, noise_cov=noise_cov, pick_ori='max-power', weight_norm='nai', rank=None, verbose=True) want_rank = 302 # 305 good channels - 3 MEG projs assert filters['rank'] == want_rank # And also with and without noise_cov with pytest.raises(ValueError, match='several sensor types'): make_lcmv(epochs.info, forward, data_cov, reg=0.05, noise_cov=None) epochs.pick_types(meg='grad') kwargs = dict(reg=0.05, pick_ori=None, weight_norm=weight_norm) filters_cov = make_lcmv(epochs.info, forward, data_cov, noise_cov=noise_cov, **kwargs) filters_nocov = make_lcmv(epochs.info, forward, data_cov, noise_cov=None, **kwargs) ad_hoc = mne.make_ad_hoc_cov(epochs.info) filters_adhoc = make_lcmv(epochs.info, forward, data_cov, noise_cov=ad_hoc, **kwargs) evoked = epochs.average() stc_cov = apply_lcmv(evoked, filters_cov) stc_nocov = apply_lcmv(evoked, filters_nocov) stc_adhoc = apply_lcmv(evoked, filters_adhoc) # Compare adhoc and nocov: scale difference is necessitated by using std=1. if weight_norm == 'unit-noise-gain': scale = np.sqrt(ad_hoc['data'][0]) else: scale = 1. assert_allclose(stc_nocov.data, stc_adhoc.data * scale) a = np.dot(filters_nocov['weights'], filters_nocov['whitener']) b = np.dot(filters_adhoc['weights'], filters_adhoc['whitener']) * scale atol = np.mean(np.sqrt(a * a)) * 1e-7 assert_allclose(a, b, atol=atol, rtol=1e-7) # Compare adhoc and cov: locs might not be equivalent, but the same # general profile should persist, so look at the std and be lenient: if weight_norm == 'unit-noise-gain': adhoc_scale = 0.12 else: adhoc_scale = 1. assert_allclose( np.linalg.norm(stc_adhoc.data, axis=0) * adhoc_scale, np.linalg.norm(stc_cov.data, axis=0), rtol=0.3) assert_allclose( np.linalg.norm(stc_nocov.data, axis=0) / scale * adhoc_scale, np.linalg.norm(stc_cov.data, axis=0), rtol=0.3) if weight_norm == 'nai': # NAI is always normalized by noise-level (based on eigenvalues) for stc in (stc_nocov, stc_cov): assert_allclose(stc.data.std(), 0.584, rtol=0.2) elif weight_norm is None: # None always represents something not normalized, reflecting channel # weights for stc in (stc_nocov, stc_cov): assert_allclose(stc.data.std(), 2.8e-8, rtol=0.1) else: assert weight_norm == 'unit-noise-gain' # Channel scalings depend on presence of noise_cov assert_allclose(stc_nocov.data.std(), 7.8e-13, rtol=0.1) assert_allclose(stc_cov.data.std(), 0.187, rtol=0.2)
fwd = mne.make_forward_solution(epochs.info, trans=transfile, src=src_vol,
                                bem=bem, meg=True, eeg=False, mindist=2.5,
                                n_jobs=1)
if 'fwd' in locals() and len(epochs.ch_names) != fwd['nchan']:
    fwd = mne.make_forward_solution(epochs.info, trans=transfile, src=src_vol,
                                    bem=bem, meg=True, eeg=False, mindist=2.5,
                                    n_jobs=1)
print("Leadfield size : %d sensors x %d dipoles" % fwd['sol']['data'].shape)

# %% Average post-stim data and find input SNR
evoked = epochs.average()
evoked_pst = evoked.copy().crop(tmin=0.050, tmax=0.500)
if more_plots:
    evoked.comment = dfname
    evoked.plot(spatial_colors=True, gfp=True, time_unit='ms')
    evoked_pst.plot(spatial_colors=True, gfp=True, time_unit='ms')

noise_cov = mne.compute_covariance(epochs, tmin=-0.500, tmax=-0.050,
                                   method='empirical')
data_cov = mne.compute_covariance(epochs, tmin=0.050, tmax=0.500,
                                  method='empirical')
cov_rank = (None if raw.info['proc_history'] == [] else
            int(raw.info['proc_history'][0]['max_info']['sss_info']['nfree']))

inverse_operator = mne.minimum_norm.make_inverse_operator(
    epochs.info, fwd, noise_cov, rank=cov_rank, loose=1.0, depth=0.199)
snr_mnep, kk = mne.minimum_norm.estimate_snr(evoked_pst, inverse_operator,
                                             verbose=True)
peak_ch, peak_time = evoked_pst.get_peak(ch_type='mag')
tp = int((peak_time - 0.050) * evoked_pst.info['sfreq'])
snr = snr_mnep[tp]
snr_10log10 = 10 * np.log10(snr)

# %% Compute spatial filter and apply on post-stim data
filters = mne.beamformer.make_lcmv(evoked.info, fwd, data_cov, reg=0.05,
                                   noise_cov=noise_cov, pick_ori='max-power',
                                   rank=cov_rank, weight_norm='nai',
subtract_evoked = False

# Calculate the noise covariance from empty-room noise. To use baseline data
# as noise instead, substitute raw for raw_noise, epochs.events for
# epochs_noise.events, the desired baseline start for tmin_plot, and 0 for
# tmax_plot. Note that if baseline data are used, the averaged evoked
# response in the baseline period should be flat.
noise_covs = []
for (l_freq, h_freq) in freq_bins:
    raw_band = raw_noise.copy()
    raw_band.filter(l_freq, h_freq, picks=epochs.picks, method='iir',
                    n_jobs=1)
    epochs_band = mne.Epochs(raw_band, epochs_noise.events, event_id,
                             tmin=tmin_plot, tmax=tmax_plot, baseline=None,
                             picks=epochs.picks, proj=True)
    noise_cov = compute_covariance(epochs_band)
    noise_cov = mne.cov.regularize(noise_cov, epochs_band.info, mag=noise_reg,
                                   grad=noise_reg, eeg=noise_reg, proj=True)
    noise_covs.append(noise_cov)
    del raw_band  # to save memory

# Compute LCMV solutions for time-frequency windows, restricted to a label in
# source space for faster computation; use label=None for the full solution.
stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
               freq_bins=freq_bins, subtract_evoked=subtract_evoked,
               reg=data_reg, label=label)

# Plot the source spectrogram for the source with maximum activity.
# Note that tmin and tmax are set to display a time range that is smaller than
# the one for which beamforming estimates were calculated. This ensures that
# all time bins shown are a result of smoothing across an identical number of
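# A minimal sketch of the baseline-as-noise variant described in the comment
# above, assuming the task recording ``raw`` is available in this script:
# band-pass the task data itself and estimate the noise covariance from the
# pre-stimulus window (tmin to 0) instead of from the empty-room run.
noise_covs_baseline = []
for (l_freq, h_freq) in freq_bins:
    raw_band = raw.copy()
    raw_band.filter(l_freq, h_freq, picks=epochs.picks, method='iir',
                    n_jobs=1)
    epochs_band = mne.Epochs(raw_band, epochs.events, event_id,
                             tmin=tmin, tmax=0, baseline=None,
                             picks=epochs.picks, proj=True)
    cov = compute_covariance(epochs_band)
    cov = mne.cov.regularize(cov, epochs_band.info, mag=noise_reg,
                             grad=noise_reg, eeg=noise_reg, proj=True)
    noise_covs_baseline.append(cov)
    del raw_band  # to save memory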
event_id, tmin, tmax = 1, -1., 3. epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, reject=reject, preload=True) evoked = epochs.filter(1, None).average() evoked = evoked.pick_types(meg=True) evoked.crop(tmin=0.008, tmax=0.2, verbose='error') # ignore baseline # Compute noise covariance matrix cov = mne.compute_covariance(epochs, rank='info', tmax=0.) # Handling forward solution forward = mne.read_forward_solution(fwd_fname) ############################################################################### # Run iterative reweighted multidict TF-MxNE solver alpha, l1_ratio = 20, 0.05 loose, depth = 1, 0.95 # Use a multiscale time-frequency dictionary wsize, tstep = [4, 16], [2, 4] n_tfmxne_iter = 10 # Compute TF-MxNE inverse solution with dipole output dipoles, residual = tf_mixed_norm(evoked,