def test_ica_eeg():
    """Test ICA on EEG (fif + EEGLAB readers, then CTF with compensation).

    Fits a tiny 2-component ICA on raw and epochs for several pick sets and
    smoke-tests ``apply``/``get_sources`` on raw, epochs and evoked.
    """
    method = 'fastica'
    raw_fif = read_raw_fif(fif_fname, preload=True)
    with pytest.warns(RuntimeWarning, match='events'):
        raw_eeglab = read_raw_eeglab(input_fname=eeglab_fname,
                                     montage=eeglab_montage, preload=True)
    for raw in [raw_fif, raw_eeglab]:
        events = make_fixed_length_events(raw, 99999, start=0, stop=0.3,
                                          duration=0.1)
        picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
        picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
        picks_all = []
        picks_all.extend(picks_meg)
        picks_all.extend(picks_eeg)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit (max_iter=2 keeps it fast; convergence warning allowed)
            for inst in [raw, epochs]:
                ica = ICA(n_components=2, random_state=0, max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst, picks=picks)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)

    with pytest.warns(RuntimeWarning, match='MISC channel'):
        raw = read_raw_ctf(ctf_fname2, preload=True)
    events = make_fixed_length_events(raw, 99999, start=0, stop=0.2,
                                      duration=0.1)
    picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
    # BUGFIX: ``picks_meg + picks_eeg`` on ndarrays is element-wise addition
    # (or a broadcast error when the lengths differ), not concatenation.
    # Build the combined pick list the same way as the loop above.
    picks_all = list(picks_meg) + list(picks_eeg)
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2, random_state=0, max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)
def test_check_compensation_consistency():
    """Test check picks compensation.

    Verifies ``_bad_chans_comp`` and ``Epochs`` construction for CTF data
    with and without gradient compensation, with and without reference
    channels in the picks.
    """
    raw = read_raw_ctf(ctf_fname, preload=False)
    events = make_fixed_length_events(raw, 99999)
    # With ref_meg=True all compensation channels are picked, so no channels
    # are ever "bad" for compensation regardless of grade.
    picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=True)
    pick_ch_names = [raw.info['ch_names'][idx] for idx in picks]
    for (comp, expected_result) in zip([0, 1], [False, False]):
        raw.apply_gradient_compensation(comp)
        ret, missing = _bad_chans_comp(raw.info, pick_ch_names)
        assert ret == expected_result
        assert len(missing) == 0
        # Epochs construction must succeed in both grades here.
        Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks)

    # Without reference channels, compensation grade 1 cannot be applied to
    # the picked subset: 17 reference channels are reported missing.
    picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=False)
    pick_ch_names = [raw.info['ch_names'][idx] for idx in picks]
    for (comp, expected_result) in zip([0, 1], [False, True]):
        raw.apply_gradient_compensation(comp)
        ret, missing = _bad_chans_comp(raw.info, pick_ch_names)
        assert ret == expected_result
        assert len(missing) == 17
        if comp != 0:
            # Grade 1 + missing ref channels -> Epochs must refuse.
            with pytest.raises(RuntimeError,
                               match='Compensation grade 1 has been applied'):
                Epochs(raw, events, None, -0.2, 0.2, preload=False,
                       picks=picks)
        else:
            Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks)
def test_ica_ctf():
    """Test run ICA computation on ctf data with/without compensation.

    Also checks that applying an ICA fitted at one compensation grade to data
    at another grade raises.
    """
    method = 'fastica'
    raw = read_raw_ctf(ctf_fname, preload=True)
    events = make_fixed_length_events(raw, 99999)
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        evoked = epochs.average()

        # test fit (max_iter=2 is deliberately too few iterations, so the
        # convergence warning is expected)
        for inst in [raw, epochs]:
            ica = ICA(n_components=2, random_state=0, max_iter=2,
                      method=method)
            with pytest.warns(UserWarning, match='did not converge'):
                ica.fit(inst)

        # test apply and get_sources
        for inst in [raw, epochs, evoked]:
            ica.apply(inst)
            ica.get_sources(inst)

    # test mixed compensation case: fit at grade 0, apply at grade 1
    raw.apply_gradient_compensation(0)
    ica = ICA(n_components=2, random_state=0, max_iter=2, method=method)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw)
    raw.apply_gradient_compensation(1)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    evoked = epochs.average()
    for inst in [raw, epochs, evoked]:
        with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
            ica.apply(inst)
        with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
            ica.get_sources(inst)
def test_check_compensation_consistency():
    """Test check picks compensation.

    Variant of the compensation-consistency test: instead of expecting a
    RuntimeError at grade 1, it verifies that Epochs logs the removal of the
    compensators when the reference channels are not picked.
    """
    raw = read_raw_ctf(ctf_fname, preload=False)
    events = make_fixed_length_events(raw, 99999)
    # With ref_meg=True every compensation channel is present in the picks.
    picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=True)
    pick_ch_names = [raw.info['ch_names'][idx] for idx in picks]
    for (comp, expected_result) in zip([0, 1], [False, False]):
        raw.apply_gradient_compensation(comp)
        ret, missing = _bad_chans_comp(raw.info, pick_ch_names)
        assert ret == expected_result
        assert len(missing) == 0
        Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks)

    # Without reference channels, 17 of them are reported missing and the
    # compensators must be dropped (logged) when building Epochs.
    picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=False)
    pick_ch_names = [raw.info['ch_names'][idx] for idx in picks]
    for (comp, expected_result) in zip([0, 1], [False, True]):
        raw.apply_gradient_compensation(comp)
        ret, missing = _bad_chans_comp(raw.info, pick_ch_names)
        assert ret == expected_result
        assert len(missing) == 17
        with catch_logging() as log:
            Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks,
                   verbose=True)
        assert 'Removing 5 compensators' in log.getvalue()
def test_plot_epochs_ctf():
    """Test of basic CTF plotting.

    Exercises the epochs browser, then butterfly mode with a battery of
    key presses, scrolling and resize/close events.
    """
    raw = read_raw_ctf(ctf_fname, preload=True)
    # NOTE(review): 'MLC11-4304' appears twice in this list — presumably
    # deliberate (duplicate-pick handling); confirm before "fixing".
    raw.pick_channels(['UDIO001', 'UPPT001', 'SCLK01-177', 'BG1-4304',
                       'MLC11-4304', 'MLC11-4304', 'EEG058', 'UADC007-4302'])
    evts = make_fixed_length_events(raw)
    epochs = Epochs(raw, evts, preload=True)
    epochs.plot()
    plt.close('all')
    # test butterfly
    fig = epochs.plot(butterfly=True)
    keystotest = ['b', 'b', 'left', 'right', 'up', 'down', 'pageup',
                  'pagedown', '-', '+', '=', 'f11', 'home', '?', 'h', 'o',
                  'end']
    for key in keystotest:
        fig.canvas.key_press_event(key)
    fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
    fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up
    fig.canvas.resize_event()
    fig.canvas.close_event()  # closing and epoch dropping
    plt.close('all')
def test_cov_ctf():
    """Test basic cov computation on ctf data with/without compensation."""
    raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
    events = make_fixed_length_events(raw, 99999)
    assert len(events) == 2
    ch_names = [raw.info['ch_names'][pick]
                for pick in pick_types(raw.info, meg=True, eeg=False,
                                       ref_meg=False)]

    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        # Only 2 epochs -> "Too few samples" is expected for empirical cov.
        with pytest.warns(RuntimeWarning, match='Too few samples'):
            noise_cov = compute_covariance(epochs, tmax=0.,
                                           method=['empirical'])
        prepare_noise_cov(noise_cov, raw.info, ch_names)

    # Mixed-grade case: cov computed at grade 0, raw then moved to grade 1.
    raw.apply_gradient_compensation(0)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
    raw.apply_gradient_compensation(1)

    # TODO This next call in principle should fail.
    prepare_noise_cov(noise_cov, raw.info, ch_names)

    # make sure comps matrices were not removed from raw
    assert raw.info['comps'], 'Comps matrices removed'
def test_cov_ctf():
    """Test basic cov computation on ctf data with/without compensation.

    Older variant: full preload (no crop) and the 'shrunk' estimator, so no
    "Too few samples" warning is expected here.
    """
    raw = read_raw_ctf(ctf_fname, preload=True)
    events = make_fixed_length_events(raw, 99999)
    ch_names = [raw.info['ch_names'][pick]
                for pick in pick_types(raw.info, meg=True, eeg=False,
                                       ref_meg=False)]

    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        noise_cov = compute_covariance(epochs, tmax=0., method=['shrunk'])
        prepare_noise_cov(noise_cov, raw.info, ch_names)

    # Mixed-grade case: cov computed at grade 0, raw then moved to grade 1.
    raw.apply_gradient_compensation(0)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    noise_cov = compute_covariance(epochs, tmax=0., method=['shrunk'])
    raw.apply_gradient_compensation(1)

    # TODO This next call in principle should fail.
    prepare_noise_cov(noise_cov, raw.info, ch_names)

    # make sure comps matrices were not removed from raw
    if not raw.info['comps']:
        raise RuntimeError('Comps matrices removed')
def test_plot_psd_epochs_ctf(raw_ctf):
    """Test plotting CTF epochs psd (+topomap).

    Parameters
    ----------
    raw_ctf : instance of Raw
        CTF raw fixture supplied by the test harness.
    """
    evts = make_fixed_length_events(raw_ctf)
    epochs = Epochs(raw_ctf, evts, preload=True)
    # Idiom: use the context-manager form of pytest.raises rather than the
    # legacy callable form.
    with pytest.raises(RuntimeError):
        epochs.plot_psd_topomap(bands=[(0, 0.01, 'foo')])  # no freqs in range
    epochs.plot_psd_topomap()

    # EEG060 is flat in this dataset
    for dB in [True, False]:
        with pytest.warns(UserWarning, match='for channel EEG060'):
            epochs.plot_psd(dB=dB)
    epochs.drop_channels(['EEG060'])
    epochs.plot_psd(spatial_colors=False, average=False)
def test_plot_psd_epochs_ctf():
    """Test plotting CTF epochs psd (+topomap).

    Older variant: reads the CTF file itself and matches the newer warning
    message format ``channel(s) EEG060.``.
    """
    raw = read_raw_ctf(ctf_fname, preload=True)
    evts = make_fixed_length_events(raw)
    epochs = Epochs(raw, evts, preload=True)
    pytest.raises(RuntimeError, epochs.plot_psd_topomap,
                  bands=[(0, 0.01, 'foo')])  # no freqs in range
    epochs.plot_psd_topomap()

    # EEG060 is flat in this dataset
    err_str = r'channel\(s\) EEG060\.'
    for dB in [True, False]:
        with pytest.warns(UserWarning, match=err_str):
            epochs.plot_psd(dB=dB)
    epochs.drop_channels(['EEG060'])
    epochs.plot_psd(spatial_colors=False, average=False)
    plt.close('all')
def test_plot_epochs_ctf(raw_ctf, mpl_backend):
    """Test of basic CTF plotting.

    Newer variant using the browser-backend fixture and the figure's
    ``_fake_*`` test helpers instead of raw matplotlib canvas events.
    """
    # NOTE(review): 'MLC11-4304' appears twice in this list — presumably
    # deliberate (duplicate-pick handling); confirm before "fixing".
    raw_ctf.pick_channels(['UDIO001', 'UPPT001', 'SCLK01-177', 'BG1-4304',
                           'MLC11-4304', 'MLC11-4304', 'EEG058',
                           'UADC007-4302'])
    evts = make_fixed_length_events(raw_ctf)
    epochs = Epochs(raw_ctf, evts, preload=True)
    epochs.plot()
    mpl_backend._close_all()

    # test butterfly
    fig = epochs.plot(butterfly=True)
    keys = ('b', 'b', 'pagedown', 'down', 'up', 'down', 'right', 'left', '-',
            '+', '=', 'd', 'd', 'pageup', 'home', 'end', 'z', 'z', 's', 's',
            'f11', '?', 'h', 'j')
    for key in keys:
        fig._fake_keypress(key)
    fig._fake_scroll(0.5, 0.5, -0.5)  # scroll down
    fig._fake_scroll(0.5, 0.5, 0.5)  # scroll up
    fig._resize_by_factor(1)
    fig._fake_keypress('escape')  # close and drop epochs
# reading epochs without preloading means that bad epoch rejection is delayed # until later. To perform bad epoch rejection based on the reject parameter # passed here, run epochs.drop_bad_epochs(). This is done automatically in # tf_lcmv to reject bad epochs based on unfiltered data. event_id = 1 events = mne.read_events(event_fname) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=None, preload=False, reject=reject) # Read empty room noise, preload to allow filtering raw_noise = Raw(noise_fname, preload=True) raw_noise.info['bads'] = ['MEG 2443'] # 1 bad MEG channel # Create artificial events for empty room noise data events_noise = make_fixed_length_events(raw_noise, event_id, duration=1.) # Create an epochs object using preload=True to reject bad epochs based on # unfiltered data epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin, tmax, proj=True, picks=picks, baseline=None, preload=True, reject=reject) # Make sure the number of noise epochs is the same as data epochs epochs_noise = epochs_noise[:len(epochs.events)] # Read forward operator forward = mne.read_forward_solution(fname_fwd, surf_ori=True) # Read label label = mne.read_label(fname_label)
# NOTE(review): this chunk begins mid-expression — the opening of the
# Epochs(...) call (e.g. ``epochs = mne.Epochs(raw,``) lies outside this
# view and must be kept intact wherever the chunk is spliced back.
                    events, event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=(None, 0), preload=False, reject=reject)

# Read empty room noise, preload to allow filtering
raw_noise = Raw(noise_fname, preload=True)
raw_noise.info['bads'] = ['MEG 2443']  # 1 bad MEG channel

# Create artificial events for empty room noise data
events_noise = make_fixed_length_events(raw_noise, event_id, duration=1.)
# Create an epochs object using preload=True to reject bad epochs based on
# unfiltered data
epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin, tmax,
                          proj=True, picks=picks, baseline=(None, 0),
                          preload=True, reject=reject)
# Make sure the number of noise epochs is the same as data epochs
epochs_noise = epochs_noise[:len(epochs.events)]
def test_ica_eeg():
    """Test ICA on EEG (earlier variant without montage/warns on EEGLAB read).

    Fits a tiny 2-component ICA on raw and epochs for several pick sets and
    smoke-tests ``apply``/``get_sources`` on raw, epochs and evoked.
    """
    method = 'fastica'
    raw_fif = read_raw_fif(fif_fname, preload=True)
    raw_eeglab = read_raw_eeglab(input_fname=eeglab_fname, preload=True)
    for raw in [raw_fif, raw_eeglab]:
        events = make_fixed_length_events(raw, 99999, start=0, stop=0.3,
                                          duration=0.1)
        picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
        picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
        picks_all = []
        picks_all.extend(picks_meg)
        picks_all.extend(picks_eeg)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit (max_iter=2 keeps it fast; convergence warning allowed)
            for inst in [raw, epochs]:
                ica = ICA(n_components=2, random_state=0, max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst, picks=picks)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)

    with pytest.warns(RuntimeWarning, match='MISC channel'):
        raw = read_raw_ctf(ctf_fname2, preload=True)
    events = make_fixed_length_events(raw, 99999, start=0, stop=0.2,
                                      duration=0.1)
    picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
    # BUGFIX: ``picks_meg + picks_eeg`` on ndarrays is element-wise addition
    # (or a broadcast error when the lengths differ), not concatenation.
    # Build the combined pick list the same way as the loop above.
    picks_all = list(picks_meg) + list(picks_eeg)
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2, random_state=0, max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)
def test_channel_name_limit(tmp_path, monkeypatch, fname):
    """Test that our remapping works properly.

    Round-trips long (>15 char) channel names through raw, epochs, cov,
    evoked, forward and inverse FIF files, checking that extended channel
    info preserves the names and that the truncated fallback still works.
    """
    #
    # raw
    #
    if fname.endswith('fif'):
        raw = read_raw_fif(fname)
        raw.pick_channels(raw.ch_names[:3])
        ref_names = []
        data_names = raw.ch_names
    else:
        assert fname.endswith('.ds')
        raw = read_raw_ctf(fname)
        ref_names = [raw.ch_names[pick]
                     for pick in pick_types(raw.info, meg=False,
                                            ref_meg=True)]
        data_names = raw.ch_names[32:35]
    # Attach a projector over the first two data channels so its col_names
    # take part in the renaming round-trip below.
    proj = dict(data=np.ones((1, len(data_names))),
                col_names=data_names[:2].copy(), row_names=None, nrow=1)
    proj = Projection(data=proj, active=False, desc='test', kind=0,
                      explained_var=0.)
    raw.add_proj(proj, remove_existing=True)
    raw.info.normalize_proj()
    raw.pick_channels(data_names + ref_names).crop(0, 2)
    # 16-char prefix pushes every name past the 15-char FIF limit.
    long_names = ['123456789abcdefg' + name for name in raw.ch_names]
    fname = tmp_path / 'test-raw.fif'
    with catch_logging() as log:
        raw.save(fname)
    log = log.getvalue()
    assert 'truncated' not in log
    rename = dict(zip(raw.ch_names, long_names))
    long_data_names = [rename[name] for name in data_names]
    long_proj_names = long_data_names[:2]
    raw.rename_channels(rename)
    # Renaming must propagate into the compensation matrices as well.
    for comp in raw.info['comps']:
        for key in ('row_names', 'col_names'):
            for name in comp['data'][key]:
                assert name in raw.ch_names
    if raw.info['comps']:
        assert raw.compensation_grade == 0
        raw.apply_gradient_compensation(3)
        assert raw.compensation_grade == 3
    assert len(raw.info['projs']) == 1
    assert raw.info['projs'][0]['data']['col_names'] == long_proj_names
    raw.info['bads'] = bads = long_data_names[2:3]
    good_long_data_names = [name for name in long_data_names
                            if name not in bads]
    with catch_logging() as log:
        raw.save(fname, overwrite=True, verbose=True)
    log = log.getvalue()
    assert 'truncated to 15' in log
    for name in raw.ch_names:
        assert len(name) > 15
    # first read the full way
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'Reading extended channel information' in log
    for ra in (raw, raw_read):
        assert ra.ch_names == long_names
    assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names
    del raw_read
    # next read as if no longer names could be read
    monkeypatch.setattr(meas_info, '_read_extended_ch_info',
                        lambda x, y, z: None)
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'extended' not in log
    if raw.info['comps']:
        assert raw_read.compensation_grade == 3
        raw_read.apply_gradient_compensation(0)
        assert raw_read.compensation_grade == 0
    monkeypatch.setattr(  # restore
        meas_info, '_read_extended_ch_info', _read_extended_ch_info)
    # Without extended info the names fall back to the truncated+suffixed
    # 15-char form.
    short_proj_names = [
        f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}'
        for ni, name in enumerate(long_data_names[:2])]
    assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names
    #
    # epochs
    #
    epochs = Epochs(raw, make_fixed_length_events(raw))
    fname = tmp_path / 'test-epo.fif'
    epochs.save(fname)
    epochs_read = read_epochs(fname)
    for ep in (epochs, epochs_read):
        assert ep.info['ch_names'] == long_names
        assert ep.ch_names == long_names
    del raw, epochs_read
    # cov
    epochs.info['bads'] = []
    cov = compute_covariance(epochs, verbose='error')
    fname = tmp_path / 'test-cov.fif'
    write_cov(fname, cov)
    cov_read = read_cov(fname)
    for co in (cov, cov_read):
        assert co['names'] == long_data_names
        assert co['bads'] == []
    del cov_read
    #
    # evoked
    #
    evoked = epochs.average()
    evoked.info['bads'] = bads
    assert evoked.nave == 1
    fname = tmp_path / 'test-ave.fif'
    evoked.save(fname)
    evoked_read = read_evokeds(fname)[0]
    for ev in (evoked, evoked_read):
        assert ev.ch_names == long_names
        assert ev.info['bads'] == bads
    del evoked_read, epochs
    #
    # forward
    #
    with _record_warnings():  # not enough points for CTF
        sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(
        pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]]))
    fwd = make_forward_solution(evoked.info, None, src, sphere)
    fname = tmp_path / 'temp-fwd.fif'
    write_forward_solution(fname, fwd)
    fwd_read = read_forward_solution(fname)
    for fw in (fwd, fwd_read):
        assert fw['sol']['row_names'] == long_data_names
        assert fw['info']['ch_names'] == long_data_names
        assert fw['info']['bads'] == bads
    del fwd_read
    #
    # inv
    #
    inv = make_inverse_operator(evoked.info, fwd, cov)
    fname = tmp_path / 'test-inv.fif'
    write_inverse_operator(fname, inv)
    inv_read = read_inverse_operator(fname)
    for iv in (inv, inv_read):
        assert iv['info']['ch_names'] == good_long_data_names
    apply_inverse(evoked, inv)  # smoke test
# NOTE(review): this chunk begins mid-expression — the opening of the
# Epochs(...) call (e.g. ``epochs = mne.Epochs(raw, events,``) lies outside
# this view and must be kept intact wherever the chunk is spliced back.
                    event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=None, preload=True, reject=reject)

# Read empty room noise raw data
raw_noise = Raw(noise_fname)
raw_noise.info['bads'] = ['MEG 2443']  # 1 bad MEG channel

# Create noise epochs and make sure the number of noise epochs corresponds to
# the number of data epochs
events_noise = make_fixed_length_events(raw_noise, event_id)
epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin_plot,
                          tmax_plot, proj=True, picks=picks, baseline=None,
                          preload=True, reject=reject)
# then make sure the number of epochs is the same
epochs_noise = epochs_noise[:len(epochs.events)]

# Read forward operator
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
def test_plot_epochs_selection_butterfly(raw, browser_backend):
    """Test that using selection and butterfly works.

    A single fixed-length epoch is enough to exercise the combination of
    ``group_by='selection'`` with butterfly mode in the epochs browser.
    """
    # Keep just the first fixed-length event so exactly one epoch is built.
    first_event = make_fixed_length_events(raw)[:1]
    single_epochs = Epochs(raw, first_event, tmin=0, tmax=0.5,
                           baseline=None, preload=True)
    assert len(single_epochs) == 1
    single_epochs.plot(group_by='selection', butterfly=True)
# Read epochs event_id = 1 events = mne.read_events(event_fname) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=None, preload=True, proj=True, reject=reject) # Read empty room noise raw data raw_noise = mne.io.read_raw_fif(noise_fname, preload=True) raw_noise.info['bads'] = ['MEG 2443'] # 1 bad MEG channel raw_noise.pick_channels([raw_noise.ch_names[pick] for pick in picks]) raw_noise.info.normalize_proj() # Create noise epochs and make sure the number of noise epochs corresponds to # the number of data epochs events_noise = make_fixed_length_events(raw_noise, event_id) epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin_plot, tmax_plot, baseline=None, preload=True, proj=True, reject=reject) epochs_noise.info.normalize_proj() epochs_noise.apply_proj() # then make sure the number of epochs is the same epochs_noise = epochs_noise[:len(epochs.events)] # Read forward operator forward = mne.read_forward_solution(fname_fwd) # Read label label = mne.read_label(fname_label) ###############################################################################
def DICS_inverse(fn_epo, event_id=1, event='LLst', ctmin=0.05, ctmax=0.25,
                 fmin=4, fmax=8, min_subject='fsaverage'):
    """Inverse evokes into source space using the DICS beamformer.

    Parameters
    ----------
    fn_epo : str | list of str
        Path(s) to ``-epo.fif`` epochs file(s).
    event_id : int
        Event id related with epochs (also used for the artificial
        empty-room events).
    event : str
        Event label used in the output STC file name.
    ctmin : float
        The min time for computing CSD.
    ctmax : float
        The max time for computing CSD.
    fmin : float
        Min value of the frequency band of interest.
    fmax : float
        Max value of the frequency band of interest.
    min_subject : str
        The subject for the common brain space (morph target).

    Notes
    -----
    Relies on module-level globals ``subjects_dir``, ``get_files_from_list``
    and ``set_directory``.  Uses several APIs that were removed from modern
    MNE (``Raw`` from ``mne.io``, ``compute_epochs_csd``, ``morph_data``,
    ``fname=``/``overwrite=`` on ``make_forward_solution``) — presumably
    written for an old MNE version; verify before running.
    """
    from mne import Epochs, pick_types
    from mne.io import Raw
    from mne.event import make_fixed_length_events
    fnlist = get_files_from_list(fn_epo)
    # loop across all filenames
    for fname in fnlist:
        meg_path = os.path.split(fname)[0]
        name = os.path.basename(fname)
        # NOTE(review): stc_name is computed but never used below.
        stc_name = name[:name.rfind('-epo.fif')]
        subject = name.split('_')[0]
        subject_path = subjects_dir + '/%s' % subject
        min_dir = subjects_dir + '/%s' % min_subject
        fn_trans = meg_path + '/%s-trans.fif' % subject
        fn_src = subject_path + '/bem/%s-ico-5-src.fif' % subject
        fn_bem = subject_path + '/bem/%s-5120-5120-5120-bem-sol.fif' % subject
        # Make sure the target path exists
        stc_path = min_dir + '/DICS_ROIs/%s' % subject
        set_directory(stc_path)
        # Read the epochs and the matching empty-room recording
        epochs = mne.read_epochs(fname)
        tmin = epochs.times.min()
        tmax = epochs.times.max()
        fn_empty = meg_path + '/%s_empty,nr-raw.fif' % subject
        raw_noise = Raw(fn_empty, preload=True)
        epochs.info['bads'] = raw_noise.info['bads']
        picks_noise = pick_types(raw_noise.info, meg='mag', exclude='bads')
        # Artificial fixed-length events over the empty-room data
        events_noise = make_fixed_length_events(raw_noise, event_id,
                                                duration=1.)
        epochs_noise = Epochs(raw_noise, events_noise, event_id, tmin, tmax,
                              proj=True, picks=picks_noise, baseline=None,
                              preload=True, reject=None)
        # Make sure the number of noise epochs is the same as data epochs
        epochs_noise = epochs_noise[:len(epochs.events)]
        evoked = epochs.average()
        forward = mne.make_forward_solution(epochs.info, trans=fn_trans,
                                            src=fn_src, bem=fn_bem,
                                            fname=None, meg=True, eeg=False,
                                            mindist=5.0, n_jobs=2,
                                            overwrite=True)
        forward = mne.convert_forward_solution(forward, surf_ori=True)
        from mne.time_frequency import compute_epochs_csd
        from mne.beamformer import dics
        # Cross-spectral densities for data and noise in the band of interest
        data_csd = compute_epochs_csd(epochs, mode='multitaper',
                                      tmin=ctmin, tmax=ctmax,
                                      fmin=fmin, fmax=fmax)
        noise_csd = compute_epochs_csd(epochs_noise, mode='multitaper',
                                       tmin=ctmin, tmax=ctmax,
                                       fmin=fmin, fmax=fmax)
        stc = dics(evoked, forward, noise_csd, data_csd)
        from mne import morph_data
        # Morph to the common space and save
        stc_morph = morph_data(subject, min_subject, stc, grade=5, smooth=5)
        stc_morph.save(stc_path + '/%s_%d_%d' % (event, fmin, fmax),
                       ftype='stc')