def test_cov_estimation_on_raw_segment():
    """Estimate raw on continuous recordings (typically empty room)."""
    raw = Raw(raw_fname)
    cov = compute_raw_data_covariance(raw)
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    print (linalg.norm(cov.data - cov_mne.data, ord='fro') /
           linalg.norm(cov.data, ord='fro'))
    # BUG FIX: the '< 1e-6' comparison previously sat *outside* the
    # assert_true(...) call, so the assertion only checked that the norm
    # ratio was truthy (any non-zero float passes) and the tolerance
    # comparison was silently discarded.  The comparison now lives inside
    # the assertion so the tolerance is actually enforced.
    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro') /
                linalg.norm(cov.data, ord='fro') < 1e-6)

    # test IO when computation done in Python
    cov.save('test-cov.fif')  # test saving
    cov_read = read_cov('test-cov.fif')
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') /
                 linalg.norm(cov.data, ord='fro')) < 1e-5)

    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    # same misplaced-parenthesis fix as above: keep '< 1e-6' inside
    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                            ord='fro') /
                linalg.norm(cov.data, ord='fro') < 1e-6)
def test_cov_estimation_on_raw_segment():
    """Test estimation from raw on continuous recordings (typically empty room)
    """
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov = compute_raw_data_covariance(raw)
    # reference covariance computed outside Python on the same recording;
    # the Frobenius-norm ratio checks the two estimates agree closely
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro') /
                linalg.norm(cov.data, ord='fro') < 1e-4)

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)

    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                            ord='fro') /
                linalg.norm(cov.data, ord='fro') < 1e-4)
    # make sure we get a warning with too short a segment
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_data_covariance(raw_2)
    assert_true(len(w) == 1)
def get_epochs_and_cov(X, y, window=500):
    """Build MNE Epochs from the raw arrays, plus a signal covariance.

    Returns the (epochs, covariance) pair used downstream for xDAWN-style
    spatial filtering.
    """
    raw_train = toMNE(X, y)
    picks = range(len(getChannelNames()))

    # collect one event array per stimulus channel, coded 1..n
    all_events = []
    events_id = {}
    for code, name in enumerate(getEventNames(), start=1):
        found = find_events(raw_train, stim_channel=name, verbose=False)
        found[:, -1] = code
        all_events.append(found)
        events_id[name] = code

    # merge the per-channel events and order them by sample index
    events = np.concatenate(all_events, axis=0)
    events = events[np.argsort(events[:, 0])]

    # epoch window ends at +150 ms; start is chosen so the epoch spans
    # `window` samples at 500 Hz
    tmin = -(window / 500.0) + 1 / 500.0 + 0.150
    epochs = Epochs(raw_train, events, events_id, tmin=tmin, tmax=0.150,
                    proj=False, picks=picks, baseline=None, preload=True,
                    add_eeg_ref=False, verbose=False)

    cov_signal = compute_raw_data_covariance(raw_train, verbose=False)
    return epochs, cov_signal
def INVERSE(wdir, Subject, epoch_info, evokeds):
    """Compute dSPM inverse solutions for a list of evoked responses.

    NOTE(review): parameters inferred from usage -- wdir: study root
    directory; Subject: subject id string; epoch_info: measurement info
    used to build the inverse operator; evokeds: iterable of Evoked
    objects.  Confirm against callers.
    Returns the list of source estimates (one per evoked) and, as a side
    effect, saves a covariance plot under wdir/plots/.
    """
    # import parameters from configuration file
    from configuration import (lambda2, method)

    # compute noise covariance from empty room data
    emptyroom_raw = mne.io.Raw(wdir + '/data/maxfilter/' + Subject + '/' + Subject + '_empty_sss.fif')
    noise_cov = mne.compute_raw_data_covariance(emptyroom_raw)

    # compute dSPM solution
    fname_fwd = wdir + '/data/forward/' + Subject + '/' + Subject + '_phase1_trans_sss_filt140_raw-ico5-fwd.fif'
    forward = mne.read_forward_solution(fname_fwd, surf_ori=True)

    # create inverse operator (loose orientation, depth weighting)
    inverse_operator = make_inverse_operator(epoch_info, forward, noise_cov, loose=0.4, depth=0.8)

    # Compute inverse solution for every evoked response
    stcs = []
    for evoked in evokeds:
        stcs.append(apply_inverse(evoked, inverse_operator, lambda2, method=method, pick_ori=None))

    # save a covariance picture for visual inspection
    mne.viz.plot_cov(noise_cov, epoch_info, colorbar=True, proj=True, show_svd=False, show=False)
    plt.savefig(wdir + "/plots/" + Subject + "_covmat")
    plt.close()

    return stcs
def test_lcmv_raw(): """Test LCMV with raw data """ raw, _, _, _, noise_cov, label, forward, _, _, _ =\ _get_data(all_forward=False, epochs=False, data_cov=False) tmin, tmax = 0, 20 start, stop = raw.time_as_index([tmin, tmax]) # use only the left-temporal MEG channels for LCMV left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.fiff.pick_types(raw.info, meg=True, exclude='bads', selection=left_temporal_channels) data_cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax) stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=label, start=start, stop=stop, picks=picks) assert_array_almost_equal(np.array([tmin, tmax]), np.array([stc.times[0], stc.times[-1]]), decimal=2) # make sure we get an stc with vertices only in the lh vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']] assert_true(len(stc.vertno[0]) == len(np.intersect1d(vertno[0], label.vertices))) assert_true(len(stc.vertno[1]) == 0)
def test_maxwell_filter_additional(): """Test processing of Maxwell filtered data""" # TODO: Future tests integrate with mne/io/tests/test_proc_history # Load testing data (raw, SSS std origin, SSS non-standard origin) data_path = op.join(testing.data_path(download=False)) file_name = 'test_move_anon' raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif') with warnings.catch_warnings(record=True): # maxshield # Use 2.0 seconds of data to get stable cov. estimate raw = Raw(raw_fname, preload=False, proj=False, allow_maxshield=True).crop(0., 2., False) # Get MEG channels, compute Maxwell filtered data raw.preload_data() raw.pick_types(meg=True, eeg=False) int_order, ext_order = 8, 3 raw_sss = maxwell.maxwell_filter(raw, int_order=int_order, ext_order=ext_order) # Test io on processed data tempdir = _TempDir() test_outname = op.join(tempdir, 'test_raw_sss.fif') raw_sss.save(test_outname) raw_sss_loaded = Raw(test_outname, preload=True, proj=False, allow_maxshield=True) # Some numerical imprecision since save uses 'single' fmt assert_allclose(raw_sss_loaded._data[:, :], raw_sss._data[:, :], rtol=1e-6, atol=1e-20) # Test rank of covariance matrices for raw and SSS processed data cov_raw = compute_raw_data_covariance(raw) cov_sss = compute_raw_data_covariance(raw_sss) scalings = None cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings) cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info, scalings) assert_equal(cov_raw_rank, raw.info['nchan']) assert_equal(cov_sss_rank, maxwell.get_num_moments(int_order, 0))
def test_cov_estimation_on_raw_segment():
    """Estimate raw on continuous recordings (typically empty room)
    """
    # NOTE(review): uses the old mne API (mne.Covariance(fname) reader);
    # kept as-is for this code base.
    raw = Raw(raw_fname)
    cov = mne.compute_raw_data_covariance(raw)
    cov_mne = mne.Covariance(erm_cov_fname)
    assert cov_mne.ch_names == cov.ch_names
    # print the relative Frobenius-norm difference for inspection
    print (linalg.norm(cov.data - cov_mne.data, ord='fro') /
           linalg.norm(cov.data, ord='fro'))
    assert (linalg.norm(cov.data - cov_mne.data, ord='fro') /
            linalg.norm(cov.data, ord='fro')) < 1e-6
def _fit_xdawn(self, raw, epochs=None):
    """Fit the xDAWN spatial filters from raw data (and optional epochs)."""
    # Reuse caller-provided epochs when available; otherwise build them
    # from the raw recording and the known events.
    if epochs is None:
        epochs = self._epochs(raw, self._events())

    signal_cov = mne.compute_raw_data_covariance(raw, picks=self._picks(raw), verbose=False)
    sig_cov = signal_cov.data
    evoked_cov = np.cov(epochs.average().data)

    # Generalized eigendecomposition of the evoked covariance against
    # the signal covariance; components with the largest eigenvalues
    # maximize the evoked-to-signal ratio.
    evals, evecs = eigh(evoked_cov, sig_cov)
    descending = np.argsort(evals)[::-1]
    evecs = evecs[:, descending]

    # unit-normalize each spatial filter, then derive the patterns
    norms = np.sqrt(np.sum(evecs ** 2, axis=0))
    self._V = evecs / norms
    self._A = inv(self._V.T)
def comp_noise(sub_id): """ This function compute and save the noise covariance matrix from the empty room recordings """ fname = "sub_%d_empty_room.fif" % sub_id outname = "sub_%d_empty_room-cov.fif" % sub_id raw = mne.fiff.Raw(fname, preload=True) picks = mne.fiff.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True, exclude='bads') cov = mne.compute_raw_data_covariance(raw, picks=picks, reject=None) cov.save(outname)
def localize_activity(subj, tmax=np.Inf, clean_only=True, reg=0): import find_good_segments as fgs data_path = env.data + '/MEG_data/fifs/' fwd_path = env.data + '/MEG_data/analysis/rest/' fwd_fname = fwd_path + subj + '_rest_LP100_HP0.6_CP3_DS300_raw-5-fwd.fif' # preloading makes computing the covariance a lot faster raw = mne.fiff.Raw(data_path + subj + '_rest_LP100_HP0.6_CP3_DS300_raw.fif', preload=True) picks = mne.fiff.pick_channels_regexp(raw.info['ch_names'], 'M..-*') # we don't need to choose picks because we only work with MEG channels, and # all channels are good fwd = mne.read_forward_solution(fwd_fname) # Load real data as templates if clean_only: start, end, num_chans = fgs.find_good_segments(subj, threshold=3500e-15) if start == 0: start = start + 3 if end - start > tmax: start = end - tmax cov = mne.compute_raw_data_covariance(raw, tmin=start, tmax=end, picks=picks) else: cov = mne.compute_raw_data_covariance(raw, picks=picks) weights = calculate_weights(fwd, cov, reg=reg) data, times = raw[picks, raw.time_as_index(start):raw.time_as_index(end)] print 'Multiplying data by beamformer weights...' sol = np.dot(weights, data) src = mne.SourceEstimate(sol, [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']], times[0], times[1] - times[0]) return src
def test_xdawn_fit(): """Test Xdawn fit.""" # get data raw, events, picks = _get_data() epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, preload=True, baseline=None, verbose=False) # =========== Basic Fit test ================= # test base xdawn xd = Xdawn(n_components=2, correct_overlap='auto', signal_cov=None, reg=None) xd.fit(epochs) # with this parameters, the overlapp correction must be False assert_equal(xd.correct_overlap, False) # no overlapp correction should give averaged evoked evoked = epochs['cond2'].average() assert_array_equal(evoked.data, xd.evokeds_['cond2'].data) # ========== with signal cov provided ==================== # provide covariance object signal_cov = compute_raw_data_covariance(raw, picks=picks) xd = Xdawn(n_components=2, correct_overlap=False, signal_cov=signal_cov, reg=None) xd.fit(epochs) # provide ndarray signal_cov = np.eye(len(picks)) xd = Xdawn(n_components=2, correct_overlap=False, signal_cov=signal_cov, reg=None) xd.fit(epochs) # provide ndarray of bad shape signal_cov = np.eye(len(picks) - 1) xd = Xdawn(n_components=2, correct_overlap=False, signal_cov=signal_cov, reg=None) assert_raises(ValueError, xd.fit, epochs) # provide another type signal_cov = 42 xd = Xdawn(n_components=2, correct_overlap=False, signal_cov=signal_cov, reg=None) assert_raises(ValueError, xd.fit, epochs) # fit with baseline correction and ovverlapp correction should throw an # error epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, preload=True, baseline=(None, 0), verbose=False) xd = Xdawn(n_components=2, correct_overlap=True) assert_raises(ValueError, xd.fit, epochs)
def test_lcmv_raw(): """Test LCMV with raw data """ forward = mne.read_forward_solution(fname_fwd) label = mne.read_label(fname_label) noise_cov = mne.read_cov(fname_cov) raw = mne.fiff.Raw(fname_raw, preload=False) tmin, tmax = 0, 20 # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels # Set up pick list: EEG + MEG - bad channels (modify to your needs) left_temporal_channels = mne.read_selection('Left-temporal') picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, exclude='bads', selection=left_temporal_channels) noise_cov = mne.read_cov(fname_cov) noise_cov = mne.cov.regularize(noise_cov, raw.info, mag=0.05, grad=0.05, eeg=0.1, proj=True) start, stop = raw.time_as_index([tmin, tmax]) # use only the left-temporal MEG channels for LCMV picks = mne.fiff.pick_types(raw.info, meg=True, exclude='bads', selection=left_temporal_channels) data_cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax) stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=label, start=start, stop=stop, picks=picks) assert_array_almost_equal(np.array([tmin, tmax]), np.array([stc.times[0], stc.times[-1]]), decimal=2) # make sure we get an stc with vertices only in the lh vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']] assert_true(len(stc.vertno[0]) == len(np.intersect1d(vertno[0], label.vertices))) assert_true(len(stc.vertno[1]) == 0)
def create_noise_covariance_matrix(fname_empty_room, fname_out=None, verbose=None): """Creates the noise covariance matrix from an empty room file""" print ">>>> estimate noise covariance matrix from empty room file..." # read in data raw_empty = mne.fiff.Raw(fname_empty_room, verbose=verbose) # filter data # pick only MEG channels picks = mne.fiff.pick_types(raw_empty.info, meg=True, exclude='bads') # calculate noise-covariance matrix noise_cov_mat = mne.compute_raw_data_covariance(raw_empty, picks=picks, verbose=verbose) # write noise-covariance matrix to disk if fname_out is not None: mne.write_cov(fname_out, noise_cov_mat) return noise_cov_mat
stc_data[1] = np.roll(stc_data[1], 80) """ stc = generate_sparse_stc(fwd["src"], labels, stc_data, tmin, tstep) ############################################################################### # Generate noisy evoked data picks = mne.fiff.pick_types(raw.info, meg=True) iir_filter = iir_filter_raw(raw, order=5, picks=picks, tmin=60, tmax=180) evoked = generate_evoked(fwd, stc, evoked_template, cov, snr) # , iir_filter=iir_filter) raw[:306, :ntimes] = evoked.data raw = raw.crop(tmax=evoked.times[-1]) raw.save("t2-raw.fif") raw2 = mne.fiff.Raw("t2-raw.fif") cov = mne.compute_raw_data_covariance(raw2) dsim = stc.in_label(labels[0]) pl.plot(dsim.data.T) weights = ve.calculate_weights(fwd, cov, reg=0) d, _ = raw2[:306, :] sol = np.dot(weights, d) src = mne.SourceEstimate(sol, (fwd["src"][0]["vertno"], fwd["src"][1]["vertno"]), tmin, tstep) """ # plotting the localized source const = 1*10**-18 good_data = np.nonzero(abs(src.data[:,150])>const) lv = np.nonzero(abs(src.lh_data[:,150])>const)
def preprocess_raw(sub_id, session):
    """ This function preprocessess data """
    # Pipeline: load tSSS raw -> covariance -> low-pass filter -> ICA-based
    # EOG artifact removal -> epoching on trigger channel -> save outputs.
    # (Python 2 module: print statements kept as-is.)

    # SETUP AND LOAD FILES ####
    # name with subject id & session name
    fname = "sub_%d_%s" % (sub_id, session)
    # load the raw fif
    print '\nLoading raw file'
    raw = fiff.Raw(fname + "_tsss_mc.fif", preload=True)
    picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False, stim=False, exclude='bads')
    print 'Computing Covariance matrix'
    # covariance computed on the *unfiltered* data, before ICA
    cov = mne.compute_raw_data_covariance(raw, picks=picks, reject=None)

    # FILTER ####
    # filter raw, lp 128, bp at 50 & 100
    raw.filter(None, 128, n_jobs=n_jobs, verbose=True)
    # steps = np.arange(50, 151, 50)
    # print '\nBand stop filter at %s' % steps
    # raw.notch_filter(steps, n_jobs=n_jobs, verbose=True)

    # ICA ####
    print '\nRun ICA'
    ica = ICA(n_components=0.90, n_pca_components=64, max_pca_components=100, noise_cov=None, random_state=0)
    start, stop = None, None
    # decompose sources for raw data
    ica.decompose_raw(raw, start=start, stop=stop, picks=picks)
    # Pearson correlation of each ICA source with the two EOG channels
    corr = lambda x, y: np.array([pearsonr(a, y.ravel()) for a in x])[:, 0]
    eog_scores_1 = ica.find_sources_raw(raw, target='EOG001', score_func=corr)
    eog_scores_2 = ica.find_sources_raw(raw, target='EOG002', score_func=corr)
    # get maximum correlation index for EOG
    eog_source_idx_1 = np.abs(eog_scores_1).argmax()
    eog_source_idx_2 = np.abs(eog_scores_2).argmax()
    # We now add the eog artifacts to the ica.exclusion list
    if eog_source_idx_1 == eog_source_idx_2:
        ica.exclude += [eog_source_idx_1]
    elif eog_source_idx_1 != eog_source_idx_2:
        ica.exclude += [eog_source_idx_1, eog_source_idx_2]
    print eog_source_idx_1, eog_source_idx_2
    print ica.exclude
    # Restore sensor space data
    raw_ica = ica.pick_sources_raw(raw, include=None)

    # EPOCHS ####
    events = mne.find_events(raw_ica, stim_channel="STI101")
    # split trigger-1 events by what preceded them: 1->1 is "classic",
    # 2->1 is "interupt".  NOTE(review): these lists hold *row indices*
    # into `events`, not event samples -- confirm downstream usage.
    events_classic = []
    events_interupt = []
    for i in range(len(events)):
        if i > 0:
            if events[i, 2] == 1 and events[i - 1, 2] == 1:
                events_classic.append(i)
            elif events[i, 2] == 1 and events[i - 1, 2] == 2:
                events_interupt.append(i)

    picks = mne.fiff.pick_types(raw_ica.info, meg=True, eeg=False, eog=False, emg=True, stim=False, exclude='bads')
    reject = dict(grad=4000e-13)
    epochs = mne.Epochs(raw_ica, events[events_classic], event_id, tmin, tmax, proj=True, picks=picks, baseline=baseline, preload=False, reject=reject)

    # SAVE FILES ####
    raw_ica.save(fname + '_tsss_mc_ica.fif', overwrite=True)
    cov.save((fname + '_tsss_mc_cov.fif'))
    epochs.save(fname + '_tsss_mc_ica_epochs.fif')
last = stop raw_segment = raw_data[:, first:last] mu += raw_segment.sum(axis=1) data += np.dot(raw_segment, raw_segment.T) n_samples += raw_segment.shape[1] mu /= n_samples data -= n_samples * mu[:, None] * mu[None, :] data /= (n_samples - 1.0) ch_names = [raw.info['ch_names'][k] for k in picks] data_cov = mne.Covariance(None) data_cov.update(kind=mne.fiff.FIFF.FIFFV_MNE_NOISE_COV, diag=False, dim=len(data), names=ch_names, data=data, projs=cp.deepcopy(raw.info['projs']), bads=raw.info['bads'], nfree=n_samples, eig=None, eigvec=None) noise_cov = mne.compute_raw_data_covariance(er_raw) # note that MNE reads CTF data as magnetometers! noise_cov = mne.cov.regularize(noise_cov, raw.info, mag=noise_reg) events = fg.get_good_events(markers[subj], time, window_length) epochs = mne.Epochs(raw, events, None, 0, window_length, preload=True, baseline=None, detrend=0, picks=picks) stcs = mne.beamformer.lcmv_epochs(epochs, forward, noise_cov.as_diag(), data_cov, reg=data_reg, pick_ori='max-power') labels = [label.morph('fsaverage',subj) for label in net_labels] for label in labels: label_ts = [stc.in_label(label) for stc in stcs] con, freqs, times, n_epochs, n_tapers = mne.connectivity.spectral_connectivity(label_ts, method=method, mode='multitaper', sfreq=raw.info['sfreq'], fmin=[1,4,8,13,30], fmax=[4,8,13,30,50], faverage=True, n_jobs=1, mt_adaptive=False) il = np.tril_indices(label_ts[0].shape[0], k=-1) avg_con = [] for c in con: band_avg = []
data -= n_samples * mu[:, None] * mu[None, :] data /= (n_samples - 1.0) ch_names = [raw.info['ch_names'][k] for k in picks] data_cov = mne.Covariance(None) data_cov.update(kind=mne.fiff.FIFF.FIFFV_MNE_NOISE_COV, diag=False, dim=len(data), names=ch_names, data=data, projs=cp.deepcopy(raw.info['projs']), bads=raw.info['bads'], nfree=n_samples, eig=None, eigvec=None) noise_cov = mne.compute_raw_data_covariance(er_raw) # note that MNE reads CTF data as magnetometers! noise_cov = mne.cov.regularize(noise_cov, raw.info, mag=noise_reg) events = fg.get_good_events(markers[subj], time, window_length) epochs = mne.Epochs(raw, events, None, 0, window_length, preload=True, baseline=None, detrend=0, picks=picks) stcs = mne.beamformer.lcmv_epochs(epochs, forward,
stc_data[1] = np.roll(stc_data[1], 80) ''' stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep) ############################################################################### # Generate noisy evoked data picks = mne.fiff.pick_types(raw.info, meg=True) iir_filter = iir_filter_raw(raw, order=5, picks=picks, tmin=60, tmax=180) evoked = generate_evoked(fwd, stc, evoked_template, cov, snr) #, iir_filter=iir_filter) raw[:306,:ntimes] = evoked.data raw = raw.crop(tmax=evoked.times[-1]) raw.save('t2-raw.fif') raw2 = mne.fiff.Raw('t2-raw.fif') cov = mne.compute_raw_data_covariance(raw2) dsim = stc.in_label(labels[0]) pl.plot(dsim.data.T) weights = ve.calculate_weights(fwd, cov, reg=0) d, _ = raw2[:306,:] sol = np.dot(weights, d) src = mne.SourceEstimate(sol, (fwd['src'][0]['vertno'], fwd['src'][1]['vertno']), tmin, tstep) ''' # plotting the localized source const = 1*10**-18 good_data = np.nonzero(abs(src.data[:,150])>const) lv = np.nonzero(abs(src.lh_data[:,150])>const)
# Script: filter a subject's empty-room recording and save its noise
# covariance matrix.  (Python 2: print statement kept as-is.)
try:
    subject = sys.argv[1]  # Get the subject
except:
    print "Please run with input file provided. Exiting"
    sys.exit()

subjects_dir = '/home/qdong/freesurfer/subjects/'
subject_path = subjects_dir + subject  # Set the data path of the subject
raw_empty_fname = subject_path + '/MEG/%s_emptyroom.fif' % subject
raw_empty = mne.fiff.Raw(raw_empty_fname, preload=True)

# Filter the empty room data
picks_empty = mne.fiff.pick_types(raw_empty.info, meg=True, eeg=False, eog=True, ecg=True, stim=False, exclude='bads')
# flow/fhigh/filter_type/filter_order/njobs are module-level settings
raw_empty.filter(flow, fhigh, picks=picks_empty, n_jobs=njobs, method='iir', iir_params={'ftype': filter_type, 'order': filter_order})

# Get the basename
raw_empty_basename = os.path.splitext(os.path.basename(raw_empty_fname))[0]
# covariance on the filtered empty-room data, written next to the input
cov = mne.compute_raw_data_covariance(raw_empty, picks=picks_empty)
mne.write_cov(subject_path + '/MEG/%s_cov.fif' % (raw_empty_basename), cov)
def intra(subj):
    '''
    Performs initial computations within subject and returns average PSD and variance of all epochs.
    '''
    # NOTE(review): under Python 2 this prints the message five times
    # (string * 5); under Python 3 it would raise TypeError (None * 5).
    print('Now beginning intra processing on ' + subj + '...\n') * 5

    # Set function parameters
    fname_label = subjects_dir + '/' + subj + '/' + 'label/%s.label' % label_name
    fname_raw = data_path + subj + '/' + subj + '_rest_raw_sss.fif'
    if os.path.isfile(data_path + subj + '/' + subj + '_rest_raw_sss-ico-4-fwd.fif'):
        fname_fwd = data_path + subj + '/' + subj + '_rest_raw_sss-ico-4-fwd.fif'
    else:
        # NOTE(review): fname_fwd stays undefined on this branch, so the
        # read_forward_solution call below will raise NameError.
        print('Subject ' + subj + ' does not have a ico-4-fwd.fif on file.')

    # hemisphere is encoded in the label filename prefix
    if label_name.startswith('lh.'):
        hemi = 'left'
    elif label_name.startswith('rh.'):
        hemi = 'right'

    # Load data
    label = mne.read_label(fname_label)
    raw = fiff.Raw(fname_raw)
    forward_meg = mne.read_forward_solution(fname_fwd)

    # Estimate noise covariance from teh raw data
    cov = mne.compute_raw_data_covariance(raw, reject=dict(eog=150e-6))
    write_cov(data_path + subj + '/' + subj + '-cov.fif', cov)

    # Make inverse operator
    info = raw.info
    inverse_operator = make_inverse_operator(info, forward_meg, cov, loose=None, depth=0.8)

    # Epoch data into 4s intervals
    events = mne.make_fixed_length_events(raw, 1, start=0, stop=None, duration=4.)

    # Set up pick list: (MEG minus bad channels)
    include = []
    exclude = raw.info['bads']
    picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, include=include, exclude=exclude)

    # Read epochs and remove bad epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=True, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))

    # Pull data for averaging later
    epc_array = epochs.get_data()

    # Compute the inverse solution
    inv = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, label=label)

    #Need to add a line here to automatically create stc directory within subj
    epoch_num = 1
    epoch_num_str = str(epoch_num)
    for i in inv:
        # i.save(data_path + subj + '/tmp/' + label_name[3:] + '_rest_raw_sss-oct-6-inv' + epoch_num_str)
        i.save(data_path + subj + '/tmp/' + label_name[3:] + '_rest_raw_sss-ico-4-inv' + epoch_num_str)
        epoch_num = epoch_num + 1
        epoch_num_str = str(epoch_num)

    # The following is used to remove the empty opposing hemisphere files
    # and then move the files to save into the appropriate directory
    if hemi == 'left':
        filelist = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-rh.stc") ]
        for f in filelist:
            os.remove(data_path + subj + '/tmp/' + f)
        keepers = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-lh.stc") ]
        for f in keepers:
            src = f
            os.rename(data_path + subj + '/tmp/' + src, data_path + subj + '/inv/' + src)
    elif hemi == 'right':
        filelist = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-lh.stc") ]
        for f in filelist:
            os.remove(data_path + subj + '/tmp/' + f)
        keepers = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-rh.stc") ]
        for f in keepers:
            src = f
            os.rename(data_path + subj + '/tmp/' + src, data_path + subj + '/inv/' + src)

    # define frequencies of interest
    bandwidth = 4.  # bandwidth of the windows in Hz

    # compute source space psd in label
    # Note: By using "return_generator=True" stcs will be a generator object
    # instead of a list. This allows us so to iterate without having to
    # keep everything in memory.
    psd = compute_source_psd_epochs(epochs, inverse_operator, lambda2=lambda2, method=method, fmin=fmin, fmax=fmax, bandwidth=bandwidth, label=label, return_generator=False)

    epoch_num = 1
    epoch_num_str = str(epoch_num)
    for i in psd:
        i.save(data_path + subj + '/' + 'tmp' + '/' + label_name[3:] + '_dspm_snr-1_PSD' + epoch_num_str)
        epoch_num = epoch_num + 1
        epoch_num_str = str(epoch_num)

    # same tmp -> psd move/cleanup pattern as for the inverse files above
    if hemi == 'left':
        filelist = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-rh.stc") ]
        for f in filelist:
            os.remove(data_path + subj + '/tmp/' + f)
        keepers = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-lh.stc") ]
        for f in keepers:
            src = f
            os.rename(data_path + subj + '/tmp/' + src, data_path + subj + '/psd/' + src)
    elif hemi == 'right':
        filelist = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-lh.stc") ]
        for f in filelist:
            os.remove(data_path + subj + '/tmp/' + f)
        keepers = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-rh.stc") ]
        for f in keepers:
            src = f
            os.rename(data_path + subj + '/tmp/' + src, data_path + subj + '/psd/' + src)

    # This code computes the average PSDs of each epoch. Each PSD file is an array of shape N_vertices*N_frequencies. This code averages the PSD value of each vertex together and outputs the average PSD value of each frequency. Then, it averages the PSD values of each epoch, outputting one average PSD value per frequency value, i.e., this is the average across epochs.
    n_epochs = len(epc_array)
    for i, stc in enumerate(psd):
        if i >= n_epochs:
            break
        if i == 0:
            psd_avg = np.mean(stc.data, axis=0)
        else:
            psd_avg += np.mean(stc.data, axis=0)
    print('Length of psd for subject ' + subj + ' is ' + str(len(psd)) + '.')
    print('Number of epochs for subject ' + subj + ' is ' + str(n_epochs) + '.')
    if len(psd) != 0:
        psd_avg /= n_epochs

    # Compute variance for each epoch and then variance across epochs
    n_epochs = len(epc_array)
    for i, stc in enumerate(psd):
        if i >= n_epochs:
            # NOTE(review): np.array() with no argument raises TypeError;
            # this branch would fail if ever taken.
            psd_var = np.array()
            break
        if i == 0:
            psd_var = np.var(stc.data, axis=0)
        else:
            psd_var = np.vstack((psd_var, np.var(stc.data, axis=0)))
    if len(psd) >= 2:
        tot_var = np.var(psd_var, axis=0)
    # fewer than 2 PSD estimates -> report the subject as failed
    if len(psd) <= 1:
        failed_subj = subj
        print(failed_subj + ' failed. No PSD values calculated, likely because all epochs were rejected.')
        return failed_subj, failed_subj, failed_subj
    if len(psd) >= 2:
        return (psd_avg, tot_var, len(psd_avg))
def run():
    """End-to-end MNE sample-data tutorial pipeline: source spaces,
    preprocessing, covariances, forward/inverse operators, source
    estimates and a dipole fit.  (Python 2: print statement kept.)
    """
    args = sys.argv
    if len(args) <= 1:
        print 'Usage: run_meg_tutorial.sh <sample data directory>'
        return

    sample_dir = args[1]
    subjects_dir = join(sample_dir, 'subjects')
    meg_dir = join(sample_dir, 'MEG', 'sample')
    os.environ['SUBJECTS_DIR'] = subjects_dir
    os.environ['MEG_DIR'] = meg_dir
    subject = 'sample'
    src = setup_source_space(subject, fname=True, spacing='oct6', n_jobs=2, overwrite=True)

    # If one wanted to use other source spaces, these types of options are
    # available
    src_fsaverage = setup_source_space('fsaverage', fname=True, spacing='ico5', n_jobs=2, overwrite=True, add_dist=False)
    morph_source_spaces(src_fsaverage, subject_to='sample')
    setup_source_space(subject, fname=True, spacing='all', overwrite=True, n_jobs=2, add_dist=False)

    # Add distances to source space (if desired, takes a long time)
    bem_dir = join(subjects_dir, join('sample', 'bem'))
    os.rename(join(bem_dir, 'sample-oct-6-src.fif'), join(bem_dir, 'sample-oct-6-orig-src.fif'))
    new_src = add_source_space_distances(src, dist_limit=0.007)
    new_src.save(join(bem_dir, 'sample-oct-6-src.fif'))

    # Preprocessing
    raw = mne.io.Raw(join(meg_dir, 'sample_audvis_raw.fif'), preload=True)
    raw.info['bads'] = ['MEG 2443', 'EEG 053']
    reject = dict(grad=3000e-13, mag=4000e-15, eeg=100e-6)
    ecg_proj, _ = mne.preprocessing.compute_proj_ecg(raw, l_freq=1, h_freq=100, ch_name='MEG 1531', reject=reject)
    eog_proj, _ = mne.preprocessing.compute_proj_eog(raw, l_freq=1, h_freq=35, reject=reject, no_proj=True)
    events = mne.find_events(raw)
    mne.write_events(join(meg_dir, 'sample_audvis_raw-eve.fif'), events)
    event_id = [1, 2, 3, 4]
    tmin, tmax = -0.2, 0.5
    picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=True, eog=True)

    # Average with no filter
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    evoked = epochs.average()
    evoked.save(join(meg_dir, 'sample_audvis-no-filter-ave.fif'))

    raw.filter(l_freq=None, h_freq=40)
    raw_resampled = raw.resample(150)
    raw_resampled.save(join(meg_dir, 'sample_audvis_filt-0-40_raw.fif'), overwrite=True)

    raw.add_proj(ecg_proj)
    raw.add_proj(eog_proj)

    resampled_events = mne.find_events(raw_resampled)
    mne.write_events(join(meg_dir, 'sample_audvis_filt-0-40_raw-eve.fif'), resampled_events)

    # Average with filter
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    evoked = epochs.average()
    evoked.save(join(meg_dir, 'sample_audvis-ave.fif'))

    # Compute the noise covariance matrix
    noise_cov = mne.compute_raw_data_covariance(raw, picks=picks)
    noise_cov.save(join(meg_dir, 'audvis.cov'))

    # Compute the empty-room noise covariance matrix
    ernoise_raw = mne.io.Raw(join(meg_dir, 'ernoise_raw.fif'), preload=True)
    ernoise_raw.info['bads'] = ['MEG 2443']
    ernoise_raw.filter(l_freq=None, h_freq=40)
    picks = mne.pick_types(ernoise_raw.info, meg=True, eeg=True, stim=True, eog=True)
    ernoise_cov = mne.compute_raw_data_covariance(ernoise_raw, picks=picks)
    ernoise_cov.save(join(meg_dir, 'ernoise.cov'))

    ###########################################################################
    # Compute forward solution a.k.a. lead field
    trans = join(meg_dir, 'sample_audvis_raw-trans.fif')
    bem = join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
    # for MEG only
    fname = join(meg_dir, 'sample_audvis-meg-oct-6-fwd.fif')
    fwd_meg = mne.make_forward_solution(raw.info, trans, src, bem, fname=fname, meg=True, eeg=False, mindist=5.0, n_jobs=2, overwrite=True)

    # for EEG only
    bem = join(subjects_dir, 'sample', 'bem', 'sample-5120-5120-5120-bem-sol.fif')
    fname = join(meg_dir, 'sample_audvis-eeg-oct-6-fwd.fif')
    fwd_eeg = mne.make_forward_solution(raw.info, trans, src, bem, fname=fname, meg=False, eeg=True, mindist=5.0, n_jobs=2, overwrite=True)

    # for both EEG and MEG
    fname = join(meg_dir, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
    fwd = mne.make_forward_solution(raw.info, trans, src, bem, fname=fname, meg=True, eeg=True, mindist=5.0, n_jobs=2, overwrite=True)

    # Create various sensitivity maps
    grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='free')
    grad_map.save(join(meg_dir, 'sample_audvis-grad-oct-6-fwd-sensmap'), ftype='w')
    mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='free')
    mag_map.save(join(meg_dir, 'sample_audvis-mag-oct-6-fwd-sensmap'), ftype='w')
    eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='free')
    eeg_map.save(join(meg_dir, 'sample_audvis-eeg-oct-6-fwd-sensmap'), ftype='w')
    grad_map2 = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
    grad_map2.save(join(meg_dir, 'sample_audvis-grad-oct-6-fwd-sensmap-2'), ftype='w')
    mag_map2 = mne.sensitivity_map(fwd, ch_type='mag', mode='ratio')
    mag_map2.save(join(meg_dir, 'sample_audvis-mag-oct-6-fwd-sensmap-3'), ftype='w')

    # Compute some with the EOG + ECG projectors
    projs = ecg_proj + eog_proj + raw.info['projs']
    for map_type in ['radiality', 'angle', 'remaining', 'dampening']:
        eeg_map = mne.sensitivity_map(fwd, projs=projs, ch_type='eeg', mode=map_type)
        eeg_map.save(join(meg_dir, 'sample_audvis-eeg-oct-6-fwd-sensmap-' + map_type))

    ###########################################################################
    # Compute MNE inverse operators
    #
    # Note: The MEG/EEG forward solution could be used for all
    #
    inv_meg = make_inverse_operator(raw.info, fwd_meg, noise_cov, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-meg-oct-6-meg-inv.fif')
    write_inverse_operator(fname, inv_meg)

    inv_eeg = make_inverse_operator(raw.info, fwd_eeg, noise_cov, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-eeg-oct-6-eeg-inv.fif')
    write_inverse_operator(fname, inv_eeg)

    inv = make_inverse_operator(raw.info, fwd, noise_cov, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif')
    write_inverse_operator(fname, inv)

    # inverse operator with fixed orientation (for testing). Not implemented
    #inv_fixed = make_inverse_operator(raw.info, fwd_meg, noise_cov,
    #                                  depth=None, fixed=True)
    #fname = join(meg_dir, 'sample_audvis-meg-oct-6-meg-nodepth-fixed-inv.fif')
    #write_inverse_operator(fname, inv_fixed)

    # produce two with diagonal noise (for testing)
    diag = noise_cov.as_diag()
    inv_meg_diag = make_inverse_operator(raw.info, fwd_meg, diag, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-meg-oct-6-meg-diagnoise-inv.fif')
    write_inverse_operator(fname, inv_meg_diag)
    inv_eeg_diag = make_inverse_operator(raw.info, fwd, diag, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-meg-eeg-oct-6-meg-eeg-diagnoise-inv.fif')
    write_inverse_operator(fname, inv_eeg_diag)

    # Produce stc files
    evoked.crop(0, 0.25)
    stc_meg = apply_inverse(evoked, inv_meg, method='MNE')
    stc_meg.save(join(meg_dir, 'sample_audvis-meg'))
    stc_eeg = apply_inverse(evoked, inv_eeg, method='MNE')
    stc_eeg.save(join(meg_dir, 'sample_audvis-eeg'))
    stc = apply_inverse(evoked, inv, method='MNE')
    stc.save(join(meg_dir, 'sample_audvis-meg-eeg'))

    # let's also morph to fsaverage
    stc_to = stc_meg.morph('fsaverage', grade=3, smooth=12)
    stc_to.save(join(meg_dir, 'fsaverage_audvis-meg'))
    stc_to = stc_eeg.morph('fsaverage', grade=3, smooth=12)
    stc_to.save(join(meg_dir, 'fsaverage_audvis-eeg'))
    stc_to = stc.morph('fsaverage', grade=3, smooth=12)
    stc_to.save(join(meg_dir, 'fsaverage_audvis-meg-eeg'))

    ###########################################################################
    # Do one dipole fitting
    evoked = evoked.pick_types(meg=True, eeg=False)
    evoked.crop(0.04, 0.095)
    bem = join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
    dip, _ = mne.fit_dipole(evoked, noise_cov, bem, trans)
    dip.save(join(meg_dir, 'sample_audvis_set1.dip'))
# estimates the correlation among the data covariance matrices of different empty room recordings import mne import numpy as np import pylab as pl mats = [] er_fname = '/Users/sudregp/data/meg/empty_room_files.txt' er_dir = '/Users/sudregp/data/meg_empty_room/' fid = open(er_fname, 'r') er_files = [line.rstrip() for line in fid] noise_reg = .03 for er in er_files: er_raw = mne.fiff.Raw(er_dir + er, preload=True, compensation=3) picks = mne.fiff.pick_channels_regexp(er_raw.info['ch_names'], 'M..-*') # the later datasets are missing 2 channels, 'MLF25-1609', 'MLT31-1609', so we need to remove them if we want to do a correlation over time if len(picks)>271: picks = np.delete(picks, [32, 111]) er_raw.filter(l_freq=1, h_freq=50, picks=picks) noise_cov = mne.compute_raw_data_covariance(er_raw, picks=picks) noise_cov = mne.cov.regularize(noise_cov, er_raw.info, mag=noise_reg) mats.append(noise_cov.data) diags = [np.diag(m) for m in mats] diags = np.array(diags) corr = np.corrcoef(diags) pl.imshow(corr) pl.colorbar()
def run():
    """Run the full MNE sample-data tutorial pipeline.

    Expects the sample data directory as the first command-line argument and
    writes every derived product (source spaces, covariances, forward and
    inverse solutions, STCs, sensitivity maps, dipole fit) under that
    directory.
    """
    args = sys.argv
    if len(args) <= 1:
        # NOTE: Python 2 print statement (this script predates Python 3).
        print 'Usage: run_meg_tutorial.sh <sample data directory>'
        return

    sample_dir = args[1]
    subjects_dir = join(sample_dir, 'subjects')
    meg_dir = join(sample_dir, 'MEG', 'sample')
    # Downstream MNE tooling reads these environment variables.
    os.environ['SUBJECTS_DIR'] = subjects_dir
    os.environ['MEG_DIR'] = meg_dir

    subject = 'sample'
    src = setup_source_space(subject, fname=True, spacing='oct6', n_jobs=2,
                             overwrite=True)

    # If one wanted to use other source spaces, these types of options are
    # available
    src_fsaverage = setup_source_space('fsaverage', fname=True,
                                       spacing='ico5', n_jobs=2,
                                       overwrite=True, add_dist=False)
    morph_source_spaces(src_fsaverage, subject_to='sample')
    setup_source_space(subject, fname=True, spacing='all', overwrite=True,
                       n_jobs=2, add_dist=False)

    # Add distances to source space (if desired, takes a long time)
    bem_dir = join(subjects_dir, join('sample', 'bem'))
    # keep the original oct-6 source space around before overwriting it
    os.rename(join(bem_dir, 'sample-oct-6-src.fif'),
              join(bem_dir, 'sample-oct-6-orig-src.fif'))
    new_src = add_source_space_distances(src, dist_limit=0.007)
    new_src.save(join(bem_dir, 'sample-oct-6-src.fif'))

    # Preprocessing
    raw = mne.io.Raw(join(meg_dir, 'sample_audvis_raw.fif'), preload=True)
    raw.info['bads'] = ['MEG 2443', 'EEG 053']
    reject = dict(grad=3000e-13, mag=4000e-15, eeg=100e-6)
    ecg_proj, _ = mne.preprocessing.compute_proj_ecg(raw, l_freq=1,
                                                     h_freq=100,
                                                     ch_name='MEG 1531',
                                                     reject=reject)
    eog_proj, _ = mne.preprocessing.compute_proj_eog(raw, l_freq=1,
                                                     h_freq=35,
                                                     reject=reject,
                                                     no_proj=True)

    events = mne.find_events(raw)
    mne.write_events(join(meg_dir, 'sample_audvis_raw-eve.fif'), events)

    event_id = [1, 2, 3, 4]
    tmin, tmax = -0.2, 0.5
    picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=True, eog=True)

    # Average with no filter
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    evoked = epochs.average()
    evoked.save(join(meg_dir, 'sample_audvis-no-filter-ave.fif'))

    raw.filter(l_freq=None, h_freq=40)
    raw_resampled = raw.resample(150)
    raw_resampled.save(join(meg_dir, 'sample_audvis_filt-0-40_raw.fif'),
                       overwrite=True)

    raw.add_proj(ecg_proj)
    raw.add_proj(eog_proj)

    resampled_events = mne.find_events(raw_resampled)
    mne.write_events(join(meg_dir, 'sample_audvis_filt-0-40_raw-eve.fif'),
                     resampled_events)

    # Average with filter
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    evoked = epochs.average()
    evoked.save(join(meg_dir, 'sample_audvis-ave.fif'))

    # Compute the noise covariance matrix
    noise_cov = mne.compute_raw_data_covariance(raw, picks=picks)
    noise_cov.save(join(meg_dir, 'audvis.cov'))

    # Compute the empty-room noise covariance matrix
    ernoise_raw = mne.io.Raw(join(meg_dir, 'ernoise_raw.fif'), preload=True)
    ernoise_raw.info['bads'] = ['MEG 2443']
    ernoise_raw.filter(l_freq=None, h_freq=40)
    picks = mne.pick_types(ernoise_raw.info, meg=True, eeg=True, stim=True,
                           eog=True)
    ernoise_cov = mne.compute_raw_data_covariance(ernoise_raw, picks=picks)
    ernoise_cov.save(join(meg_dir, 'ernoise.cov'))

    ###########################################################################
    # Compute forward solution a.k.a. lead field
    trans = join(meg_dir, 'sample_audvis_raw-trans.fif')
    bem = join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')

    # for MEG only
    fname = join(meg_dir, 'sample_audvis-meg-oct-6-fwd.fif')
    fwd_meg = mne.make_forward_solution(raw.info, trans, src, bem,
                                        fname=fname, meg=True, eeg=False,
                                        mindist=5.0, n_jobs=2,
                                        overwrite=True)

    # for EEG only (requires the 3-layer BEM)
    bem = join(subjects_dir, 'sample', 'bem',
               'sample-5120-5120-5120-bem-sol.fif')
    fname = join(meg_dir, 'sample_audvis-eeg-oct-6-fwd.fif')
    fwd_eeg = mne.make_forward_solution(raw.info, trans, src, bem,
                                        fname=fname, meg=False, eeg=True,
                                        mindist=5.0, n_jobs=2,
                                        overwrite=True)

    # for both EEG and MEG
    fname = join(meg_dir, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
    fwd = mne.make_forward_solution(raw.info, trans, src, bem, fname=fname,
                                    meg=True, eeg=True, mindist=5.0,
                                    n_jobs=2, overwrite=True)

    # Create various sensitivity maps
    grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='free')
    grad_map.save(join(meg_dir, 'sample_audvis-grad-oct-6-fwd-sensmap'),
                  ftype='w')
    mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='free')
    mag_map.save(join(meg_dir, 'sample_audvis-mag-oct-6-fwd-sensmap'),
                 ftype='w')
    eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='free')
    eeg_map.save(join(meg_dir, 'sample_audvis-eeg-oct-6-fwd-sensmap'),
                 ftype='w')
    grad_map2 = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
    grad_map2.save(join(meg_dir, 'sample_audvis-grad-oct-6-fwd-sensmap-2'),
                   ftype='w')
    mag_map2 = mne.sensitivity_map(fwd, ch_type='mag', mode='ratio')
    mag_map2.save(join(meg_dir, 'sample_audvis-mag-oct-6-fwd-sensmap-3'),
                  ftype='w')

    # Compute some with the EOG + ECG projectors
    projs = ecg_proj + eog_proj + raw.info['projs']
    for map_type in ['radiality', 'angle', 'remaining', 'dampening']:
        eeg_map = mne.sensitivity_map(fwd, projs=projs, ch_type='eeg',
                                      mode=map_type)
        eeg_map.save(join(meg_dir,
                          'sample_audvis-eeg-oct-6-fwd-sensmap-' + map_type))

    ###########################################################################
    # Compute MNE inverse operators
    #
    # Note: The MEG/EEG forward solution could be used for all
    #
    inv_meg = make_inverse_operator(raw.info, fwd_meg, noise_cov, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-meg-oct-6-meg-inv.fif')
    write_inverse_operator(fname, inv_meg)

    inv_eeg = make_inverse_operator(raw.info, fwd_eeg, noise_cov, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-eeg-oct-6-eeg-inv.fif')
    write_inverse_operator(fname, inv_eeg)

    inv = make_inverse_operator(raw.info, fwd, noise_cov, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif')
    write_inverse_operator(fname, inv)

    # inverse operator with fixed orientation (for testing). Not implemented
    #inv_fixed = make_inverse_operator(raw.info, fwd_meg, noise_cov,
    #                                  depth=None, fixed=True)
    #fname = join(meg_dir, 'sample_audvis-meg-oct-6-meg-nodepth-fixed-inv.fif')
    #write_inverse_operator(fname, inv_fixed)

    # produce two with diagonal noise (for testing)
    diag = noise_cov.as_diag()
    inv_meg_diag = make_inverse_operator(raw.info, fwd_meg, diag, loose=0.2)
    fname = join(meg_dir, 'sample_audvis-meg-oct-6-meg-diagnoise-inv.fif')
    write_inverse_operator(fname, inv_meg_diag)
    # NOTE(review): built from the combined MEG+EEG forward 'fwd' (not
    # 'fwd_eeg') despite the variable name -- the output filename suggests
    # meg-eeg is intended; confirm.
    inv_eeg_diag = make_inverse_operator(raw.info, fwd, diag, loose=0.2)
    fname = join(meg_dir,
                 'sample_audvis-meg-eeg-oct-6-meg-eeg-diagnoise-inv.fif')
    write_inverse_operator(fname, inv_eeg_diag)

    # Produce stc files
    evoked.crop(0, 0.25)
    stc_meg = apply_inverse(evoked, inv_meg, method='MNE')
    stc_meg.save(join(meg_dir, 'sample_audvis-meg'))
    stc_eeg = apply_inverse(evoked, inv_eeg, method='MNE')
    stc_eeg.save(join(meg_dir, 'sample_audvis-eeg'))
    stc = apply_inverse(evoked, inv, method='MNE')
    stc.save(join(meg_dir, 'sample_audvis-meg-eeg'))

    # let's also morph to fsaverage
    stc_to = stc_meg.morph('fsaverage', grade=3, smooth=12)
    stc_to.save(join(meg_dir, 'fsaverage_audvis-meg'))
    stc_to = stc_eeg.morph('fsaverage', grade=3, smooth=12)
    stc_to.save(join(meg_dir, 'fsaverage_audvis-eeg'))
    stc_to = stc.morph('fsaverage', grade=3, smooth=12)
    stc_to.save(join(meg_dir, 'fsaverage_audvis-meg-eeg'))

    ###########################################################################
    # Do one dipole fitting
    evoked = evoked.pick_types(meg=True, eeg=False)
    evoked.crop(0.04, 0.095)
    bem = join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
    dip, _ = mne.fit_dipole(evoked, noise_cov, bem, trans)
    dip.save(join(meg_dir, 'sample_audvis_set1.dip'))
# Per-subject, per-band source-space frequency envelopes: band-pass the raw
# data, compute LCMV beamformer weights from the data covariance, take the
# Hilbert envelope, and save the result as a SourceEstimate.
# NOTE(review): relies on names defined earlier in the script and not visible
# here (subjs_fname, data_dir, bands, reg, calculate_weights, mne, np).
dir_out = "/users/sudregp/data/meg/"
fid = open(subjs_fname, "r")
subjs = [line.rstrip() for line in fid]
fid.close()
# generating source space frequency envelopes
for subj in subjs:
    raw_fname = data_dir + "fifs/%s_rest_LP100_CP3_DS300_raw.fif" % subj
    fwd_fname = data_dir + "analysis/rest/%s_rest_LP100_CP3_DS300_raw-5-fwd.fif" % subj
    forward = mne.read_forward_solution(fwd_fname, surf_ori=True)
    for l_freq, h_freq in bands:
        # we need to always reload raw because we bandpass and resample it
        raw = mne.fiff.Raw(raw_fname, preload=True)
        picks = mne.fiff.pick_channels_regexp(raw.info["ch_names"], "M..-*")
        raw.filter(l_freq, h_freq, picks=picks)
        data_cov = mne.compute_raw_data_covariance(raw, picks=picks)
        weights = calculate_weights(forward, data_cov.data, reg=reg)
        # downsample to 1 Hz effective sampling resolution. Note that the
        # paper did this after beamforming, but we are safe to do it here as
        # long as we get the covariance matrices before that. It'll make it
        # go faster this way
        raw.resample(1)
        # instead of getting the hilbert of the source space (costly), do the
        # Hilbert first and compute the envelope later
        raw.apply_hilbert(picks, envelope=False)
        data, times = raw[picks, :]
        # Python 2 print statement
        print "Multiplying data by beamformer weights..."
        # get the abs() of Hilbert transform (Hilbert envelope)
        sol = abs(np.dot(weights, data))
        stc = mne.SourceEstimate(
            sol, [forward["src"][0]["vertno"], forward["src"][1]["vertno"]],
            times[0], times[1] - times[0], subject=subj)
        stc.save(dir_out + "lcmv-%dto%d-" % (l_freq, h_freq) + subj)
# # morph all subjects
# Setup for reading the raw data raw = io.Raw(raw_fname, preload=True) raw.filter(1, 20, method='iir') # replace baselining with high-pass events = read_events(event_fname) raw.info['bads'] = ['MEG 2443'] # set bad channels picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False, exclude='bads') # Epoching epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False, picks=picks, baseline=None, preload=True, add_eeg_ref=False, verbose=False) # Plot image epoch before xdawn plot_image_epochs(epochs['vis_r'], picks=[230], vmin=-500, vmax=500) # Estimates signal covariance signal_cov = compute_raw_data_covariance(raw, picks=picks) # Xdawn instance xd = Xdawn(n_components=2, signal_cov=signal_cov) # Fit xdawn xd.fit(epochs) # Denoise epochs epochs_denoised = xd.apply(epochs) # Plot image epoch after xdawn plot_image_epochs(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500)
def test_rank():
    """Test cov rank estimation"""
    # Two recordings: plain sample data, and Maxwell-filtered (SSS) data
    # with SSP projectors added on top.
    raw_sample = Raw(raw_fname)
    raw_sss = Raw(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))
    # Covariances computed with and without the projectors applied.
    cov_sample = compute_raw_data_covariance(raw_sample)
    cov_sample_proj = compute_raw_data_covariance(
        raw_sample.copy().apply_proj())
    cov_sss = compute_raw_data_covariance(raw_sss)
    cov_sss_proj = compute_raw_data_covariance(
        raw_sss.copy().apply_proj())
    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)
    info_sample = pick_info(raw_sample.info, picks_all_sample)
    # (label, picks) stacks: EEG-only, MEG-only, and all channels.
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]
    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]
    # Cartesian product: every covariance variant x one scalings dict.
    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)]
    ))
    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:
            this_very_info = pick_info(this_info, picks)
            # compute subset of projs: only active projectors touching at
            # least one picked channel reduce the rank
            this_projs = [c['active'] and
                          len(set(c['data']['col_names'])
                              .intersection(set(this_very_info['ch_names']))) > 0
                          for c in cov['projs']]
            n_projs = sum(this_projs)
            # count channel types
            ch_types = [channel_type(this_very_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            # average-reference projector counts only when EEG is present
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0
            # check sss
            if 'proc_history' in this_very_info:
                # SSS data: MEG degrees of freedom come from the Maxwell
                # filter info, not the channel count
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                # non-SSS: each applicable projector removes one dimension
                expected_rank = n_meg + n_eeg - n_projs
            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
                                               scalings=scalings)
            assert_equal(expected_rank, est_rank)
def test_compute_LF_matrix():
    """Build BEM, source space, forward and inverse for one subject, apply
    the inverse to 30 s of raw data, and save the STC plus an HTML report.

    NOTE(review): despite the ``test_`` name this is a hard-coded pipeline
    (absolute paths, Python 2 prints, no assertions), not a unit test.
    """
    import os
    import os.path as op
    import nipype.pipeline.engine as pe
    from nipype.interfaces.mne import WatershedBEM
    import mne
    import mne.io as io
    from mne.minimum_norm import make_inverse_operator, apply_inverse_raw
    from mne.report import Report
    from nipype.utils.filemanip import split_filename as split_f

    # Hard-coded subject and directory layout
    main_path = '/home/karim/Documents/pasca/data/resting_state/'
    sbj_id = 'K0002'
    sbj_dir = op.join(main_path, 'FSF')
    bem_dir = op.join(sbj_dir, sbj_id, 'bem')
    surface_dir = op.join(sbj_dir, sbj_id, 'bem/watershed')
    data_dir = op.join(main_path, 'MEG')
    raw_fname = op.join(data_dir, '%s/%s_rest_tsss_mc.fif' % (sbj_id, sbj_id))

    # Load, band-pass and downsample the raw data
    raw = io.Raw(raw_fname, preload=True)
    picks = mne.pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')
    raw.filter(l_freq=0.1, h_freq=300, picks=picks, method='iir', n_jobs=2)
    raw.resample(sfreq=300, npad=0)

    report = Report()

    # BEM surfaces: run the watershed algorithm only when none exist yet
    surfaces = [sbj_id + '_brain_surface',
                sbj_id + '_inner_skull_surface',
                sbj_id + '_outer_skull_surface',
                sbj_id + '_outer_skin_surface']
    new_surfaces = ['brain.surf', 'inner_skull.surf', 'outer_skull.surf',
                    'outer_skin.surf']
    sbj_inner_skull_fname = op.join(bem_dir, sbj_id + '-' + new_surfaces[1])
    inner_skull_fname = op.join(bem_dir, new_surfaces[1])
    if not (op.isfile(sbj_inner_skull_fname) or
            op.isfile(inner_skull_fname)):
        bem_IF = WatershedBEM()
        bem_IF.inputs.subject_id = sbj_id
        bem_IF.inputs.subjects_dir = sbj_dir
        bem_IF.inputs.atlas_mode = True
        bem_IF.run()
        # copy the watershed outputs under their conventional names
        for i in range(len(surfaces)):
            os.system('cp %s %s' % (op.join(surface_dir, surfaces[i]),
                                    op.join(bem_dir,
                                            sbj_id + '-' + new_surfaces[i])))
    else:
        print '*** inner skull surface exists!!!'

    # BEM solution: computed via the external MNE-C tool when missing
    bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % sbj_id)
    if not op.isfile(bem):
        os.system('$MNE_ROOT/bin/mne_setup_forward_model --subject ' +
                  sbj_id + ' --homog --surf --ico 4')
    else:
        print '*** BEM solution file exists!!!'

    # Source space: reuse the saved one when available
    src_fname = op.join(bem_dir, '%s-ico-5-src.fif' % sbj_id)
    if not op.isfile(src_fname):
        src = mne.setup_source_space(sbj_id, fname=True, spacing='ico5',
                                     subjects_dir=sbj_dir, overwrite=True,
                                     n_jobs=2)
    else:
        print '*** source space file exists!!!'
        src = mne.read_source_spaces(src_fname)

    # Forward solution (lead field), converted to surface orientation
    trans_fname = op.join(data_dir, '%s/%s-trans.fif' % (sbj_id, sbj_id))
    data_path, basename, ext = split_f(raw_fname)
    fwd_filename = op.join(data_path, '%s-fwd.fif' % basename)
    forward = mne.make_forward_solution(raw_fname, trans_fname, src, bem,
                                        fwd_filename, n_jobs=2,
                                        overwrite=True)
    forward = mne.convert_forward_solution(forward, surf_ori=True)

    # Inverse: MNE solution on the first 30 s of raw data
    snr = 1.0
    lambda2 = 1.0 / snr ** 2
    method = 'MNE'
    reject = dict(mag=4e-12, grad=4e-10, eog=0.00025)
    noise_cov = mne.compute_raw_data_covariance(raw, picks=picks,
                                                reject=reject)
    inverse_operator = make_inverse_operator(raw.info, forward, noise_cov,
                                             loose=0.2, depth=0.8)
    start, stop = raw.time_as_index([0, 30])
    stc = apply_inverse_raw(raw, inverse_operator, lambda2, method,
                            label=None, start=start, stop=stop,
                            pick_ori=None)
    print '***'
    # NOTE(review): bare expression, result discarded -- probably meant
    # 'print stc.shape'.
    stc.shape
    print '***'
    subj_path, basename, ext = split_f(raw_fname)
    stc_filename = op.join(subj_path, basename)
    stc.save(stc_filename)
    report_filename = op.join(subj_path, basename + '-BEM-report.html')
    print report_filename
    report.save(report_filename, open_browser=False, overwrite=True)
    return
def intra(subj_list, fmin, fmax):
    '''Run the per-subject pipeline: noise covariance, events, epochs,
    evoked average, inverse operator, dSPM inverse solution, and source PSD,
    writing each product next to the subject's raw file.

    NOTE(review): relies on module-level names defined elsewhere in the
    script (data_path, event_id, tmin, tmax, lambda2, fiff, write_cov,
    make_inverse_operator, apply_inverse, compute_source_psd).
    '''
    for subj in subj_list:
        # Python 2 print statement: banner string is printed repeated 5x
        # (parsed as print((...) * 5)); would raise TypeError on Python 3.
        print('Now beginning intra processing on ' + subj + '...\n') * 5
        # Set function parameters
        fname_raw = data_path + subj[:5] + '/' + subj
        fname_fwd = data_path + subj[:5] + '/' + subj[:-4] + '-ico-4-fwd.fif'
        # Load data
        raw = fiff.Raw(fname_raw)
        forward_meg = mne.read_forward_solution(fname_fwd)
        # Estimate noise covariance from the raw data
        precov = mne.compute_raw_data_covariance(raw,
                                                 reject=dict(eog=150e-6))
        write_cov(data_path + subj[:5] + '/' + subj[:-4] + '-cov.fif',
                  precov)
        # Find events from raw file
        events = mne.find_events(raw, stim_channel='STI 014')
        # Write events to file
        mne.write_events(data_path + subj[:5] + '/' + subj[:-4] + '-eve.fif',
                         events)
        # Set up pick list:
        include = []
        exclude = raw.info['bads']
        picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True,
                                eog=True, include=include, exclude=exclude)
        # Read epochs and remove bad epochs
        epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                            picks=picks, baseline=(None, 0), preload=True,
                            reject=dict(grad=4000e-13, mag=4e-12,
                                        eog=150e-6))
        # Average epochs to produce an evoked dataset, then write to disk
        evoked = epochs.average()
        evoked.save(data_path + subj[:5] + '/' + subj[:-4] + '-ave.fif')
        # Regularize noise cov
        cov = mne.cov.regularize(precov, evoked.info, grad=0.05, mag=0.05,
                                 eeg=0.1, proj=True)
        # Restrict forward solution as necessary for MEG
        restricted_fwd = mne.fiff.pick_types_forward(forward_meg, meg=True,
                                                     eeg=False)
        # Make inverse operator
        info = evoked.info
        inverse_operator = make_inverse_operator(info, restricted_fwd, cov,
                                                 loose=None, depth=0.8)
        # Pull data for averaging later
        epc_array = epochs.get_data()
        # Compute the inverse solution
        inv = apply_inverse(evoked, inverse_operator, lambda2, "dSPM",
                            pick_normal=False)
        # NOTE(review): this saves the SourceEstimate under an '-inv.fif'
        # name, which is the conventional suffix for inverse operators --
        # confirm the intended filename.
        inv.save(data_path + subj[:5] + '/' + subj[:-4] + '-inv.fif')
        # picks MEG gradiometers
        picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
                                stim=False, exclude=exclude)
        # Compute source power spectral density and save to file
        psd = compute_source_psd(raw, inverse_operator, method='dSPM',
                                 lambda2=lambda2, fmin=fmin, fmax=fmax,
                                 NFFT=2048)
        psd.save(data_path + subj[:5] + '/' + subj[:-4] + '-psd.fif')
Process the files:
    <subject>_emptyroom.fif
Compute the noise matrix

Author: Praveen, Qunxi
"""
import mne, sys, os
import pylab as pl
import numpy as np

# Band-pass settings for the empty-room data (IIR Butterworth, order 4).
flow, fhigh = 1.0, 45.0
filter_type = 'butter'
filter_order = 4
njobs = 4

try:
    subject = sys.argv[1]  # Get the subject
except:
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # 'except IndexError' would be the precise guard here.
    print "Please run with input file provided. Exiting"
    sys.exit()

subjects_dir = '/home/qdong/freesurfer/subjects/'
subject_path = subjects_dir + subject  # Set the data path of the subject
raw_empty_fname = subject_path + '/MEG/%s_emptyroom.fif' % subject
raw_empty = mne.fiff.Raw(raw_empty_fname, preload=True)

# Filter the empty room data
picks_empty = mne.fiff.pick_types(raw_empty.info, meg=True, eeg=False,
                                  eog=True, ecg=True, stim=False,
                                  exclude='bads')
raw_empty.filter(flow, fhigh, picks=picks_empty, n_jobs=njobs, method='iir',
                 iir_params={'ftype': filter_type, 'order': filter_order})

# Get the basename
raw_empty_basename = os.path.splitext(os.path.basename(raw_empty_fname))[0]

# Noise covariance from the filtered empty-room recording, saved per subject.
cov = mne.compute_raw_data_covariance(raw_empty, picks=picks_empty)
mne.write_cov(subject_path + '/MEG/%s_cov.fif' % (raw_empty_basename), cov)
# Per-subject, per-band source-space frequency envelopes: band-pass the raw
# data, compute LCMV beamformer weights from the data covariance, take the
# Hilbert envelope, and save the result as a SourceEstimate.
# NOTE(review): relies on names defined earlier in the script and not visible
# here (subjs_fname, data_dir, bands, reg, calculate_weights, mne, np).
dir_out = '/users/sudregp/data/meg/'
fid = open(subjs_fname, 'r')
subjs = [line.rstrip() for line in fid]
fid.close()
# generating source space frequency envelopes
for subj in subjs:
    raw_fname = data_dir + 'fifs/%s_rest_LP100_CP3_DS300_raw.fif' % subj
    fwd_fname = data_dir + 'analysis/rest/%s_rest_LP100_CP3_DS300_raw-5-fwd.fif' % subj
    forward = mne.read_forward_solution(fwd_fname, surf_ori=True)
    for l_freq, h_freq in bands:
        # we need to always reload raw because we bandpass and resample it
        raw = mne.fiff.Raw(raw_fname, preload=True)
        picks = mne.fiff.pick_channels_regexp(raw.info['ch_names'], 'M..-*')
        raw.filter(l_freq, h_freq, picks=picks)
        data_cov = mne.compute_raw_data_covariance(raw, picks=picks)
        weights = calculate_weights(forward, data_cov.data, reg=reg)
        # downsample to 1 Hz effective sampling resolution. Note that the
        # paper did this after beamforming, but we are safe to do it here as
        # long as we get the covariance matrices before that. It'll make it
        # go faster this way
        raw.resample(1)
        # instead of getting the hilbert of the source space (costly), do the
        # Hilbert first and compute the envelope later
        raw.apply_hilbert(picks, envelope=False)
        data, times = raw[picks, :]
        # Python 2 print statement
        print 'Multiplying data by beamformer weights...'
        # get the abs() of Hilbert transform (Hilbert envelope)
        sol = abs(np.dot(weights, data))
        stc = mne.SourceEstimate(
            sol, [forward['src'][0]['vertno'], forward['src'][1]['vertno']],
            times[0], times[1] - times[0], subject=subj)
        stc.save(dir_out + 'lcmv-%dto%d-' % (l_freq, h_freq) + subj)
# License: BSD (3-clause)

# FIX: parenthesized single-argument print works identically on Python 2
# and is valid Python 3 (the bare statement form is Python 2-only).
print(__doc__)

import mne
from mne import fiff
from mne.datasets import sample

data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis_raw.fif'

raw = fiff.Raw(fname)

include = []  # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053']  # bads + 1 more

# pick MEG + EEG (+ EOG) channels, excluding bads
# (the original comment said "EEG channels" but meg=True is passed too)
picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
                        include=include, exclude='bads')
# setup rejection: amplitude thresholds applied while accumulating
reject = dict(eeg=80e-6, eog=150e-6)

# Compute the covariance from the raw (continuous) data
cov = mne.compute_raw_data_covariance(raw, picks=picks, reject=reject)
print(cov)

###############################################################################
# Show covariance
mne.viz.plot_cov(cov, raw.info, colorbar=True, proj=True)
# try setting proj to False to see the effect
def intra(subj):
    """
    Performs initial computations within subject and returns average PSD and
    variance of all epochs.

    Returns (psd_avg, tot_var, n_freqs) on success, or the subject id three
    times when fewer than two PSD estimates survive rejection.

    NOTE(review): relies on module-level configuration defined elsewhere in
    the script (subjects_dir, data_path, label_name, list_num, event_id,
    tmin, tmax, lambda2, method, fmin, fmax).
    """
    # Python 2 print statement: banner printed repeated 5x
    # (parsed as print((...) * 5)); would raise TypeError on Python 3.
    print ("Now beginning intra processing on " + subj + "...\n") * 5
    # Set function parameters
    fname_label = subjects_dir + "/" + subj + "/" + "label/%s.label" % label_name
    # NOTE(review): fname_raw points at the *forward* file
    # (..._raw_sss-ico-4-fwd.fif), not a raw recording -- looks like a
    # copy/paste slip; confirm the intended raw filename.
    fname_raw = data_path + subj + "/" + subj + "_list" + list_num + "_raw_sss-ico-4-fwd.fif"
    if os.path.isfile(data_path + subj + "/" + subj + "_list" + list_num + "_raw_sss-ico-4-fwd.fif"):
        fname_fwd = data_path + subj + "/" + subj + "_list" + list_num + "_raw_sss-ico-4-fwd.fif"
    else:
        # NOTE(review): fname_fwd stays undefined on this path, so the
        # read_forward_solution call below would raise NameError.
        print ("Subject " + subj + " does not have a ico-4-fwd.fif on file.")
    # Hemisphere is encoded in the label file name prefix.
    if label_name.startswith("lh."):
        hemi = "left"
    elif label_name.startswith("rh."):
        hemi = "right"
    # Load data
    label = mne.read_label(fname_label)
    raw = fiff.Raw(fname_raw)
    forward_meg = mne.read_forward_solution(fname_fwd)
    # Estimate noise covariance from the raw data.
    precov = mne.compute_raw_data_covariance(raw, reject=dict(eog=150e-6))
    write_cov(data_path + subj + "/" + subj + "-cov.fif", precov)
    # Find events from raw file
    events = mne.find_events(raw, stim_channel="STI 014")
    # Set up pick list: (MEG minus bad channels)
    include = []
    exclude = raw.info["bads"]
    picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False,
                            eog=True, include=include, exclude=exclude)
    # Read epochs and remove bad epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        picks=picks, baseline=(None, 0), preload=True,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    # Average epochs and get an evoked dataset. Save to disk.
    evoked = epochs.average()
    evoked.save(data_path + subj + "/" + subj + "_list" + list_num +
                "_rest_raw_sss-ave.fif")
    # Regularize noise cov
    cov = mne.cov.regularize(precov, evoked.info, grad=4000e-13, mag=4e-12,
                             eog=150e-6, proj=True)
    # Make inverse operator
    info = evoked.info
    inverse_operator = make_inverse_operator(info, forward_meg, cov,
                                             loose=None, depth=0.8)
    # Pull data for averaging later
    epc_array = epochs.get_data()
    # Compute the inverse solution (one SourceEstimate per epoch)
    inv = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
                               label=label)
    # Need to add a line here to automatically create stc directory within subj
    epoch_num = 1
    epoch_num_str = str(epoch_num)
    for i in inv:
        i.save(data_path + subj + "/tmp/" + label_name[3:] +
               "_rest_raw_sss-ico-4-inv" + epoch_num_str)
        epoch_num = epoch_num + 1
        epoch_num_str = str(epoch_num)
    # The following is used to remove the empty opposing hemisphere files
    # and then move the files to save into the appropriate directory
    if hemi == "left":
        filelist = [f for f in os.listdir(data_path + subj + "/tmp")
                    if f.endswith("-rh.stc")]
        for f in filelist:
            os.remove(data_path + subj + "/tmp/" + f)
        keepers = [f for f in os.listdir(data_path + subj + "/tmp")
                   if f.endswith("-lh.stc")]
        for f in keepers:
            src = f
            os.rename(data_path + subj + "/tmp/" + src,
                      data_path + subj + "/inv/" + src)
    elif hemi == "right":
        filelist = [f for f in os.listdir(data_path + subj + "/tmp")
                    if f.endswith("-lh.stc")]
        for f in filelist:
            os.remove(data_path + subj + "/tmp/" + f)
        keepers = [f for f in os.listdir(data_path + subj + "/tmp")
                   if f.endswith("-rh.stc")]
        for f in keepers:
            src = f
            os.rename(data_path + subj + "/tmp/" + src,
                      data_path + subj + "/inv/" + src)
    # define frequencies of interest
    bandwidth = 4.0  # bandwidth of the windows in Hz
    # compute source space psd in label
    # Note: By using "return_generator=True" stcs will be a generator object
    # instead of a list. This allows us so to iterate without having to
    # keep everything in memory.
    psd = compute_source_psd_epochs(epochs, inverse_operator,
                                    lambda2=lambda2, method=method,
                                    fmin=fmin, fmax=fmax,
                                    bandwidth=bandwidth, label=label,
                                    return_generator=False)
    epoch_num = 1
    epoch_num_str = str(epoch_num)
    for i in psd:
        i.save(data_path + subj + "/" + "tmp" + "/" + label_name[3:] +
               "_dspm_snr-1_PSD" + epoch_num_str)
        epoch_num = epoch_num + 1
        epoch_num_str = str(epoch_num)
    # Same hemisphere cleanup/move as above, but into the psd/ directory.
    if hemi == "left":
        filelist = [f for f in os.listdir(data_path + subj + "/tmp")
                    if f.endswith("-rh.stc")]
        for f in filelist:
            os.remove(data_path + subj + "/tmp/" + f)
        keepers = [f for f in os.listdir(data_path + subj + "/tmp")
                   if f.endswith("-lh.stc")]
        for f in keepers:
            src = f
            os.rename(data_path + subj + "/tmp/" + src,
                      data_path + subj + "/psd/" + src)
    elif hemi == "right":
        filelist = [f for f in os.listdir(data_path + subj + "/tmp")
                    if f.endswith("-lh.stc")]
        for f in filelist:
            os.remove(data_path + subj + "/tmp/" + f)
        keepers = [f for f in os.listdir(data_path + subj + "/tmp")
                   if f.endswith("-rh.stc")]
        for f in keepers:
            src = f
            os.rename(data_path + subj + "/tmp/" + src,
                      data_path + subj + "/psd/" + src)
    # This code computes the average PSDs of each epoch. Each PSD file is an
    # array of shape N_vertices*N_frequencies. This code averages the PSD
    # value of each vertex together and outputs the average PSD value of
    # each frequency. Then, it averages the PSD values of each epoch,
    # outputting one average PSD value per frequency value, i.e., this is
    # the average across epochs.
    n_epochs = len(epc_array)
    for i, stc in enumerate(psd):
        if i >= n_epochs:
            break
        if i == 0:
            psd_avg = np.mean(stc.data, axis=0)
        else:
            psd_avg += np.mean(stc.data, axis=0)
    print ("Length of psd for subject " + subj + " is " + str(len(psd)) + ".")
    print ("Number of epochs for subject " + subj + " is " + str(n_epochs) + ".")
    if len(psd) != 0:
        # NOTE(review): divides by n_epochs (= len(epc_array)), not
        # len(psd); ZeroDivisionError if psd is non-empty while epc_array
        # is empty -- confirm intended denominator.
        psd_avg /= n_epochs
    # Compute variance for each epoch and then variance across epochs
    n_epochs = len(epc_array)
    for i, stc in enumerate(psd):
        if i >= n_epochs:
            # NOTE(review): np.array() with no argument raises TypeError;
            # probably meant np.array([]).
            psd_var = np.array()
            break
        if i == 0:
            psd_var = np.var(stc.data, axis=0)
        else:
            psd_var = np.vstack((psd_var, np.var(stc.data, axis=0)))
    if len(psd) >= 2:
        tot_var = np.var(psd_var, axis=0)
    if len(psd) <= 1:
        failed_subj = subj
        print (failed_subj + " failed. No PSD values calculated, likely because all epochs were rejected.")
        return failed_subj, failed_subj, failed_subj
    if len(psd) >= 2:
        return (psd_avg, tot_var, len(psd_avg))
Estimate covariance matrix from a raw FIF file ============================================== """ # Author: Alexandre Gramfort <*****@*****.**> # # License: BSD (3-clause) print __doc__ import mne from mne import fiff from mne.datasets import sample data_path = sample.data_path('.') fname = data_path + '/MEG/sample/sample_audvis_raw.fif' raw = fiff.Raw(fname) # Set up pick list: MEG + STI 014 - bad channels cov = mne.compute_raw_data_covariance(raw, reject=dict(eeg=40e-6, eog=150e-6)) print cov ############################################################################### # Show covariance import pylab as pl pl.figure() pl.imshow(cov.data, interpolation="nearest") pl.title('Full covariance matrix') pl.show()
Estimate covariance matrix from a raw FIF file ============================================== """ # Author: Alexandre Gramfort <*****@*****.**> # # License: BSD (3-clause) print __doc__ import mne from mne import fiff from mne.datasets import sample data_path = sample.data_path('.') fname = data_path + '/MEG/sample/sample_audvis_raw.fif' raw = fiff.Raw(fname) # Compute the covariance from the raw data cov = mne.compute_raw_data_covariance(raw, reject=dict(eeg=80e-6, eog=150e-6)) print cov ############################################################################### # Show covariance mne.viz.plot_cov(cov, raw.info, exclude=raw.info['bads'], colorbar=True, proj=True) # try setting proj to False to see the effect
# Per-run covariance computation.
# NOTE(review): fragment -- subjID, sessID, runID, eve_file, io, event_id,
# tmin and tmax are defined earlier in the script.
print runID
data_path = '/home/custine/MEG/data/krns_kr3/' + subjID + '/' + sessID
fname = data_path + '/' + subjID + '_' + sessID + '_' + runID + '_raw.fif'
print fname
# Output covariance name, per-run log file, and trigger/event file.
cname = data_path + '/cov/' + subjID + '_' + sessID + '_' + runID + '-cov.fif'
covLog_file = data_path + '/logs/' + subjID + '_' + sessID + '_' + runID + '_cov.log'
event_file = data_path + '/eve/triggers/' + subjID + '_' + sessID + '_' + runID + '_' + eve_file
mne.set_log_file(fname=covLog_file, overwrite=True)
print covLog_file
print 'Reading Raw data... '
raw = io.Raw(fname)
if runID == "emptyroom":
    # Empty-room run: covariance over a fixed 2 s window of continuous data.
    tmin = 0
    tmax = 2
    cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax)  # , tmin = None, tmax = 0) #, reject = None, picks = picks)
    print cov
else:
    # Task run: epoch around the triggers with amplitude-based rejection.
    # NOTE(review): 'cname' is never written in this fragment -- the actual
    # covariance save presumably happens later in the script.
    events = mne.read_events(event_file)
    include = []  # or stim channels ['STI 014'] #
    #raw.info['bads'] += ['EEG 053']  # bads + 1 more #
    # pick EEG channels
    picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False,
                           eog=True, include=include, exclude='bads')
    reject = dict(mag=4e-12, grad=4000e-13)
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        baseline=(None, 0), picks=picks, proj=True,
                        preload=True, reject=reject)
    print epochs
    # ##Example: reject = dict(grad=4000e-13,  # T / m (gradiometers)
    #                          mag=4e-12,      # T (magnetometers)
    #                          eeg=40e-6,      # uV (EEG channels)
    #                          eog=250e-6      # uV (EOG channels)
# Estimate the correlation among the data covariance matrices of different
# empty-room recordings: build one regularized noise covariance per recording,
# then correlate the recordings via their covariance diagonals (per-channel
# variances) and show the correlation matrix.
import mne
import numpy as np
import pylab as pl

mats = []  # one covariance matrix (ndarray) per empty-room file
er_fname = '/Users/sudregp/data/meg/empty_room_files.txt'
er_dir = '/Users/sudregp/data/meg_empty_room/'
# FIX: use a context manager so the listing file is closed deterministically
# (the original left the handle open for the life of the process)
with open(er_fname, 'r') as fid:
    er_files = [line.rstrip() for line in fid]
noise_reg = .03  # magnetometer regularization factor
for er in er_files:
    # CTF data: apply 3rd-order gradient compensation at load time
    er_raw = mne.fiff.Raw(er_dir + er, preload=True, compensation=3)
    picks = mne.fiff.pick_channels_regexp(er_raw.info['ch_names'], 'M..-*')
    # the later datasets are missing 2 channels ('MLF25-1609', 'MLT31-1609'),
    # so we need to remove them from the older recordings if we want the
    # matrices to stay comparable over time
    if len(picks) > 271:
        picks = np.delete(picks, [32, 111])
    er_raw.filter(l_freq=1, h_freq=50, picks=picks)
    noise_cov = mne.compute_raw_data_covariance(er_raw, picks=picks)
    noise_cov = mne.cov.regularize(noise_cov, er_raw.info, mag=noise_reg)
    mats.append(noise_cov.data)
# correlate recordings using only per-channel variances (the diagonals)
diags = [np.diag(m) for m in mats]
diags = np.array(diags)
corr = np.corrcoef(diags)
pl.imshow(corr)
pl.colorbar()