def test_event():
    """Verify that the ``on_process`` event callback gets fired."""
    on_process_cb = Mock()
    params = mnefun.Params()
    setdefaults(params)
    params.on_process = on_process_cb
    # Stub out the epoching step; mnefun should still fire the event with
    # the (message, function, result, params) signature.
    with patch('mnefun._mnefun.save_epochs') as func:
        mnefun.do_processing(params, write_epochs=True)
        on_process_cb.assert_called_with(
            'Doing epoch EQ/DQ', func, func(), params)
def params(tmpdir):
    """Param fixture: a minimal mnefun.Params rooted at *tmpdir*."""
    p = mnefun.Params()
    p.work_dir = str(tmpdir)
    p.score = Mock()
    # One anonymous subject, no runs/analyses defined.
    defaults = dict(
        subject_indices=[], subjects=[None], structurals=[None],
        dates=[None], in_names=[], in_numbers=[], analyses=[],
        out_names=[], out_numbers=[], must_match=[], decim=5,
        plot_drop_logs=False,
    )
    for attr, value in defaults.items():
        setattr(p, attr, value)
    return p
##ECG_channel = 'MEG1531'/NO EOG channel: # 110, 114, 117, 118, 135 + 131, 145 ##ECG_channel = 'MEG0143'/NO EOG channel # 130 ##No heart artifact in sss data/NO EOG channel # 133, 134 ## 120 Only 8/1076 good ECG epochs found (bad coils) ## 135 Only 12/1076 good ECG epochs found (bad coils) params = mnefun.Params(tmin=-0.1, tmax=1.2, n_jobs=18, decim=2, proj_sfreq=200, n_jobs_fir='cuda', filter_length='5s', lp_cut=80., n_jobs_resample='cuda', bmin=-0.1, bem_type='5120', ecg_channel='MEG1531') params.subjects = [ 'sasi_110', 'sasi_114', 'sasi_117', 'sasi_118', 'sasi_120', 'sasi_121', 'sasi_129', 'sasi_130', 'sasi_131', 'sasi_133', 'sasi_134', 'sasi_135', 'sasi_137', 'sasi_141', 'sasi_143', 'sasi_144', 'sasi_145', 'sasi_147' ] params.structurals = params.subjects params.dates = [(2013, 0, 00)] * len(params.subjects) params.subject_indices = np.setdiff1d(np.arange(len(params.subjects)), [])
# to 5 by default, which makes the negative stopping frequency (0.03-5). # It appears that currently data are acquired (online) using bandpass filter # (0.03 - 326.4 Hz), so it might be okay not doing offline highpass filtering. # It's worth checking later though. However, I think we should do baseline # correction by setting bmin and bmax. I found that mnefun does baseline # correction by default. # sjjoo_20160809: Commented params = mnefun.Params( tmin=-0.1, tmax=0.9, n_jobs=18, # t_adjust was -39e-3 decim=2, n_jobs_mkl=1, proj_sfreq=250, n_jobs_fir='cuda', n_jobs_resample='cuda', filter_length='5s', epochs_type='fif', lp_cut=40., # hp_cut=0.15,hp_trans=0.1, bmin=-0.1, auto_bad=20., plot_raw=False, bem_type='5120-5120-5120') # This sets the position of the head relative to the sensors. These values a # A typical head position. So now in sensor space everyone is aligned. However # We should also note that for source analysis it is better to leave this as # the mne-fun default params.trans_to = (0., 0., .035)
# tmin, tmax: set the epoch window.
# bmin, bmax: set the prestim duration for baseline correction (bmax is 0.0
#     by default; see _mnefun.py — baseline correction runs by default).
# hp_cut, lp_cut: cutoff frequencies for highpass and lowpass.
# hp_cut of 0.03 is problematic because the transition band defaults to
# 5 Hz, which makes the stopping frequency negative (0.03 - 5). Data are
# acquired online with a 0.03-326.4 Hz bandpass, so skipping offline
# highpass filtering is probably OK (worth re-checking later), but baseline
# correction via bmin/bmax should still be done.
# sjjoo_20160809: Commented
params = mnefun.Params(tmin=-0.2, tmax=1.0, t_adjust=-39e-3, n_jobs=18,
                       decim=2, n_jobs_mkl=1, proj_sfreq=250,
                       n_jobs_fir='cuda', n_jobs_resample='cuda',
                       filter_length='5s', epochs_type='fif', lp_cut=40,
                       bmin=-0.2, auto_bad=20., plot_raw=False,
                       bem_type='5120')
# Resolved merge conflict: both branches assigned the same head position;
# keep the assignment once, with both sides' commentary merged.
# This sets the position of the head relative to the sensors (a typical
# head position), so in sensor space every subject is aligned to a common
# reference frame. For source analysis it is better to leave this at the
# mnefun default.
params.trans_to = (0., 0., .04)
params.sss_type = 'python'
params.sss_regularize = 'svd'  # 'in' by default
# 60 s works for adults without much head movement; this was previously 6.
params.tsss_dur = 10.
"both", subjects_dir=defaults.subjects_dir) rois = [roi for roi in all_rois if not roi.name.startswith("unknown")] roi_nms = [rr.name for rr in rois] n = len(roi_nms) laplacian = np.zeros((len(df), len(defaults.bands), n)) degree = np.zeros_like(laplacian) A_lst = list() reject = dict(grad=2000e-13, mag=6000e-15) # same as GenZ repo src_fs = mne.read_source_spaces( op.join(defaults.subjects_dir, "fsaverage", "bem", "fsaverage-ico-5-src.fif")) #TODO mv to YAML state = "task" # task/rest if state == "task": p = mnefun.Params() p.work_dir = defaults.megdata p.sss_fif_tag = "_raw_sss.fif" p.run_names = [ "%s_faces_learn_01", "%s_thumbs_learn_01", "%s_emojis_learn_01", "%s_faces_test_01", "%s_thumbs_test_01", "%s_emojis_test_01", ] p.lp_cut = 80 # Subject loop for si, ss in enumerate(df.id.values): subject = f"genz{ss}"
pitchacc_07_02 has 52 speech trials - not sure if they are usable

@author: blau
"""
import mnefun
import numpy as np
from score import score

# Epochs span -0.2..2 s with baseline from -0.1 s; 80 Hz low-pass,
# shrunk covariance estimator, single-shell 5120 BEM.
params = mnefun.Params(tmin=-0.2, tmax=2, n_jobs=8,
                       decim=2, proj_sfreq=200, n_jobs_fir='cuda',
                       filter_length='auto', lp_cut=80.,
                       n_jobs_resample='cuda', cov_method='shrunk',
                       bmin=-0.1, bem_type='5120')
params.subjects_dir = 'subjects'
params.subjects = ['pitchacc_108']
params.structurals = params.subjects
params.dates = [None] * len(params.subjects)  # None used to fully anonymize
params.score = score  # scoring function used to slice data into trials
# define which subjects to run
params.subject_indices = np.arange(len(params.subjects))
params.plot_drop_logs = True

# Set parameters for remotely connecting to acquisition computer
from mne.stats import ttest_1samp_no_p
from mne.minimum_norm import (read_inverse_operator, apply_inverse)
import mnefun
from mnefun import anova_time
from mnefun import get_fsaverage_medial_vertices

__copyright__ = "Copyright 2015, ILABS"
__status__ = "Development"

log(verbose='Warning')

# cd to meg directory
os.chdir('/media/ALAYA/data/ilabs/nlr/')
work_dir = os.getcwd()

# set up mnefun parameters of interest
p = mnefun.Params(lp_cut=40.)
p.analyses = ['Words_noise']
p.subjects = ['nlr01', 'nlr02', 'nlr04', 'nlr05', 'nlr06', 'nlr07',
              'nlr08']
p.structurals = ['nlr01', 'nlr02', 'nlr04', 'nlr05', 'nlr06', 'nlr07',
                 'nlr08']
# Stage toggles for this analysis run.
do_plots = False
reload_data = True
do_contrasts = True
do_anova = False

# Local variables
lambda2 = 1. / 9.  # inverse regularization (1/SNR**2 with SNR=3)
n_smooth = 15
# fsaverage ico-5 vertex numbering for both hemispheres.
fs_verts = [np.arange(10242), np.arange(10242)]
fs_medial = get_fsaverage_medial_vertices()
inv_type = 'meg-fixed'  # can be meg-eeg, meg-fixed, meg, eeg-fixed, or eeg
""" Created on Fri May 6 7:11:32 2016 @author: mdclarke mnefun processing script for face to face """ import mnefun import numpy as np from score import score n_cores = 4 params = mnefun.Params(tmin=-0.1, tmax=7.0, n_jobs=n_cores, n_jobs_mkl=1, lp_cut=80., lp_trans=3., n_jobs_fir=n_cores, n_jobs_resample=n_cores, bmin=-0.1, decim=10, proj_sfreq=200, filter_length='auto') # Notes # 108 has 19s for 13s params.subjects = [ 'f2f_009_01_OTP', 'f2f_010_01_OTP', 'f2f_011_01_OTP', 'f2f_012_01_OTP', 'f2f_013_01_OTP', 'f2f_014_01_OTP', 'f2f_015_01_OTP', 'f2f_018_01_OTP', 'f2f_021_01_OTP', 'f2f_022_01_OTP', 'f2f_023_01_OTP', 'f2f_024_01_OTP', 'f2f_025_01_OTP', 'f2f_026_01_OTP', 'f2f_027_01_OTP', 'f2f_028_01_OTP', 'f2f_029_01_OTP', 'f2f_034_01_OTP', 'f2f_036_01_OTP', 'f2f_037_01_OTP', 'f2f_038_01_OTP', 'f2f_102_OTP', 'f2f_103_OTP', 'f2f_104_OTP',
# Process each candidate ECG surrogate channel for the current sample rate.
for ch in ecg_chs:
    # Subjects recorded at this sample rate whose ECG surrogate is ``ch``.
    subjects = \
        df[(df['SR(Hz)'] == sr) & (df['ECG'] == ch)]['Subject_ID'].tolist()
    if len(subjects) == 0:
        continue
    # noinspection PyTypeChecker
    print(' \nUsing %d Hz as sampling rate and\n'
          ' %s as ECG surrogate...' % (sr, ch))
    print(' %d ' % len(subjects), 'Subjects: ', subjects)
    params = mnefun.Params(tmin=-0.1, tmax=0.6, n_jobs=18,
                           n_jobs_fir='cuda', n_jobs_resample='cuda',
                           proj_sfreq=200, decim=decim,
                           filter_length='30s', hp_cut=.1, hp_trans='auto',
                           lp_cut=30., lp_trans='auto', bmin=-0.1,
                           ecg_channel=ch)
    params.subjects = ['bad_%s' % ss for ss in subjects]
    # write prebad
    for si, subj in enumerate(subjects):
        bad_channels = df[df['Subject_ID'] == subj]['BAD'].tolist()
        if op.exists(op.join(work_dir, params.subjects[si], 'raw_fif')):
            prebad_file = op.join(work_dir, params.subjects[si], 'raw_fif',
                                  '%s_prebad.txt' % params.subjects[si])
            if not op.exists(prebad_file):
                # NOTE(review): chunk truncated here — the body of this
                # conditional is outside this view.
                if bad_channels[0] == 'None':
# License: MIT
"""Raw MEG data preprocessing script for resting state component of
Adolescent project.

Pipeline does..
    1. write MNEFUN prebads for ACQ file
    2. process ACQ file as per MNEFUN:PARAMS"""
import os.path as op

import mnefun
import numpy as np
import pandas as pd

from genz import defaults

# TO METHODS
params = mnefun.Params(n_jobs=18, decim=1, proj_sfreq=500,
                       n_jobs_fir='cuda', n_jobs_resample='cuda',
                       filter_length='auto', lp_cut=100., lp_trans='auto',
                       bem_type='5120', tmin=-.2, tmax=.2)

## TODO:refactor to YAML ##
# Build the subject roster from the per-age-group TSVs.
dfs = []
for ag in [9, 11, 13, 15, 17]:
    fi = op.join(defaults.static,
                 "GenZ_subject_information - %da group.tsv" % ag)
    dfs.append(pd.read_csv(fi, sep="\t", usecols=["Subject Number", "Sex"]))
df = pd.concat(dfs)
df.columns = ["id", "sex"]
# BUGFIX: sort_values is not in-place; the previous code discarded its
# result, leaving the roster unsorted.
df = df.sort_values(by="id")
# Drop duplicate IDs, explicitly excluded subjects, and all-NaN rows.
dups = df[df.duplicated("id")].index.values.tolist()
df.drop(df.index[dups], inplace=True)
df.drop(df[df.id.isin(defaults.exclude)].index, inplace=True)
df = df.dropna(how="all")
picks = [sid.lower() for sid in df.id.values]
""" GenZ pilot analysis script. @author: Kambiz Tavabi @contact: [email protected] @license: MIT @date: 04/21/2018 """ import os.path as op import mnefun import numpy as np from picks import names, bad_channels params = mnefun.Params(n_jobs=18, decim=5, proj_sfreq=200, n_jobs_fir='cuda', n_jobs_resample='cuda', filter_length='auto', lp_cut=80., lp_trans='auto', bem_type='5120') params.subjects = names params.subject_indices = np.setdiff1d(np.arange(len(params.subjects)), np.array([3])) # write prebads for si, subj in enumerate(params.subjects): if op.exists(op.join(params.work_dir, subj, 'raw_fif')): prebad_file = op.join(params.work_dir, subj, 'raw_fif', '%s_prebad.txt' % subj) if not op.exists(prebad_file): assert len(bad_channels) == len(params.subjects) if bad_channels[si] is None:
# One processing pass per sampling-rate group; the group key encodes both
# the sample rate (chars 2:6) and the ECG surrogate channel (last 4 chars).
for lst in sfreqs:
    sr = int(lst[2:6])
    assert sr in [1200, 1800]
    # Decimate to a common effective rate (600 Hz) for both acquisitions.
    if sr == 1200:
        decim = 2
    else:
        decim = 3
    ecg_channel = lst[-4:]
    subjects = pickedSubjects[lst]
    nsubjects = len(subjects)
    # noinspection PyTypeChecker
    params = mnefun.Params(n_jobs=18, decim=decim, proj_sfreq=200,
                           n_jobs_fir='cuda', n_jobs_resample='cuda',
                           filter_length='30s', lp_cut=80.,
                           ecg_channel='MEG%s' % ecg_channel)
    params.subjects = ['bad_%s' % s for s in subjects]
    params.structurals = [None] * nsubjects  # None means use sphere
    params.score = None  # defaults to passing events through
    params.dates = [None] * nsubjects  # None means more fully anonymize
    params.subject_indices = np.arange(
        nsubjects)  # Define which subjects to run
    # Set parameters for remotely connecting to acquisition computer
    params.acq_ssh = '*****@*****.**'
    params.acq_dir = '/data101/bad_baby'
    # Set parameters for remotely connecting to SSS workstation ('sws')
    params.sws_ssh = '*****@*****.**'
# NOTE(review): chunk opens inside an if/else that picks the decimation by
# sample rate; the enclosing loop and ``if`` are outside this view.
        decim = 2
    else:
        decim = 3
    ecg_channel = lst[-4:]
    subjects = pickedSubjects[lst]
    nsubjects = len(subjects)
    # noinspection PyTypeChecker
    # Band-limited (32-48 Hz) epochs with automatic bad-epoch rejection.
    params = mnefun.Params(tmin=-0.1, tmax=1., n_jobs=18,
                           n_jobs_fir='cuda', n_jobs_resample='cuda',
                           decim=decim, proj_sfreq=200,
                           filter_length='10s', hp_cut=32.,
                           hp_trans='auto', lp_trans='auto', lp_cut=48.,
                           bmin=-0.1, auto_bad=20.,
                           ecg_channel='MEG%s' % ecg_channel)
    params.subjects = ['bad_%s' % s for s in subjects]
    params.structurals = [None] * nsubjects  # None means use sphere
    params.on_process = handler
    params.score = score
    # NOTE(review): month 0 / day 00 looks like a deliberate anonymization
    # convention — confirm mnefun accepts it.
    params.dates = [(2013, 0, 00)] * len(params.subjects)
    # Run everyone (empty exclusion list).
    params.subject_indices = np.setdiff1d(np.arange(len(params.subjects)),
                                          [])
    params.plot_drop_logs = False
    params.acq_ssh = '[email protected]'  # minea
import mnefun
from score import score
import numpy as np

try:
    # Use niprov as handler for events, or if it's not installed, ignore
    from niprov.mnefunsupport import handler
except ImportError:
    handler = None

params = mnefun.Params(tmin=-0.2, tmax=0.5, t_adjust=-4e-3, n_jobs=6,
                       n_jobs_mkl=1, n_jobs_fir='cuda',
                       n_jobs_resample='cuda', decim=5, proj_sfreq=200,
                       filter_length='5s')

params.subjects = ['subj_01', 'subj_02']
params.structurals = [None, 'AKCLEE_110_slim']  # None means use sphere
params.dates = [(2014, 2, 14), None]  # use "None" to more fully anonymize
params.score = score  # scoring function to use
params.subject_indices = np.arange(2)  # which subjects to run
params.plot_drop_logs = False  # turn off for demo or plots will block

# Remote-host settings for fetching raw data and running SSS.
params.acq_ssh = 'minea'  # can also be e.g., "*****@*****.**"
params.acq_dir = '/sinuhe/data02/eric_non_space'
params.sws_ssh = 'kasga'
params.sws_dir = '/data06/larsoner'