# spectral_connectivity_parameters
con_method = data_con['con_method']
epoch_window_length = data_con['epoch_window_length']

# sampling frequency; when starting from raw MEG (.fif) data, it can be
# directly extracted from the file info
sfreq = data_con['sfreq']

frequency_node = get_frequency_band(freq_band_names, freq_bands)

###############################################################################
# Then we create a node to pass input filenames to DataGrabber from nipype

subject_ids = ['sub-0003']  # 'sub-0004', 'sub-0006'
infosource = create_iterator(['subject_id', 'freq_band_name'],
                             [subject_ids, freq_band_names])

###############################################################################
# and a node to grab data. The template_args in this node iterate upon
# the values in the infosource node

template_path = '*%s_task-rest_run-01_meg_0_60_raw_filt_dsamp_ica_ROI_ts.npy'

datasource = pe.Node(
    interface=nio.DataGrabber(infields=['subject_id'],
                              outfields=['ts_file']),
    name='datasource')

datasource.inputs.base_directory = data_path
datasource.inputs.template = template_path
datasource.inputs.template_args = dict(ts_file=[['subject_id']])
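###############################################################################
# A quick way to sanity-check the template before running the workflow is to
# fill it in by hand: DataGrabber substitutes the infield values listed in
# ``template_args`` into the ``%s`` placeholders of ``template_path``. This is
# only an illustrative check, not part of the pipeline itself:

print(template_path % 'sub-0003')
# -> *sub-0003_task-rest_run-01_meg_0_60_raw_filt_dsamp_ica_ROI_ts.npy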
reject = params["preprocessing"]['reject']

###############################################################################
# Then, we create our workflow and specify the `base_dir` which tells
# nipype the directory in which to store the outputs.

# workflow directory within the `base_dir`
preproc_pipeline_name = 'preprocessing_workflow'

main_workflow = pe.Workflow(name=preproc_pipeline_name)
main_workflow.base_dir = data_path

###############################################################################
# Then we create a node to pass input filenames to DataGrabber from nipype

infosource = create_iterator(['subject_id', 'session_id'],
                             [subject_ids, session_ids])

###############################################################################
# and a node to grab data. The template_args in this node iterate upon
# the values in the infosource node

template_path = '*%s/%s/meg/%s*rest*0_60*raw.fif'
template_args = [['subject_id', 'session_id', 'subject_id']]
datasource = create_datagrabber(data_path, template_path, template_args)

###############################################################################
# Ephypype creates for us a pipeline which can be connected to these
# nodes we created. The preprocessing pipeline is implemented by the function
# :func:`ephypype.pipelines.preproc_meeg.create_pipeline_preproc_meeg`, thus to
# instantiate this pipeline node, we import it and pass our
# parameters to it.
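###############################################################################
# A minimal sketch of that instantiation and of the connections to the nodes
# above. The keyword values shown (filter edges, resampling frequency,
# explained variance, artifact channel names) are placeholders rather than
# values taken from this script, and the exact keyword set depends on the
# installed ephypype version:

from ephypype.pipelines.preproc_meeg import create_pipeline_preproc_meeg  # noqa

preproc_workflow = create_pipeline_preproc_meeg(
    data_path,
    l_freq=0.1, h_freq=150,  # band-pass filter edges (placeholder values)
    down_sfreq=300,          # resampling frequency (placeholder value)
    variance=0.95,           # ICA components kept by explained variance
    ECG_ch_name='ECG', EoG_ch_name='HEOG, VEOG',  # artifact channels
    data_type='fif', reject=reject)

# wire the iterator and the grabber into the pipeline's input node
main_workflow.connect(infosource, 'subject_id',
                      preproc_workflow, 'inputnode.subject_id')
main_workflow.connect(datasource, 'raw_file',
                      preproc_workflow, 'inputnode.raw_file')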
###############################################################################
# Then, we create our workflow and specify the `base_dir` which tells
# nipype the directory in which to store the outputs.

# workflow directory within the `base_dir`
src_reconstruction_pipeline_name = 'source_reconstruction_' + \
    inv_method + '_' + parc.replace('.', '')

main_workflow = pe.Workflow(name=src_reconstruction_pipeline_name)
main_workflow.base_dir = data_path

###############################################################################
# Then we create a node to pass input filenames to DataGrabber from nipype

infosource = create_iterator(['subject_id'], [subject_ids])

###############################################################################
# and a node to grab data. The template_args in this node iterate upon
# the values in the infosource node

template_path = '*%s/MEG/%s_sss_filt_ica-raw.fif'
template_args = [['subject_id', 'subject_id']]
infields = ['subject_id']
datasource = create_datagrabber(data_path, template_path, template_args,
                                infields=infields)

###############################################################################
# Ephypype creates for us a pipeline which can be connected to these
# nodes we created.
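###############################################################################
# A minimal sketch of that connection, assuming ephypype's
# ``create_pipeline_source_reconstruction`` and that ``subjects_dir`` and
# ``spacing`` were defined with the other parameters earlier in the script:

from ephypype.pipelines.fif_to_inv_sol import (  # noqa
    create_pipeline_source_reconstruction)

inv_sol_workflow = create_pipeline_source_reconstruction(
    data_path, subjects_dir, spacing=spacing,
    inv_method=inv_method, parc=parc)

main_workflow.connect(infosource, 'subject_id', datasource, 'subject_id')
main_workflow.connect(infosource, 'subject_id',
                      inv_sol_workflow, 'inputnode.sbj_id')
main_workflow.connect(datasource, 'raw_file',
                      inv_sol_workflow, 'inputnode.raw')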
def create_main_workflow_FS_segmentation():

    # Check environment variables
    if not os.environ.get('FREESURFER_HOME'):
        raise RuntimeError('FREESURFER_HOME environment variable not set')

    if not os.environ.get('SUBJECTS_DIR'):
        os.environ["SUBJECTS_DIR"] = subjects_dir

        if not op.exists(subjects_dir):
            os.mkdir(subjects_dir)

    print('SUBJECTS_DIR %s ' % os.environ["SUBJECTS_DIR"])

    main_workflow = pe.Workflow(name=MAIN_WF_name)
    main_workflow.base_dir = subjects_dir

    # (1) we create a node to pass input filenames to DataGrabber from nipype
    # iterate over subjects
    infosource = create_iterator(['subject_id'], [subject_ids])

    # and a node to grab data. The template_args in this node iterate upon
    # the values in the infosource node.
    # Here we define an input field for datagrabber called subject_id.
    # This is then used to set the template (see %s in the template).
    # we look for .nii.gz files
    template_path = '%s/anatomy/highres001.nii.gz'
    template_args = [['subject_id']]
    infields = ['subject_id']
    datasource = create_datagrabber(data_path, template_path, template_args,
                                    infields=infields)

    # (2) ReconAll Node to generate surfaces and parcellations of structural
    # data from anatomical images of a subject
    recon_all = pe.Node(interface=ReconAll(), name='recon_all')
    recon_all.inputs.subjects_dir = subjects_dir
    recon_all.inputs.directive = 'all'

    # reconall_workflow will be a node of the main workflow
    reconall_workflow = pe.Workflow(name=FS_WF_name)
    reconall_workflow.base_dir = wf_path

    reconall_workflow.connect(infosource, 'subject_id',
                              recon_all, 'subject_id')
    reconall_workflow.connect(infosource, 'subject_id',
                              datasource, 'subject_id')
    reconall_workflow.connect(datasource, 'raw_file',
                              recon_all, 'T1_files')

    # (3) BEM generation by make_watershed_bem of MNE Python package
    bem_generation = pe.Node(
        interface=Function(input_names=['subjects_dir', 'sbj_id'],
                           output_names=['sbj_id'],
                           function=_create_bem_sol),
        name='call_mne_watershed_bem')
    bem_generation.inputs.subjects_dir = subjects_dir

    main_workflow.connect(reconall_workflow, 'recon_all.subject_id',
                          bem_generation, 'sbj_id')

    return main_workflow
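###############################################################################
# A short usage sketch for the function above; the graph type and the number
# of parallel processes are arbitrary choices:

if __name__ == '__main__':
    main_workflow = create_main_workflow_FS_segmentation()

    # save a picture of the workflow graph, then run it in parallel
    main_workflow.write_graph(graph2use='colored')
    main_workflow.run(plugin='MultiProc', plugin_args={'n_procs': 2})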
sfreq = 1200

###############################################################################
# Then, we create our workflow and specify the `base_dir` which tells
# nipype the directory in which to store the outputs.

# workflow directory within the `base_dir`
correl_analysis_name = 'spectral_connectivity_' + con_method

main_workflow = pe.Workflow(name=correl_analysis_name)
main_workflow.base_dir = data_path

###############################################################################
# Then we create a node to pass input filenames to DataGrabber from nipype

infosource = create_iterator(
    ['subject_id', 'session_id', 'run_id', 'cond_id', 'freq_band_name'],
    [subject_ids, session_ids, run_ids, cond_ids, freq_band_names])

###############################################################################
# and a node to grab data. The template_args in this node iterate upon
# the values in the infosource node

sources_fp = ('/scratch/hyruuk/saflow_data/saflow_bids/'
              'source_reconstruction_MNE_aparca2009s/inv_sol_pipeline/')
template_path = (sources_fp +
                 '_run_id_%s_session_id_%s_subject_id_%s/inv_solution/'
                 '%s_%s_task-gradCPT_%s_meg_%s_-epo_stc.hdf5')
template_args = [['run_id', 'session_id', 'subject_id', 'subject_id',
                  'session_id', 'run_id', 'cond_id']]

datasource = pe.Node(
    interface=nio.DataGrabber(
        infields=['subject_id', 'session_id', 'run_id', 'cond_id'],
        outfields=['ts_file']),
    name='datasource')

datasource.inputs.base_directory = data_path
datasource.inputs.template = template_path
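###############################################################################
# The excerpt stops before the grabber is fully configured and connected; a
# minimal sketch of how it could continue, assuming ephypype's
# ``create_pipeline_time_series_to_spectral_connectivity`` and the
# ``frequency_node`` and ``epoch_window_length`` created in the first excerpt
# (the connection names mirror the sensor-space example and are assumptions
# here):

from ephypype.pipelines.ts_to_conmat import (  # noqa
    create_pipeline_time_series_to_spectral_connectivity)

datasource.inputs.template_args = dict(ts_file=template_args)
datasource.inputs.sort_filelist = True

spectral_workflow = create_pipeline_time_series_to_spectral_connectivity(
    data_path, con_method=con_method,
    epoch_window_length=epoch_window_length)

# pass each iterated label to the matching DataGrabber infield
for field in ['subject_id', 'session_id', 'run_id', 'cond_id']:
    main_workflow.connect(infosource, field, datasource, field)

main_workflow.connect(datasource, 'ts_file',
                      spectral_workflow, 'inputnode.ts_file')
main_workflow.connect(frequency_node, 'freq_bands',
                      spectral_workflow, 'inputnode.freq_band')
spectral_workflow.inputs.inputnode.sfreq = sfreq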