def create_converter_structural_pipeline(working_dir, ds_dir, name="converter_struct"):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")

    # set fsl output
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=["t1w_dicom"]), name="inputnode")

    outputnode = Node(util.IdentityInterface(fields=["t1w"]), name="outputnode")

    niftisink = Node(nio.DataSink(), name="niftisink")
    niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")

    # convert to nifti
    # TODO: check whether geometry bugs appear; use dcm2nii instead?
    converter_t1w = Node(DcmStack(embed_meta=True), name="converter_t1w")
    converter_t1w.plugin_args = {"submit_specs": "request_memory = 2000"}
    converter_t1w.inputs.out_format = "t1w"

    converter_wf.connect(inputnode, "t1w_dicom", converter_t1w, "dicom_files")

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
    converter_wf.connect(converter_t1w, "out_file", reor_2_std, "in_file")

    converter_wf.connect(reor_2_std, "out_file", outputnode, "t1w")

    # save original niftis
    converter_wf.connect(reor_2_std, "out_file", niftisink, "sMRI")

    converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
    return converter_wf
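
# Usage sketch for the converter above (hypothetical paths and DICOM list;
# assumes the same imports as the function body):
#
# wf = create_converter_structural_pipeline('/tmp/work', '/tmp/ds')
# wf.inputs.inputnode.t1w_dicom = ['/tmp/dicoms/t1w/0001.dcm']
# wf.run()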
Example #2
def create_structural(subject, working_dir, data_dir, freesurfer_dir, out_dir,
                      standard_brain):

    # main workflow
    struct_preproc = Workflow(name='anat_preproc')
    struct_preproc.base_dir = working_dir
    struct_preproc.config['execution'][
        'crashdump_dir'] = struct_preproc.base_dir + "/crash_files"

    # select files
    #templates={'anat': '3T/nifti/MPRAGEADNI32Ch.nii.gz'}
    #selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),    name="selectfiles")

    # workflow to run freesurfer reconall
    reconall = create_reconall_pipeline()
    reconall.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    reconall.inputs.inputnode.fs_subject_id = subject

    # workflow to get brain, head and wmseg from freesurfer and convert to nifti
    mgzconvert = create_mgzconvert_pipeline()
    mgzconvert.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    mgzconvert.inputs.inputnode.fs_subject_id = subject

    normalize = create_normalize_pipeline()
    normalize.inputs.inputnode.standard = standard_brain

    # sink to store files
    sink = Node(nio.DataSink(base_directory=out_dir,
                             parameterization=False,
                             substitutions=[('transform_Warped',
                                             'T1_brain2mni')]),
                name='sink')

    # connections
    struct_preproc.connect([  #(selectfiles, sink, [('anat', 'outputnode.test')]),
        #(selectfiles, reconall, [('anat', 'inputnode.anat')]),
        #(reconall, mgzconvert,  [('outputnode.fs_subject_id', 'inputnode.fs_subject_id'),
        #                         ('outputnode.fs_subjects_dir', 'inputnode.fs_subjects_dir')]),
        #for second round of structural don't redo FREESURFER
        (mgzconvert, normalize, [('outputnode.anat_brain', 'inputnode.anat')]),
        (mgzconvert, sink, [('outputnode.anat_head', '@head')]),
        (mgzconvert, sink, [('outputnode.anat_brain', '@brain')]),
        (mgzconvert, sink, [('outputnode.anat_brain_mask', '@mask')]),
        (mgzconvert, sink, [('outputnode.wmedge', '@wmedge')]),
        (normalize, sink, [('outputnode.anat2std', '@anat2std'),
                           ('outputnode.anat2std_transforms',
                            'transforms2mni.@anat2std_transforms'),
                           ('outputnode.std2anat_transforms',
                            'transforms2mni.@std2anat_transforms')])
    ])

    struct_preproc.write_graph(dotfilename='struct_preproc.dot',
                               graph2use='colored',
                               format='pdf',
                               simple_form=True)
    struct_preproc.run()
    # alternatively:
    # struct_preproc.run(plugin='CondorDAGMan',
    #                    plugin_args={'initial_specs': 'request_memory = 1500'})
Example #3
def create_converter_diffusion_pipeline(working_dir,
                                        ds_dir,
                                        name='converter_diffusion'):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting')

    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['dMRI_dicom']),
                     name='inputnode')

    outputnode = Node(util.IdentityInterface(fields=['dMRI']),
                      name='outputnode')

    niftisink = Node(nio.DataSink(), name='niftisink')
    niftisink.inputs.base_directory = os.path.join(ds_dir, 'raw_niftis')

    #######

    converter_dMRI = Node(Dcm2nii(), name="converter_dMRI")
    converter_dMRI.inputs.gzip_output = True
    converter_dMRI.inputs.nii_output = True
    converter_dMRI.inputs.anonymize = False
    converter_dMRI.plugin_args = {'submit_specs': 'request_memory = 2000'}
    converter_wf.connect(inputnode, 'dMRI_dicom', converter_dMRI,
                         'source_names')

    dMRI_rename = Node(util.Rename(format_string='DTI_mx_137.nii.gz'),
                       name='dMRI_rename')
    converter_wf.connect(converter_dMRI, 'converted_files', dMRI_rename,
                         'in_file')

    bvecs_rename = Node(util.Rename(format_string='DTI_mx_137.bvecs'),
                        name='bvecs_rename')
    converter_wf.connect(converter_dMRI, 'bvecs', bvecs_rename, 'in_file')

    bvals_rename = Node(util.Rename(format_string='DTI_mx_137.bvals'),
                        name='bvals_rename')
    converter_wf.connect(converter_dMRI, "bvals", bvals_rename, 'in_file')

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name='reor_2_std')
    converter_wf.connect(dMRI_rename, 'out_file', reor_2_std, 'in_file')
    converter_wf.connect(reor_2_std, 'out_file', outputnode, 'dMRI')

    # save original niftis
    converter_wf.connect(reor_2_std, 'out_file', niftisink, 'dMRI.@dwi')
    converter_wf.connect(bvals_rename, 'out_file', niftisink, 'dMRI.@bvals')
    converter_wf.connect(bvecs_rename, 'out_file', niftisink, 'dMRI.@bvecs')

    converter_wf.write_graph(dotfilename='converter_diffusion',
                             graph2use='flat',
                             format='pdf')
    return converter_wf
Example #4
def ants_ct_wf(subjects_id,
               preprocessed_data_dir,
               working_dir,
               ds_dir,
               template_dir,
               plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from nipype.interfaces.freesurfer.utils import ImageInfo



    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='ants_ct')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')



    #####################################
    # GET DATA
    #####################################
    # GET SUBJECT SPECIFIC STRUCTURAL DATA
    in_data_templates = {
        't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
    }

    in_data = Node(nio.SelectFiles(in_data_templates,
                                   base_directory=preprocessed_data_dir),
                   name="in_data")
    in_data.inputs.subject_id = subjects_id


    # GET NKI ANTs templates
    ants_templates_templates = {
        'brain_template': 'NKI/T_template.nii.gz',
        'brain_probability_mask': 'NKI/T_templateProbabilityMask.nii.gz',
        'segmentation_priors': 'NKI/Priors/*.nii.gz',
        't1_registration_template': 'NKI/T_template_BrainCerebellum.nii.gz'
    }

    ants_templates = Node(nio.SelectFiles(ants_templates_templates,
                                          base_directory=template_dir),
                          name="ants_templates")
Example #5
def ants_ct_wf(subjects_id, preprocessed_data_dir, working_dir, ds_dir,
               template_dir, plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from nipype.interfaces.freesurfer.utils import ImageInfo

    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='ants_ct')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={
                          'stop_on_first_crash': True,
                          'remove_unnecessary_outputs': True,
                          'job_finished_timeout': 120
                      })
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(
        working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')

    #####################################
    # GET DATA
    #####################################
    # GET SUBJECT SPECIFIC STRUCTURAL DATA
    in_data_templates = {
        't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
    }

    in_data = Node(nio.SelectFiles(in_data_templates,
                                   base_directory=preprocessed_data_dir),
                   name="in_data")
    in_data.inputs.subject_id = subjects_id

    # GET NKI ANTs templates
    ants_templates_templates = {
        'brain_template': 'NKI/T_template.nii.gz',
        'brain_probability_mask': 'NKI/T_templateProbabilityMask.nii.gz',
        'segmentation_priors': 'NKI/Priors/*.nii.gz',
        't1_registration_template': 'NKI/T_template_BrainCerebellum.nii.gz'
    }

    ants_templates = Node(nio.SelectFiles(ants_templates_templates,
                                          base_directory=template_dir),
                          name="ants_templates")
Example #6
def create_conversion(name, subject, scans, working_dir, out_dir, folder,
                      xnat_server, xnat_user, xnat_pass, project_id, exp_id):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config['execution'][
        'crashdump_dir'] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(
        util.IdentityInterface(fields=['scan_key', 'scan_val']),
        name='scan_infosource')
    scan_infosource.iterables = [('scan_key', list(scans.keys())),
                                 ('scan_val', list(scans.values()))]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(nio.XNATSource(
        infields=['project_id', 'subject_id', 'exp_id', 'scan_id'],
        outfields=['dicom'],
        server=xnat_server,
        user=xnat_user,
        pwd=xnat_pass,
        cache_dir=working_dir),
                      name='xnatsource')

    xnatsource.inputs.query_template = (
        '/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files'
    )
    xnatsource.inputs.query_template_args['dicom'] = [[
        'project_id', 'subject_id', 'exp_id', 'scan_id'
    ]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [('scan_val', 'scan_id')])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect([
        (scan_infosource, dcmconvert, [('scan_key', 'inputnode.filename')]),
        (xnatsource, dcmconvert, [('dicom', 'inputnode.dicoms')])
    ])

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False),
                name='sink')

    convert.connect([(dcmconvert, sink, [('outputnode.nifti', folder)])])

    convert.run()
Example #7
def create_conversion(
    name, subject, scans, working_dir, out_dir, folder, xnat_server, xnat_user, xnat_pass, project_id, exp_id
):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config["execution"]["crashdump_dir"] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(util.IdentityInterface(fields=["scan_key", "scan_val"]), name="scan_infosource")
    scan_infosource.iterables = [("scan_key", scans.keys()), ("scan_val", scans.values())]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(
        nio.XNATSource(
            infields=["project_id", "subject_id", "exp_id", "scan_id"],
            outfields=["dicom"],
            server=xnat_server,
            user=xnat_user,
            pwd=xnat_pass,
            cache_dir=working_dir,
        ),
        name="xnatsource",
    )

    xnatsource.inputs.query_template = (
        "/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files"
    )
    xnatsource.inputs.query_template_args["dicom"] = [["project_id", "subject_id", "exp_id", "scan_id"]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [("scan_val", "scan_id")])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect(
        [
            (scan_infosource, dcmconvert, [("scan_key", "inputnode.filename")]),
            (xnatsource, dcmconvert, [("dicom", "inputnode.dicoms")]),
        ]
    )

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False), name="sink")

    convert.connect([(dcmconvert, sink, [("outputnode.nifti", folder)])])

    convert.run()
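
# Usage sketch for create_conversion (all values hypothetical, including the
# XNAT credentials; scans maps a filename key to an XNAT scan id):
#
# create_conversion(name="conversion", subject="sub001", scans={"t1w": 2},
#                   working_dir="/tmp/work", out_dir="/tmp/out", folder="nifti",
#                   xnat_server="https://xnat.example.org", xnat_user="user",
#                   xnat_pass="secret", project_id="PROJ01", exp_id="EXP01")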
Example #8
def create_workflow(xfm_dir,
                    xfm_pattern,
                    atlas_dir,
                    atlas_pattern,
                    source_dir,
                    source_pattern,
                    work_dir,
                    out_dir,
                    name="new_data_to_atlas_space"):

    wf = Workflow(name=name)
    wf.base_dir = os.path.join(work_dir)

    datasource_source = Node(interface=DataGrabber(sort_filelist=True),
                             name='datasource_source')
    datasource_source.inputs.base_directory = os.path.abspath(source_dir)
    datasource_source.inputs.template = source_pattern

    datasource_xfm = Node(interface=DataGrabber(sort_filelist=True),
                          name='datasource_xfm')
    datasource_xfm.inputs.base_directory = os.path.abspath(xfm_dir)
    datasource_xfm.inputs.template = xfm_pattern

    datasource_atlas = Node(interface=DataGrabber(sort_filelist=True),
                            name='datasource_atlas')
    datasource_atlas.inputs.base_directory = os.path.abspath(atlas_dir)
    datasource_atlas.inputs.template = atlas_pattern

    resample = MapNode(interface=Resample(sinc_interpolation=True),
                       name='resample_',
                       iterfield=['input_file', 'transformation'])
    wf.connect(datasource_source, 'outfiles', resample, 'input_file')
    wf.connect(datasource_xfm, 'outfiles', resample, 'transformation')
    wf.connect(datasource_atlas, 'outfiles', resample, 'like')

    bigaverage = Node(interface=BigAverage(output_float=True, robust=False),
                      name='bigaverage')

    wf.connect(resample, 'output_file', bigaverage, 'input_files')

    datasink = Node(interface=DataSink(base_directory=out_dir,
                                       container=out_dir),
                    name='datasink')

    wf.connect([(bigaverage, datasink, [('output_file', 'average')])])
    wf.connect([(resample, datasink, [('output_file', 'atlas_space')])])
    wf.connect([(datasource_xfm, datasink, [('outfiles', 'transforms')])])

    return wf
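
# Usage sketch (hypothetical glob patterns; the caller runs the returned
# workflow):
#
# wf = create_workflow(xfm_dir='/data/xfms', xfm_pattern='*.xfm',
#                      atlas_dir='/data/atlas', atlas_pattern='atlas.mnc',
#                      source_dir='/data/source', source_pattern='*.mnc',
#                      work_dir='/tmp/work', out_dir='/data/out')
# wf.run()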
Example #9
def prepare_indT_cs():
    outdirs = [
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_con_vs_stroke/alff_analysis/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_con_vs_stroke/falff_analysis/'
    ]

    for n, outdir in enumerate(outdirs):

        stroke_hdir = '/home/peter/Desktop/prepare/rest/output/'
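        # note: 'maps' is assumed to be defined at module level as a list of
        # (f)ALFF map filenames indexed in step with 'outdirs'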
        all_stroke_files = glob.glob(stroke_hdir + '/*/f_alff/' + maps[n])
        stroke_files = [x for x in all_stroke_files if '12months' not in x]

        healthy_hdir = '/home/peter/Desktop/Connect/rest/output/'
        all_healthy_files = glob.glob(healthy_hdir + '/D_H*/f_alff/' + maps[n])
        healthy_files = [x for x in all_healthy_files if 'P2' not in x]

        ttest = Node(TwoSampleTTestDesign(), name='TwoSampleT')
        ttest.inputs.group1_files = healthy_files
        ttest.inputs.group2_files = stroke_files

        modelEst = Node(EstimateModel(), name='EstimateModel')
        modelEst.inputs.estimation_method = {'Classical': 1}

        conEst = Node(EstimateContrast(), name='EstimateContrasts')

        con_1 = ('Controls', 'T', ['Group_{1}', 'Group_{2}'], [1.0, 0.0])
        con_2 = ('Patients', 'T', ['Group_{1}', 'Group_{2}'], [0.0, 1.0])
        con_3 = ('Controls>Patients', 'T', ['Group_{1}', 'Group_{2}'], [1.0, -1.0])
        con_4 = ('Patients>Controls', 'T', ['Group_{1}', 'Group_{2}'], [-1.0, 1.0])

        contrasts = [con_1, con_2, con_3, con_4]

        conEst.inputs.contrasts = contrasts
        conEst.inputs.group_contrast = True

        l2analysis = Workflow(name='l2analysis')
        l2analysis.base_dir = outdir

        l2analysis.connect([
            (ttest, modelEst, [('spm_mat_file', 'spm_mat_file')]),
            (modelEst, conEst, [('spm_mat_file', 'spm_mat_file'),
                                ('beta_images', 'beta_images'),
                                ('residual_image', 'residual_image')]),
        ])

        l2analysis.write_graph(graph2use='colored')
        l2analysis.run('MultiProc', plugin_args={'n_procs': 1})
Example #10
def results(cenc_participant_id, cenc_participant_dir, cenc_freesurfer_dir, cenc_results_dir, verbose):

     util.mkcd_dir([cenc_results_dir], True)

     files_to_convert = [os.path.join(cenc_freesurfer_dir, cenc_participant_id, 'mri', 'nu.mgz'),
                         os.path.join(cenc_freesurfer_dir, cenc_participant_id, 'mri', 'aseg.mgz'),
                         os.path.join(cenc_freesurfer_dir, cenc_participant_id, 'mri', 'brainmask.mgz'),
                         os.path.join(cenc_freesurfer_dir, cenc_participant_id, 'mri', 'aparc.a2009s+aseg.mgz'),
                         os.path.join(cenc_freesurfer_dir, cenc_participant_id, 'mri', 'wmparc.mgz')]

     # Check if files exist

     print(files_to_convert)

     if not util.check_files(files_to_convert, True):
          sys.exit()

     # Create link to directory

     freesurfer_results_dir = os.path.abspath(os.path.join(cenc_participant_dir, 'freesurfer', 'results'))

     if not os.path.exists(freesurfer_results_dir):
          util.force_symbolic_link(os.path.join(cenc_freesurfer_dir, cenc_participant_id), freesurfer_results_dir)

     # TODO use input node to run this instead of a loop

     mc = Node(fs.MRIConvert(out_type='niigz'), name="mri_convert")

     mc.iterables = ("in_file", files_to_convert)

     reorient = Node(fsl.Reorient2Std(), name="reorient")

     workflow_convert = Workflow(name='cenc_freesurfer_nipype_workflow')
     workflow_convert.base_dir = cenc_results_dir

     workflow_convert.connect([(mc, reorient, [('out_file', 'in_file')])])
     workflow_convert.run()
     
     # Create final brain mask. This takes forever. Speeding it up would be helpful. 

     cenc.create_mask(os.path.join(cenc_results_dir, 'brainmask.nii.gz'),
                      os.path.join(cenc_results_dir, 'aparc.a2009s+aseg.nii.gz'),
                      os.path.join(cenc_results_dir, 'mask.nii.gz'))
Example #11
def create_converter_diffusion_pipeline(working_dir, ds_dir, name="converter_diffusion"):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")

    # set fsl output
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=["dMRI_dicom"]), name="inputnode")

    outputnode = Node(util.IdentityInterface(fields=["dMRI"]), name="outputnode")

    niftisink = Node(nio.DataSink(), name="niftisink")
    niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")

    #######

    converter_dMRI = Node(Dcm2nii(), name="converter_dMRI")
    converter_dMRI.inputs.gzip_output = True
    converter_dMRI.inputs.nii_output = True
    converter_dMRI.inputs.anonymize = False
    converter_dMRI.plugin_args = {"submit_specs": "request_memory = 2000"}
    converter_wf.connect(inputnode, "dMRI_dicom", converter_dMRI, "source_names")

    dMRI_rename = Node(util.Rename(format_string="DTI_mx_137.nii.gz"), name="dMRI_rename")
    converter_wf.connect(converter_dMRI, "converted_files", dMRI_rename, "in_file")

    bvecs_rename = Node(util.Rename(format_string="DTI_mx_137.bvecs"), name="bvecs_rename")
    converter_wf.connect(converter_dMRI, "bvecs", bvecs_rename, "in_file")

    bvals_rename = Node(util.Rename(format_string="DTI_mx_137.bvals"), name="bvals_rename")
    converter_wf.connect(converter_dMRI, "bvals", bvals_rename, "in_file")

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
    converter_wf.connect(dMRI_rename, "out_file", reor_2_std, "in_file")
    converter_wf.connect(reor_2_std, "out_file", outputnode, "dMRI")

    # save original niftis
    converter_wf.connect(reor_2_std, "out_file", niftisink, "dMRI.@dwi")
    converter_wf.connect(bvals_rename, "out_file", niftisink, "dMRI.@bvals")
    converter_wf.connect(bvecs_rename, "out_file", niftisink, "dMRI.@bvecs")

    converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
    return converter_wf
Example #12
def index_lesion_workflow(msid, mseid, lesion):
    import nipype.interfaces.ants as ants
    from nipype.pipeline.engine import Node, Workflow, MapNode
    from nipype.interfaces.io import DataSink, DataGrabber
    from nipype.interfaces.utility import IdentityInterface, Function
    import nipype.interfaces.fsl as fsl
    from nipype.utils.filemanip import load_json

    working_directory = '/working/henry_temp/keshavan/'
    output_directory = os.path.split(lesion)[0]

    register = Workflow(name="indexed_lesion_{0}_{1}".format(msid, mseid))
    register.base_dir = working_directory
    inputnode = Node(IdentityInterface(fields=["lesion"]), name="inputspec")
    inputnode.inputs.lesion = lesion

    bin_math = Node(fsl.BinaryMaths(), name="Convert_to_binary")
    bin_math.inputs.operand_value = 1
    bin_math.inputs.operation = 'min'
    register.connect(inputnode, "lesion", bin_math, "in_file")

    cluster_lesion = Node(fsl.Cluster(threshold=0.0001,
                                      out_index_file=True,
                                      use_mm=True),
                          name="cluster_lesion")

    sinker = Node(DataSink(), name="sinker")
    sinker.inputs.base_directory = output_directory
    sinker.inputs.container = '.'
    sinker.inputs.substitutions = [('_maths', '')]

    register.connect(bin_math, "out_file", cluster_lesion, "in_file")
    register.connect(cluster_lesion, "index_file", sinker, "@cluster")

    from nipype.interfaces.freesurfer import SegStats
    segstats_lesion = Node(SegStats(), name="segstats_lesion")
    register.connect(cluster_lesion, "index_file", segstats_lesion,
                     "segmentation_file")
    register.connect(segstats_lesion, "summary_file", sinker, "@summaryfile")

    register.write_graph(graph2use='orig')
    register.config["Execution"] = {
        "keep_inputs": True,
        "remove_unnecessary_outputs": False
    }
    return register
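
# Usage sketch (hypothetical ids and lesion path; the function only builds
# the workflow, so the caller runs it):
#
# register = index_lesion_workflow('ms0001', 'mse001',
#                                  '/data/ms0001/lesion.nii.gz')
# register.run()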
Example #13
def init_mriqc_wf():
    """Create a multi-subject MRIQC workflow."""
    from .. import config

    workflow = Workflow(name="mriqc_wf")
    workflow.base_dir = config.execution.work_dir

    if "bold" in config.workflow.inputs:
        workflow.add_nodes([fmri_qc_workflow()])

    if set(("T1w", "T2w")).intersection(
        config.workflow.inputs.keys()
    ):
        workflow.add_nodes([anat_qc_workflow()])

    if not workflow._get_all_nodes():
        return None

    return workflow
Example #14
def create_converter_structural_pipeline(working_dir,
                                         ds_dir,
                                         name='converter_struct'):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting')

    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['t1w_dicom']),
                     name='inputnode')

    outputnode = Node(util.IdentityInterface(fields=['t1w']),
                      name='outputnode')

    niftisink = Node(nio.DataSink(), name='niftisink')
    niftisink.inputs.base_directory = os.path.join(ds_dir, 'raw_niftis')

    # convert to nifti
    # TODO: check whether geometry bugs appear; use dcm2nii instead?
    converter_t1w = Node(DcmStack(embed_meta=True), name='converter_t1w')
    converter_t1w.plugin_args = {'submit_specs': 'request_memory = 2000'}
    converter_t1w.inputs.out_format = 't1w'

    converter_wf.connect(inputnode, 't1w_dicom', converter_t1w, 'dicom_files')

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name='reor_2_std')
    converter_wf.connect(converter_t1w, 'out_file', reor_2_std, 'in_file')

    converter_wf.connect(reor_2_std, 'out_file', outputnode, 't1w')

    # save original niftis
    converter_wf.connect(reor_2_std, 'out_file', niftisink, 'sMRI')

    converter_wf.write_graph(dotfilename='converter_struct',
                             graph2use='flat',
                             format='pdf')
    return converter_wf
Example #15
def init_mriqc_wf():
    """Create a multi-subject MRIQC workflow."""
    from mriqc import config

    # Create parent workflow
    workflow = Workflow(name="mriqc_wf")
    workflow.base_dir = config.execution.work_dir

    # Create fMRI QC workflow
    if FMRI_KEY in config.workflow.inputs:
        workflow.add_nodes([fmri_qc_workflow()])

    # Create sMRI QC workflow
    input_keys = config.workflow.inputs.keys()
    anatomical_flag = any(key in input_keys for key in ANATOMICAL_KEYS)
    if anatomical_flag:
        workflow.add_nodes([anat_qc_workflow()])

    # Return non-empty workflow, else None
    if workflow._get_all_nodes():
        return workflow
Example #16
def create_structural(subject, working_dir, data_dir, freesurfer_dir, out_dir, standard_brain):
    # main workflow
    struct_preproc = Workflow(name='anat_preproc')
    struct_preproc.base_dir = working_dir
    struct_preproc.config['execution']['crashdump_dir'] = struct_preproc.base_dir + "/crash_files"


    # workflow to get brain, head and wmseg from freesurfer and convert to nifti
    mgzconvert = create_mgzconvert_pipeline()
    mgzconvert.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    mgzconvert.inputs.inputnode.fs_subject_id = subject

    normalize = create_normalize_pipeline()
    normalize.inputs.inputnode.standard = standard_brain

    # sink to store files
    sink = Node(nio.DataSink(base_directory=out_dir,
                             parameterization=False,
                             substitutions=[
                                 ('transform_Warped', 'T1_brain2mni')]),
                name='sink')

    # connections
    struct_preproc.connect(
        [(mgzconvert, normalize, [('outputnode.anat_brain', 'inputnode.anat')]),
         (mgzconvert, sink, [('outputnode.anat_head', '@head')]),
         (mgzconvert, sink, [('outputnode.anat_brain', '@brain')]),
         (mgzconvert, sink, [('outputnode.anat_brain_mask', '@mask')]),
         (normalize, sink, [('outputnode.anat2std', '@anat2std'),
                            ('outputnode.anat2std_transforms', 'transforms2mni.@anat2std_transforms'),
                            ('outputnode.std2anat_transforms', 'transforms2mni.@std2anat_transforms')])
         ])

    struct_preproc.write_graph(dotfilename='struct_preproc.dot', graph2use='colored', format='pdf', simple_form=True)
    # struct_preproc.run()
    struct_preproc.run(plugin='CondorDAGMan', plugin_args={'initial_specs': 'request_memory = 1500'})
Example #17
topup.connect([(inputnode, convertwarp, [('anat_head', 'reference')]),
              (epireg, convertwarp, [('shiftmap', 'shiftmap')]),
              (concat, convertwarp, [('out_file', 'postmat')]),
              (inputnode, applywarp, [('epi_mean', 'in_file'),
                                      ('anat_head', 'ref_file')]),
              (convertwarp, applywarp, [('out_field', 'field_file')]),
              (applywarp, outputnode, [('out_file', 'topup_mean_coreg')]),
              (convertwarp, outputnode, [('out_field', 'topup_fullwarp')])
              ])



##### in and output ############

topup.base_dir = '/scr/kansas1/huntenburg/'
topup.config['execution'] = {'remove_unnecessary_outputs': 'False'}
data_dir = '/scr/jessica2/Schaare/LEMON/'
fs_subjects_dir = '/scr/jessica2/Schaare/LEMON/freesurfer/freesurfer/'
out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
subjects = ['LEMON001']
# subjects=[]
# f = open('/scr/jessica2/Schaare/LEMON/done_freesurfer.txt','r')
# for line in f:
#     subjects.append(line.strip())


# create infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id', 'fs_subjects_dir']),
                  name='infosource')
infosource.inputs.fs_subjects_dir = fs_subjects_dir
Example #18
def collect_3d_metrics_run_glm_meanRegression(cfg):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode, JoinNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as freesurfer

    # INPUT PARAMETERS

    metrics_data_dir = cfg['metrics_data_dir']
    metrics_data_suffix = cfg['metrics_data_suffix']
    metric_name = cfg['metric_name']

    demos_df = cfg['demos_df']
    qc_df = cfg['qc_df']

    working_dir = cfg['working_dir']
    ds_dir = cfg['ds_dir']
    template_dir = cfg['template_dir']

    subjects_list = cfg['subjects_list']

    use_n_procs = cfg['use_n_procs']
    plugin_name = cfg['plugin_name']


    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    wf = Workflow(name='LeiCA_collect_metrics')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')


    # get atlas data
    templates_atlases = {'GM_mask_MNI_2mm': 'SPM_GM/SPM_GM_mask_2mm.nii.gz',
                         'GM_mask_MNI_3mm': 'SPM_GM/SPM_GM_mask_3mm.nii.gz',
                         'brain_mask_MNI_3mm': 'cpac_image_resources/MNI_3mm/MNI152_T1_3mm_brain_mask.nii.gz',
                         'brain_template_MNI_3mm': 'cpac_image_resources/MNI_3mm/MNI152_T1_3mm.nii.gz'
                         }

    selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
                                                      base_directory=template_dir),
                                      name="selectfiles_anat_templates")

    # EXPORT SUBJECT LIST
    def export_subjects_list_fct(subjects_list):
        import os

        out_file = os.path.join(os.getcwd(), 'subject_list.txt')

        with open(out_file, 'w') as f:
            for subject in subjects_list:
                f.write('%s\n' % subject)
        return out_file



    # RESTRICT SUBJECTS LIST TO ADULTS
    def get_subjects_list_adults_fct(df_path, df_qc_path, subjects_list):
        '''
        excludes kids and subjects with missing sex or age
        '''
        import pandas as pd
        import numpy as np

        df = pd.read_pickle(df_path)
        df_qc = pd.read_pickle(df_qc_path)
        df = pd.merge(df, df_qc, left_index=True, right_index=True)
        pd.to_pickle(df, 'testdf.pkl')

        df['subject_id'] = df.subject_id_x

        # fixme exclude subjects with mean_FD>.1
        subjects_list_exclude = df[(df.age < 18) | (df.mean_FD_Power > .1)].index
        subjects_list_adults = list(subjects_list)

        for exclude_subject in subjects_list_exclude:
            if exclude_subject in subjects_list_adults:
                subjects_list_adults.remove(exclude_subject)

        missing_info = df[(df.age == 999) | (~np.logical_or(df.sex == 'M', df.sex == 'F'))].index
        for missing in missing_info:
            if missing in subjects_list_adults:
                subjects_list_adults.remove(missing)


        # remove subjects from subjects_list_adults with no entry in df;
        # iterate over a copy, since removing while iterating skips elements
        for subject in list(subjects_list_adults):
            if subject not in df.index:
                subjects_list_adults.remove(subject)

        return subjects_list_adults


    get_subjects_list_adults = Node(util.Function(input_names=['df_path', 'df_qc_path', 'subjects_list'],
                                      output_names=['subjects_list_adults'],
                                      function=get_subjects_list_adults_fct),
                                name='get_subjects_list_adults')
    get_subjects_list_adults.inputs.df_path = demos_df
    get_subjects_list_adults.inputs.df_qc_path = qc_df
    get_subjects_list_adults.inputs.subjects_list = subjects_list



    export_subjects_list = Node(util.Function(input_names=['subjects_list'],
                                      output_names=['out_file'],
                                      function=export_subjects_list_fct),
                                name='export_subjects_list')
    wf.connect(get_subjects_list_adults, 'subjects_list_adults', export_subjects_list,'subjects_list')
    wf.connect(export_subjects_list, 'out_file', ds,'@subjects_list')


    # BUILD FILE LIST FOR MERGER
    def build_file_list_fct(prefix, subjects_list, suffix):
        import os

        out_file_list = []
        for subject in subjects_list:
            out_file_list.append(os.path.join(prefix, subject, suffix))

        return out_file_list



    build_file_list = Node(util.Function(input_names=['prefix', 'subjects_list', 'suffix'],
                                      output_names=['out_file_list'],
                                      function=build_file_list_fct),
                        name='build_file_list')
    build_file_list.inputs.prefix = metrics_data_dir
    wf.connect(get_subjects_list_adults, 'subjects_list_adults', build_file_list,'subjects_list')
    build_file_list.inputs.suffix = metrics_data_suffix



    # MERGE FILES
    merge = Node(fsl.Merge(dimension='t'), name='merge')
    wf.connect(build_file_list, 'out_file_list', merge, 'in_files')
    merge.inputs.merged_file = metric_name + '_merge.nii.gz'
    wf.connect(merge, 'merged_file', ds,'@merge')

    # GET MEAN VALUE WITHIN MASK FOR REGRESSION
    get_mean_values = Node(fsl.ImageStats(), name='get_mean_values')
    get_mean_values.inputs.op_string = '-k %s -m'
    get_mean_values.inputs.split_4d = True
    wf.connect(merge, 'merged_file', get_mean_values, 'in_file')
    wf.connect(selectfiles_anat_templates, 'brain_mask_MNI_3mm', get_mean_values, 'mask_file')



    # CALC MEAN
    mean = Node(fsl.MeanImage(), name='mean')
    mean.inputs.out_file = metric_name + '_mean.nii.gz'
    wf.connect(merge, 'merged_file', mean, 'in_file')
    wf.connect(mean, 'out_file', ds,'@mean')



    # CREATE DESIGN FILES
    def create_design_files_fct(df_demographics_path, df_qc_path, subjects_list, mean_values):
        '''
        df_path: df should have columns sex ('M', 'F') & age
        function
            .restricts df to subjects_list
            .creates dummy sex vars, age**2, contrasts
            .writes mat & con files
        '''

        import pandas as pd
        import numpy as np
        import os



        df = pd.read_pickle(df_demographics_path)

        df_qc = pd.read_pickle(df_qc_path)
        df = pd.merge(df, df_qc, left_index=True, right_index=True)


        df['dummy_m'] = (df.sex == 'M').astype('int')
        df['dummy_f'] = (df.sex == 'F').astype('int')
        df['age2'] = df.age**2
        df_use = df.loc[subjects_list]
        df_use['mean_values'] = mean_values


        #fixme
        mat = df_use[['dummy_m', 'dummy_f', 'age','mean_FD_Power']].values

        mat_str = [
            '/NumWaves %s'%str(mat.shape[1]),
            '/NumPoints %s'%str(mat.shape[0]),
            '/Matrix'
        ]

        n_cons = 6
        cons_str = [
            '/ContrastName1 pos_age',
            '/ContrastName2 neg_age',
            '/ContrastName3 m>f',
            '/ContrastName4 f>m',
            '/ContrastName5 pos_mean_FD_Power',
            '/ContrastName6 neg_mean_FD_Power',
            '/NumWaves %s'%str(mat.shape[1]),
            '/NumContrasts %s'%str(n_cons),
            '',
            '/Matrix',
            '0 0 1 0',
            '0 0 -1 0',
            '1 -1 0 0',
            '-1 1 0 0',
            '0 0 0 1',
            '0 0 0 -1'
            ]


        # mat = df_use[['dummy_m', 'dummy_f', 'age', 'age2','mean_FD_Power']].values
        #
        # mat_str = [
        #     '/NumWaves %s'%str(mat.shape[1]),
        #     '/NumPoints %s'%str(mat.shape[0]),
        #     '/Matrix'
        # ]
        #
        # n_cons = 10
        # cons_str = [
        #     '/ContrastName1 pos_age',
        #     '/ContrastName2 pos_age2',
        #     '/ContrastName3 pos_age+age2',
        #     '/ContrastName4 neg_age',
        #     '/ContrastName5 neg_age2',
        #     '/ContrastName6 neg_age+age2',
        #     '/ContrastName7 m>f',
        #     '/ContrastName8 f>m',
        #     '/ContrastName9 pos_mean_FD_Power',
        #     '/ContrastName10 neg_mean_FD_Power',
        #     '/NumWaves %s'%str(mat.shape[1]),
        #     '/NumContrasts %s'%str(n_cons),
        #     '',
        #     '/Matrix',
        #     '0 0 1 0 0',
        #     '0 0 0 1 0',
        #     '0 0 .5 .5 0',
        #     '0 0 -1 0 0',
        #     '0 0 0 -1 0',
        #     '0 0 -.5 -.5 0',
        #     '1 -1 0 0 0',
        #     '1 1 0 0 0',
        #     '0 0 0 0 1',
        #     '0 0 0 0 -1'
        #     ]
        #

        mat_file = os.path.join(os.getcwd(), 'design.mat')
        mat_file_numerical = os.path.join(os.getcwd(), 'design_mat_num.txt')
        con_file = os.path.join(os.getcwd(), 'design.con')
        df_used_file = os.path.join(os.getcwd(), 'df_used.csv')

        # mat_str_out = ''
        # for line in mat_str:
        #     mat_str_out = mat_str_out[:] + line + '\n'

        np.savetxt(mat_file_numerical, mat)
        num_file = open(mat_file_numerical, 'r')
        nums_str = num_file.read()
        num_file.close()

        out_file = open(mat_file, 'w')
        for line in mat_str:
            out_file.write('%s\n' % line)
        out_file.write(nums_str)
        out_file.close()

        out_file = open(con_file, 'w')
        for line in cons_str:
            out_file.write('%s\n' % line)
        out_file.close()

        df_use.to_csv(df_used_file)

        return mat_file, con_file, df_used_file, n_cons
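
    # For reference, the design.mat written by create_design_files_fct uses
    # FSL's plain-text VEST layout, e.g. (illustrative values only):
    #   /NumWaves 4
    #   /NumPoints 2
    #   /Matrix
    #   1 0 23 0.05
    #   0 1 31 0.07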


    create_design_files = Node(util.Function(input_names=['df_demographics_path', 'df_qc_path', 'subjects_list', 'mean_values'],
                                      output_names=['mat_file', 'con_file', 'df_used_file', 'n_cons'],
                                      function=create_design_files_fct),
                        name='create_design_files')
    create_design_files.inputs.df_demographics_path = demos_df
    create_design_files.inputs.df_qc_path = qc_df
    wf.connect(get_subjects_list_adults, 'subjects_list_adults', create_design_files,'subjects_list')
    wf.connect(get_mean_values, 'out_stat', create_design_files,'mean_values')
    wf.connect(create_design_files,'df_used_file', ds,'glm.@df_used')



    smooth = Node(fsl.utils.Smooth(fwhm=4), name='smooth')
    wf.connect(merge, 'merged_file', smooth, 'in_file')



    def run_randomise_fct(data_file, mat_file, con_file, mask_file):
        import os
        out_dir = os.path.join(os.getcwd(), 'glm')
        os.mkdir(out_dir)
        cmd_str = 'randomise -i %s -o glm/glm -d %s -t %s -m %s -n 500 -D -T' % (
            data_file, mat_file, con_file, mask_file)
        with open('command.txt', 'w') as f:
            f.write(cmd_str)
        os.system(cmd_str)

        return out_dir



    run_randomise = Node(util.Function(input_names=['data_file', 'mat_file', 'con_file', 'mask_file'],
                                      output_names=['out_dir'],
                                      function=run_randomise_fct),
                        name='run_randomise')
    #fixme
    #wf.connect(merge, 'merged_file', run_randomise, 'data_file')
    wf.connect(smooth, 'smoothed_file', run_randomise, 'data_file')
    wf.connect(create_design_files, 'mat_file', run_randomise, 'mat_file')
    wf.connect(create_design_files, 'con_file', run_randomise, 'con_file')
    #wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', run_randomise, 'mask_file')
    wf.connect(selectfiles_anat_templates, 'brain_mask_MNI_3mm', run_randomise, 'mask_file')



    def create_renders_fct(randomise_dir, n_cons, background_img):
        import os
        thresh = .95
        out_files_list = []
        corr_list = ['glm_tfce_corrp_tstat', 'glm_tfce_p_tstat']
        for corr in corr_list:
            for con in range(1,n_cons+1):
                output_root = corr + str(con)
                in_file = os.path.join(randomise_dir, output_root + '.nii.gz')
                out_file = os.path.join(os.getcwd(), 'rendered_' + output_root + '.png')
                out_files_list.append(out_file)
                cmd_str = 'easythresh %s %s %s %s' % (in_file, str(thresh), background_img, output_root)

                with open('command.txt', 'w') as f:
                    f.write(cmd_str)

                os.system(cmd_str)
        return out_files_list

    create_renders = Node(util.Function(input_names=['randomise_dir', 'n_cons', 'background_img'],
                                        output_names=['out_files_list'],
                                        function=create_renders_fct),
                          name='create_renders')
    wf.connect(run_randomise, 'out_dir', create_renders, 'randomise_dir')
    wf.connect(create_design_files, 'n_cons', create_renders, 'n_cons')
    wf.connect(selectfiles_anat_templates, 'brain_template_MNI_3mm', create_renders, 'background_img')
    wf.connect(create_renders, 'out_files_list', ds, 'glm.@renders')

    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # or graph2use='hierarchical'
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
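
# Usage sketch (hypothetical cfg; every key below is read at the top of the
# function):
#
# cfg = dict(metrics_data_dir='/data/metrics',
#            metrics_data_suffix='alff/alff.nii.gz',
#            metric_name='alff',
#            demos_df='/data/demos.pkl',
#            qc_df='/data/qc.pkl',
#            working_dir='/tmp/work',
#            ds_dir='/tmp/ds',
#            template_dir='/data/templates',
#            subjects_list=['sub001', 'sub002'],
#            use_n_procs=4,
#            plugin_name='MultiProc')
# collect_3d_metrics_run_glm_meanRegression(cfg)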
    
Example #19
fmapepi.connect([(inputnode, convertwarp, [('anat_head', 'reference')]),
                 (convertwarp0, convertwarp, [('out_field', 'warp1')]),
                 (bbregister, convertwarp, [('out_fsl_file', 'postmat')]),
                 (inputnode, applywarp, [('epi_mean', 'in_file'),
                                         ('anat_head', 'ref_file')]),
                 (convertwarp, applywarp, [('out_field', 'field_file')]),
                 (applywarp, outputnode, [('out_file', 'fmap_mean_coreg')]),
                 (convertwarp, outputnode, [('out_field', 'fmap_fullwarp')])
              ])



#### running directly ############################################################################################################

fmapepi.base_dir = '/scr/kansas1/huntenburg/lemon_missing/working_dir/'
# fmapepi.config['execution'] = {'remove_unnecessary_outputs': 'False'}
data_dir = '/scr/jessica2/Schaare/LEMON/'
fs_subjects_dir = '/scr/jessica2/Schaare/LEMON/freesurfer/'
out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
# subjects = ['LEMON001']
subjects = []
f = open('/scr/jessica2/Schaare/LEMON/missing_subjects.txt', 'r')
for line in f:
    subjects.append(line.strip())

# create infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id', 'fs_subjects_dir']),
                  name='infosource')
infosource.inputs.fs_subjects_dir = fs_subjects_dir
infosource.iterables = ('subject_id', subjects)
Example #20
def create_resting(subject, working_dir, data_dir, freesurfer_dir, out_dir,
                   vol_to_remove, TR, epi_resolution, highpass, lowpass,
                   echo_space, pe_dir, standard_brain,
                   standard_brain_resampled, standard_brain_mask,
                   standard_brain_mask_resampled, fwhm_smoothing):
    # set fsl output type to nii.gz
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # main workflow
    func_preproc = Workflow(name='resting_postscrub')
    func_preproc.base_dir = working_dir
    func_preproc.config['execution'][
        'crashdump_dir'] = func_preproc.base_dir + "/crash_files"
    # select files
    templates = {
        'epi_scrubbed_interp':
        'preprocessing/preprocessed/{subject}/scrubbed_interpolated/rest2anat_denoised_scrubbed_intep.nii.gz',
        'anat_head':
        'preprocessing/preprocessed/{subject}/structural/T1.nii.gz',
        'anat_brain':
        'preprocessing/preprocessed/{subject}/structural/brain.nii.gz',
        'brain_mask':
        'preprocessing/preprocessed/{subject}/structural/T1_brain_mask.nii.gz',
        'ants_affine':
        'preprocessing/preprocessed/{subject}/structural/transforms2mni/transform0GenericAffine.mat',
        'ants_warp':
        'preprocessing/preprocessed/{subject}/structural/transforms2mni/transform1Warp.nii.gz'
    }

    selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                       name="selectfiles")
    selectfiles.inputs.subject = subject

    # node to remove first volumes
    #    remove_vol = Node(util.Function(input_names=['in_file', 't_min'],
    #                                    output_names=["out_file"],
    #                                    function=strip_rois_func),
    #                      name='remove_vol')
    #    remove_vol.inputs.t_min = vol_to_remove
    #    # workflow for motion correction
    #    moco = create_moco_pipeline()
    #
    #    # workflow for fieldmap correction and coregistration
    #    topup_coreg = create_topup_coreg_pipeline()
    #    topup_coreg.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    #    topup_coreg.inputs.inputnode.fs_subject_id = subject
    #    topup_coreg.inputs.inputnode.echo_space = echo_space
    #    topup_coreg.inputs.inputnode.pe_dir = pe_dir
    #
    #    # workflow for applying transformations to timeseries
    #    transform_ts = create_transform_pipeline()
    #    transform_ts.inputs.inputnode.resolution = epi_resolution
    #
    #
    #    # workflow to denoise timeseries
    denoise = create_denoise_pipeline()
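    # note: fslmaths' bptf filter expects Gaussian sigmas in volumes rather
    # than frequencies; sigma = 1 / (2 * TR * cutoff_Hz) does that conversion
    # for the highpass/lowpass cutoffs below (see the FSL list link).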
    denoise.inputs.inputnode.highpass_sigma = 1. / (2 * TR * highpass)
    denoise.inputs.inputnode.lowpass_sigma = 1. / (2 * TR * lowpass)
    # https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1205&L=FSL&P=R57592&1=FSL&9=A&I=-3&J=on&d=No+Match%3BMatch%3BMatches&z=4
    denoise.inputs.inputnode.tr = TR

    # workflow to transform timeseries to MNI
    ants_registration = create_ants_registration_pipeline()
    ants_registration.inputs.inputnode.ref = standard_brain
    ants_registration.inputs.inputnode.tr_sec = TR

    # FL added fullspectrum
    # workflow to transform fullspectrum timeseries to MNI
    #ants_registration_full = create_ants_registration_pipeline('ants_registration_full')
    #ants_registration_full.inputs.inputnode.ref = standard_brain
    #ants_registration_full.inputs.inputnode.tr_sec = TR

    # workflow to smooth
    smoothing = create_smoothing_pipeline()
    smoothing.inputs.inputnode.fwhm = fwhm_smoothing

    # visualize registration results
    visualize = create_visualize_pipeline()
    visualize.inputs.inputnode.mni_template = standard_brain

    # sink to store files
    sink = Node(nio.DataSink(
        parameterization=False,
        base_directory=out_dir,
        substitutions=[('rest_denoised_bandpassed_norm_trans.nii.gz',
                        'rest_scrubbed_int_mni_unsmoothed.nii.gz'),
                       ('rest_denoised_bandpassed_norm_trans_smooth.nii.gz',
                        'rest_scrubbed_int_mni_smoothed.nii.gz'),
                       ('rest_denoised_bandpassed_norm.nii.gz',
                        'rest_denoised_scrubbed_int_bp.nii.gz')]),
                name='sink')

    # connections
    func_preproc.connect([
        # remove the first volumes
        #        (selectfiles, remove_vol, [('func', 'in_file')]),
        #
        #        # align volumes and motion correction
        #        (remove_vol, moco, [('out_file', 'inputnode.epi')]),
        #
        #        # prepare field map
        #        (selectfiles, topup_coreg, [('ap', 'inputnode.ap'),
        #                                   ('pa', 'inputnode.pa'),
        #                                   ('anat_head', 'inputnode.anat_head'),
        #                                   ('anat_brain', 'inputnode.anat_brain')
        #                                   ]),
        #        (moco, topup_coreg, [('outputnode.epi_mean', 'inputnode.epi_mean')]),
        #
        #        # transform timeseries
        #        (remove_vol, transform_ts, [('out_file', 'inputnode.orig_ts')]),
        #        (selectfiles, transform_ts, [('anat_head', 'inputnode.anat_head')]),
        #        (selectfiles, transform_ts, [('brain_mask', 'inputnode.brain_mask')]),
        #        (moco, transform_ts, [('outputnode.mat_moco', 'inputnode.mat_moco')]),
        #        (topup_coreg, transform_ts, [('outputnode.fmap_fullwarp', 'inputnode.fullwarp')]),
        #
        #        # correct slicetiming
        #        # FIXME slice timing?
        #        # (transform_ts, slicetiming, [('outputnode.trans_ts_masked', 'inputnode.ts')]),
        #        # (slicetiming, denoise, [('outputnode.ts_slicetcorrected', 'inputnode.epi_coreg')]),
        #        (transform_ts, denoise, [('outputnode.trans_ts_masked', 'inputnode.epi_coreg')]),

        # denoise data
        (selectfiles, denoise, [('brain_mask', 'inputnode.brain_mask'),
                                ('anat_brain', 'inputnode.anat_brain'),
                                ('epi_scrubbed_interp',
                                 'inputnode.epi_denoised')]),
        (denoise, ants_registration, [('outputnode.normalized_file',
                                       'inputnode.denoised_ts')]),

        # registration to MNI space
        (selectfiles, ants_registration, [('ants_affine',
                                           'inputnode.ants_affine')]),
        (selectfiles, ants_registration, [('ants_warp', 'inputnode.ants_warp')
                                          ]),

        # FL added fullspectrum
        #(selectfiles, ants_registration_full, [('epi_scrubbed_interp', 'inputnode.denoised_ts')]),
        #(selectfiles, ants_registration_full, [('ants_affine', 'inputnode.ants_affine')]),
        #(selectfiles, ants_registration_full, [('ants_warp', 'inputnode.ants_warp')]),
        (ants_registration, smoothing, [('outputnode.ants_reg_ts',
                                         'inputnode.ts_transformed')]),
        (smoothing, visualize, [('outputnode.ts_smoothed',
                                 'inputnode.ts_transformed')]),

        ##all the output
        #        (moco, sink, [  # ('outputnode.epi_moco', 'realign.@realigned_ts'),
        #                        ('outputnode.par_moco', 'realign.@par'),
        #                        ('outputnode.rms_moco', 'realign.@rms'),
        #                        ('outputnode.mat_moco', 'realign.MAT.@mat'),
        #                        ('outputnode.epi_mean', 'realign.@mean'),
        #                        ('outputnode.rotplot', 'realign.plots.@rotplot'),
        #                        ('outputnode.transplot', 'realign.plots.@transplot'),
        #                        ('outputnode.dispplots', 'realign.plots.@dispplots'),
        #                        ('outputnode.tsnr_file', 'realign.@tsnr')]),
        #        (topup_coreg, sink, [('outputnode.fmap', 'coregister.transforms2anat.@fmap'),
        #                            # ('outputnode.unwarpfield_epi2fmap', 'coregister.@unwarpfield_epi2fmap'),
        #                            ('outputnode.unwarped_mean_epi2fmap', 'coregister.@unwarped_mean_epi2fmap'),
        #                            ('outputnode.epi2fmap', 'coregister.@epi2fmap'),
        #                            # ('outputnode.shiftmap', 'coregister.@shiftmap'),
        #                            ('outputnode.fmap_fullwarp', 'coregister.transforms2anat.@fmap_fullwarp'),
        #                            ('outputnode.epi2anat', 'coregister.@epi2anat'),
        #                            ('outputnode.epi2anat_mat', 'coregister.transforms2anat.@epi2anat_mat'),
        #                            ('outputnode.epi2anat_dat', 'coregister.transforms2anat.@epi2anat_dat'),
        #                            ('outputnode.epi2anat_mincost', 'coregister.@epi2anat_mincost')
        #                            ]),
        #
        #        (transform_ts, sink, [('outputnode.trans_ts_masked', 'coregister.@full_transform_ts'),
        #                              ('outputnode.trans_ts_mean', 'coregister.@full_transform_mean'),
        #                              ('outputnode.resamp_brain', 'coregister.@resamp_brain')]),
        # FL added fullspectrum
        (denoise, sink, [('outputnode.normalized_file', 'denoise.@normalized')]),
        (ants_registration, sink, [('outputnode.ants_reg_ts',
                                    'ants.@antsnormalized')]),
        #(ants_registration_full, sink, [('outputnode.ants_reg_ts', 'ants.@antsnormalized_fullspectrum')]),
        (smoothing, sink, [('outputnode.ts_smoothed', '@smoothed.FWHM6')]),
    ])

    func_preproc.write_graph(dotfilename='func_preproc.dot',
                             graph2use='colored',
                             format='pdf',
                             simple_form=True)
    func_preproc.run()
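
# Usage sketch (hypothetical values; parameters mirror the signature above):
#
# create_resting(subject='sub001', working_dir='/tmp/work', data_dir='/data',
#                freesurfer_dir='/data/freesurfer', out_dir='/data/out',
#                vol_to_remove=5, TR=2.0, epi_resolution=3.0,
#                highpass=0.01, lowpass=0.1, echo_space=0.0005, pe_dir='y-',
#                standard_brain='/templates/MNI152_T1_2mm_brain.nii.gz',
#                standard_brain_resampled='/templates/MNI152_T1_3mm_brain.nii.gz',
#                standard_brain_mask='/templates/MNI152_T1_2mm_brain_mask.nii.gz',
#                standard_brain_mask_resampled='/templates/MNI152_T1_3mm_brain_mask.nii.gz',
#                fwhm_smoothing=6.0)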
Example #21
def main():
    """Entry point"""
    from nipype import config as ncfg
    from nipype.pipeline.engine import Workflow
    from mriqc import DEFAULTS
    from mriqc.utils.bids import collect_bids_data
    from mriqc.workflows.core import build_workflow
    # from mriqc.reports.utils import check_reports

    parser = ArgumentParser(description='MRI Quality Control',
                            formatter_class=RawTextHelpFormatter)

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='mriqc v{}'.format(__version__))

    parser.add_argument('bids_dir',
                        action='store',
                        help='The directory with the input dataset '
                        'formatted according to the BIDS standard.')
    parser.add_argument(
        'output_dir',
        action='store',
        help='The directory where the output files '
        'should be stored. If you are running group level analysis '
        'this folder should be prepopulated with the results of the'
        'participant level analysis.')
    parser.add_argument(
        'analysis_level',
        action='store',
        nargs='+',
        help='Level of the analysis that will be performed. '
        'Multiple participant level analyses can be run independently '
        '(in parallel) using the same output_dir.',
        choices=['participant', 'group'])
    parser.add_argument(
        '--participant_label',
        '--subject_list',
        '-S',
        action='store',
        help='The label(s) of the participant(s) that should be analyzed. '
        'The label corresponds to sub-<participant_label> from the '
        'BIDS spec (so it does not include "sub-"). If this parameter '
        'is not provided all subjects should be analyzed. Multiple '
        'participants can be specified with a space separated list.',
        nargs="*")

    g_input = parser.add_argument_group('mriqc specific inputs')
    g_input.add_argument('-m',
                         '--modalities',
                         action='store',
                         nargs='*',
                         choices=['T1w', 'bold', 'T2w'],
                         default=['T1w', 'bold', 'T2w'])
    g_input.add_argument('-s', '--session-id', action='store')
    g_input.add_argument('-r', '--run-id', action='store')
    g_input.add_argument('--nthreads',
                         action='store',
                         type=int,
                         help='number of threads (deprecated, use --n_procs)')
    g_input.add_argument('--n_procs',
                         action='store',
                         default=0,
                         type=int,
                         help='number of threads')
    g_input.add_argument('--mem_gb',
                         action='store',
                         default=0,
                         type=int,
                         help='available total memory')
    g_input.add_argument('--write-graph',
                         action='store_true',
                         default=False,
                         help='Write workflow graph.')
    g_input.add_argument('--dry-run',
                         action='store_true',
                         default=False,
                         help='Do not run the workflow.')
    g_input.add_argument('--use-plugin',
                         action='store',
                         default=None,
                         help='nipype plugin configuration file')

    g_input.add_argument('--testing',
                         action='store_true',
                         default=False,
                         help='use testing settings for a minimal footprint')
    g_input.add_argument(
        '--hmc-afni',
        action='store_true',
        default=True,
        help='Use AFNI 3dvolreg for head motion correction (HMC)')
    g_input.add_argument(
        '--hmc-fsl',
        action='store_true',
        default=False,
        help='Use FSL MCFLIRT for head motion correction (HMC)')
    g_input.add_argument(
        '-f',
        '--float32',
        action='store_true',
        default=DEFAULTS['float32'],
        help=
        "Cast the input data to float32 if it's represented in higher precision "
        "(saves space and improves perfomance)")
    g_input.add_argument('--fft-spikes-detector',
                         action='store_true',
                         default=False,
                         help='Turn on FFT based spike detector (slow).')

    g_outputs = parser.add_argument_group('mriqc specific outputs')
    g_outputs.add_argument('-w',
                           '--work-dir',
                           action='store',
                           default=op.join(os.getcwd(), 'work'))
    g_outputs.add_argument('--report-dir', action='store')
    g_outputs.add_argument('--verbose-reports',
                           default=False,
                           action='store_true')

    # ANTs options
    g_ants = parser.add_argument_group(
        'specific settings for ANTs registrations')
    g_ants.add_argument(
        '--ants-nthreads',
        action='store',
        type=int,
        default=DEFAULTS['ants_nthreads'],
        help='number of threads that will be set in ANTs processes')
    g_ants.add_argument('--ants-settings',
                        action='store',
                        help='path to JSON file with settings for ANTS')

    # AFNI head motion correction settings
    g_afni = parser.add_argument_group(
        'specific settings for AFNI head motion correction')
    g_afni.add_argument(
        '--deoblique',
        action='store_true',
        default=False,
        help='Deoblique the functional scans during head motion '
        'correction preprocessing')
    g_afni.add_argument(
        '--despike',
        action='store_true',
        default=False,
        help='Despike the functional scans during head motion correction '
        'preprocessing')
    g_afni.add_argument(
        '--start-idx',
        action='store',
        type=int,
        help='Initial volume in functional timeseries that should be '
        'considered for preprocessing')
    g_afni.add_argument(
        '--stop-idx',
        action='store',
        type=int,
        help='Final volume in functional timeseries that should be '
        'considered for preprocessing')
    g_afni.add_argument('--correct-slice-timing',
                        action='store_true',
                        default=False,
                        help='Perform slice timing correction')

    opts = parser.parse_args()

    # Build settings dict
    bids_dir = op.abspath(opts.bids_dir)

    # Number of processes
    n_procs = 0
    if opts.nthreads is not None:
        MRIQC_LOG.warn('Option --nthreads has been deprecated in mriqc 0.8.8. '
                       'Please use --n_procs instead.')
        n_procs = opts.nthreads
    if opts.n_procs:  # default is 0; testing "is not None" would always clobber --nthreads
        n_procs = opts.n_procs

    # Check physical memory
    total_memory = opts.mem_gb
    if total_memory <= 0:  # default 0 means 'estimate from the system'
        try:
            from psutil import virtual_memory
            total_memory = virtual_memory().total // (1024**3) + 1
        except ImportError:
            MRIQC_LOG.warn(
                'Total physical memory could not be estimated, using %d'
                'GB as default', DEFAULT_MEM_GB)
            total_memory = DEFAULT_MEM_GB

    if total_memory > 0:
        av_procs = total_memory // 4
        if av_procs < 1:
            MRIQC_LOG.warn(
                'Total physical memory is less than 4GB, memory allocation'
                ' problems are likely to occur.')
            n_procs = 1
        elif n_procs > av_procs:
            n_procs = av_procs
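    # For illustration (an assumption, not from the source): with --mem_gb 16 the
    # cap is av_procs = 16 // 4 = 4, so a requested --n_procs 8 is reduced to 4;
    # with --mem_gb 3, av_procs = 0 and n_procs is forced to 1 with a warning.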

    settings = {
        'bids_dir': bids_dir,
        'write_graph': opts.write_graph,
        'testing': opts.testing,
        'hmc_afni': opts.hmc_afni,
        'hmc_fsl': opts.hmc_fsl,
        'fft_spikes_detector': opts.fft_spikes_detector,
        'n_procs': n_procs,
        'ants_nthreads': opts.ants_nthreads,
        'output_dir': op.abspath(opts.output_dir),
        'work_dir': op.abspath(opts.work_dir),
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32
    }

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    log_dir = op.join(settings['output_dir'], 'logs')

    analysis_levels = opts.analysis_level
    if opts.participant_label is None:
        analysis_levels.append('group')
    analysis_levels = list(set(analysis_levels))
    if len(analysis_levels) > 2:
        raise RuntimeError('Error parsing analysis levels, got "%s"' %
                           ', '.join(analysis_levels))

    settings['report_dir'] = opts.report_dir
    if not settings['report_dir']:
        settings['report_dir'] = op.join(settings['output_dir'], 'reports')

    check_folder(settings['output_dir'])
    if 'participant' in analysis_levels:
        check_folder(settings['work_dir'])

    check_folder(log_dir)
    check_folder(settings['report_dir'])

    # Set nipype config
    ncfg.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })

    plugin_settings = {'plugin': 'Linear'}
    if opts.use_plugin is not None:
        from yaml import load as loadyml
        with open(opts.use_plugin) as pfile:
            plugin_settings = loadyml(pfile)
    else:
        # Setup multiprocessing
        if settings['n_procs'] == 0:
            settings['n_procs'] = 1
            max_parallel_ants = cpu_count() // settings['ants_nthreads']
            if max_parallel_ants > 1:
                settings['n_procs'] = max_parallel_ants

        if settings['n_procs'] > 1:
            plugin_settings['plugin'] = 'MultiProc'
            plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}
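
    # A minimal example of the YAML file accepted by --use-plugin (values are
    # illustrative, not from the source):
    #
    #     plugin: MultiProc
    #     plugin_args: {n_procs: 8}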

    MRIQC_LOG.info(
        'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
        __version__, ', '.join(analysis_levels), opts.participant_label,
        settings)

    # Process data types
    modalities = opts.modalities

    dataset = collect_bids_data(settings['bids_dir'],
                                participant_label=opts.participant_label)

    # Set up participant level
    if 'participant' in analysis_levels:
        workflow = Workflow(name='workflow_enumerator')
        workflow.base_dir = settings['work_dir']

        wf_list = []
        for mod in modalities:
            if not dataset[mod]:
                MRIQC_LOG.warn('No %s scans were found in %s', mod,
                               settings['bids_dir'])
                continue

            wf_list.append(build_workflow(dataset[mod], mod,
                                          settings=settings))

        if wf_list:
            workflow.add_nodes(wf_list)

            if not opts.dry_run:
                workflow.run(**plugin_settings)
        else:
            raise RuntimeError(
                'Error reading BIDS directory (%s), or the dataset is not '
                'BIDS-compliant.' % settings['bids_dir'])

    # Set up group level
    if 'group' in analysis_levels:
        from mriqc.reports import group_html
        from mriqc.utils.misc import generate_csv, generate_pred

        reports_dir = check_folder(op.join(settings['output_dir'], 'reports'))
        derivatives_dir = op.join(settings['output_dir'], 'derivatives')

        n_group_reports = 0
        for mod in modalities:
            dataframe, out_csv = generate_csv(derivatives_dir,
                                              settings['output_dir'], mod)

            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                MRIQC_LOG.warn(
                    'No IQM-JSON files were found for the %s data type in %s. The group-level '
                    'report was not generated.', mod, derivatives_dir)
                continue

            MRIQC_LOG.info('Summary CSV table for the %s data generated (%s)',
                           mod, out_csv)

            out_pred = generate_pred(derivatives_dir, settings['output_dir'],
                                     mod)
            if out_pred is not None:
                MRIQC_LOG.info(
                    'Predicted QA CSV table for the %s data generated (%s)',
                    mod, out_pred)

            out_html = op.join(reports_dir, mod + '_group.html')
            group_html(out_csv,
                       mod,
                       csv_failed=op.join(settings['output_dir'],
                                          'failed_' + mod + '.csv'),
                       out_file=out_html)
            MRIQC_LOG.info('Group-%s report generated (%s)', mod, out_html)
            n_group_reports += 1

        if n_group_reports == 0:
            raise Exception(
                "No data found. No group level reports were generated.")

def calc_local_metrics(
    preprocessed_data_dir,
    subject_id,
    parcellations_dict,
    bp_freq_list,
    fd_thresh,
    working_dir,
    ds_dir,
    use_n_procs,
    plugin_name,
):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import utils as calc_metrics_utils

    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    wf = Workflow(name="LeiCA_LIFE_metrics")
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(
        logging=dict(workflow_level="DEBUG"),
        execution={"stop_on_first_crash": True, "remove_unnecessary_outputs": True, "job_finished_timeout": 15},
    )
    config.update_config(nipype_cfg)
    wf.config["execution"]["crashdump_dir"] = os.path.join(working_dir, "crash")

    ds = Node(nio.DataSink(base_directory=ds_dir), name="ds")
    ds.inputs.regexp_substitutions = [
        ("MNI_resampled_brain_mask_calc.nii.gz", "falff.nii.gz"),
        ("residual_filtered_3dT.nii.gz", "alff.nii.gz"),
        ("_parcellation_", ""),
        ("_bp_freqs_", "bp_"),
    ]

    #####################
    # ITERATORS
    #####################
    # PARCELLATION ITERATOR
    parcellation_infosource = Node(util.IdentityInterface(fields=["parcellation"]), name="parcellation_infosource")
    parcellation_infosource.iterables = ("parcellation", list(parcellations_dict.keys()))

    bp_filter_infosource = Node(util.IdentityInterface(fields=["bp_freqs"]), name="bp_filter_infosource")
    bp_filter_infosource.iterables = ("bp_freqs", bp_freq_list)

    selectfiles = Node(
        nio.SelectFiles(
            {
                "parcellation_time_series": "{subject_id}/con_mat/parcellated_time_series/bp_{bp_freqs}/{parcellation}/parcellation_time_series.npy"
            },
            base_directory=preprocessed_data_dir,
        ),
        name="selectfiles",
    )
    selectfiles.inputs.subject_id = subject_id
    wf.connect(parcellation_infosource, "parcellation", selectfiles, "parcellation")
    wf.connect(bp_filter_infosource, "bp_freqs", selectfiles, "bp_freqs")

    fd_file = Node(
        nio.SelectFiles({"fd_p": "{subject_id}/QC/FD_P_ts"}, base_directory=preprocessed_data_dir), name="fd_file"
    )
    fd_file.inputs.subject_id = subject_id

    ##############
    ## CON MATS
    ##############
    ##############
    ## extract ts
    ##############

    get_good_trs = Node(
        util.Function(
            input_names=["fd_file", "fd_thresh"],
            output_names=["good_trs", "fd_scrubbed_file"],
            function=calc_metrics_utils.get_good_trs,
        ),
        name="get_good_trs",
    )
    wf.connect(fd_file, "fd_p", get_good_trs, "fd_file")
    get_good_trs.inputs.fd_thresh = fd_thresh
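
    # A plausible sketch of calc_metrics_utils.get_good_trs (that module is not
    # shown here, so the body below is an assumption): keep the indices of TRs
    # whose framewise displacement stays below fd_thresh and write the scrubbed
    # FD trace alongside them.
    #
    # def get_good_trs(fd_file, fd_thresh):
    #     import os
    #     import numpy as np
    #     fd = np.genfromtxt(fd_file)
    #     good_trs = np.where(fd <= fd_thresh)[0]
    #     fd_scrubbed_file = os.path.abspath("fd_scrubbed.txt")
    #     np.savetxt(fd_scrubbed_file, fd[good_trs])
    #     return good_trs, fd_scrubbed_file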

    parcellated_ts_scrubbed = Node(
        util.Function(
            input_names=["parcellation_time_series_file", "good_trs"],
            output_names=["parcellation_time_series_scrubbed"],
            function=calc_metrics_utils.parcellation_time_series_scrubbing,
        ),
        name="parcellated_ts_scrubbed",
    )

    wf.connect(selectfiles, "parcellation_time_series", parcellated_ts_scrubbed, "parcellation_time_series_file")
    wf.connect(get_good_trs, "good_trs", parcellated_ts_scrubbed, "good_trs")

    ##############
    ## get conmat
    ##############
    con_mat = Node(
        util.Function(
            input_names=["in_data", "extraction_method"],
            output_names=["matrix", "matrix_file"],
            function=calc_metrics_utils.calculate_connectivity_matrix,
        ),
        name="con_mat",
    )
    con_mat.inputs.extraction_method = "correlation"
    wf.connect(parcellated_ts_scrubbed, "parcellation_time_series_scrubbed", con_mat, "in_data")
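
    # Hypothetical sketch of calc_metrics_utils.calculate_connectivity_matrix,
    # assuming in_data is an (n_TRs x n_parcels) array and extraction_method
    # "correlation" means a plain Pearson correlation matrix:
    #
    # def calculate_connectivity_matrix(in_data, extraction_method):
    #     import os
    #     import numpy as np
    #     if extraction_method == "correlation":
    #         matrix = np.corrcoef(in_data.T)
    #     else:
    #         raise NotImplementedError(extraction_method)
    #     matrix_file = os.path.abspath("matrix.npy")
    #     np.save(matrix_file, matrix)
    #     return matrix, matrix_file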

    ##############
    ## ds
    ##############

    wf.connect(get_good_trs, "fd_scrubbed_file", ds, "QC.@fd_scrubbed_file")
    fd_str = ("%.1f" % fd_thresh).replace(".", "_")
    wf.connect(con_mat, "matrix_file", ds, "con_mat.matrix_scrubbed_%s.@mat" % fd_str)

    # wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    # wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    # wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == "CondorDAGMan":
        wf.run(plugin=plugin_name, plugin_args={"initial_specs": "request_memory = 1500"})
    if plugin_name == "MultiProc":
        wf.run(plugin=plugin_name, plugin_args={"n_procs": use_n_procs})
Example #23
import nipype.interfaces.fsl as fsl
import nipype.algorithms.rapidart as ra
import nipype.interfaces.utility as util
from nipype.pipeline.engine import Node, Workflow
from functions import motion_regressors, selectindex, nilearn_denoise, fix_hdr

# read in subjects and file names

subjects = [['P46', 'XXXX']]
sessions = ['d0']

# directories
working_dir = '/nobackup/eminem2/schmidt/MMPIRS/preprocessing/working_dir'
out_dir = '/nobackup/eminem2/schmidt/MMPIRS/preprocessing/final/'

# main workflow
preproc_nuisance_regress = Workflow(name='func_preproc_nuisance_regress')
preproc_nuisance_regress.base_dir = working_dir
preproc_nuisance_regress.config['execution'][
    'crashdump_dir'] = preproc_nuisance_regress.base_dir + "/crash_files"

# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=['subjectlist']),
                          name='subject_infosource')
subject_infosource.iterables = [('subjectlist', subjects)]

# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=['session']),
                          name='session_infosource')
session_infosource.iterables = [('session', sessions)]

# select files

def create_structural(subject, working_dir, data_dir, freesurfer_dir, out_dir,
                      standard_brain):
    
    # main workflow
    struct_preproc = Workflow(name='mp2rage_preproc')
    struct_preproc.base_dir = working_dir
    struct_preproc.config['execution']['crashdump_dir'] = struct_preproc.base_dir + "/crash_files"
    
    # select files
    templates={'inv2': 'nifti/mp2rage/inv2.nii.gz',
               't1map': 'nifti/mp2rage/t1map.nii.gz',
               'uni': 'nifti/mp2rage/uni.nii.gz'}
    selectfiles = Node(nio.SelectFiles(templates,
                                       base_directory=data_dir),
                       name="selectfiles")
    
    # workflow for mp2rage background masking
    mp2rage = create_mp2rage_pipeline()

    # workflow to run freesurfer reconall
    reconall = create_reconall_pipeline()
    reconall.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    reconall.inputs.inputnode.fs_subject_id = subject

    # workflow to get brain, head and wmseg from freesurfer and convert to nifti
    mgzconvert = create_mgzconvert_pipeline()

    # workflow to normalize anatomy to standard space
    normalize = create_normalize_pipeline()
    normalize.inputs.inputnode.standard = standard_brain

    # sink to store files
    sink = Node(nio.DataSink(base_directory=out_dir,
                             parameterization=False,
                             substitutions=[('outStripped', 'uni_stripped'),
                                            ('outMasked2', 'uni_masked'),
                                            ('outSignal2', 'background_mask'),
                                            ('outOriginal', 'uni_reoriented'),
                                            ('outMask', 'skullstrip_mask'),
                                            ('transform_Warped', 'T1_brain2mni')]),
                 name='sink')
    
    
    # connections
    struct_preproc.connect([(selectfiles, mp2rage, [('inv2', 'inputnode.inv2'),
                                                    ('t1map', 'inputnode.t1map'),
                                                    ('uni', 'inputnode.uni')]),
                            (mp2rage, reconall, [('outputnode.uni_masked', 'inputnode.anat')]),
                            (reconall, mgzconvert, [('outputnode.fs_subject_id', 'inputnode.fs_subject_id'),
                                                    ('outputnode.fs_subjects_dir', 'inputnode.fs_subjects_dir')]),
                            (mgzconvert, normalize, [('outputnode.anat_brain', 'inputnode.anat')]),
                            #(mp2rage, sink, [('outputnode.uni_masked', 'preprocessed.mp2rage.background_masking.@uni_masked'),
                            #                 ('outputnode.background_mask', 'preprocessed.mp2rage.background_masking.@background_mask')
                            #                 ]),
                            (mgzconvert, sink, [('outputnode.anat_head', 'preprocessed.anat.@head'),
                                                ('outputnode.anat_brain', 'preprocessed.anat.@brain'),
                                                ('outputnode.func_mask', 'preprocessed.anat.@func_mask'),
                                                ('outputnode.wmedge', 'preprocessed.anat.@wmedge'),
                                                #('outputnode.wmseg', 'preprocessed.mp2rage.brain_extraction.@wmseg')
                                                ]),
                            (normalize, sink, [('outputnode.anat2std', 'preprocessed.anat.@anat2std'),
                                               ('outputnode.anat2std_transforms', 'preprocessed.anat.transforms2mni.@anat2std_transforms'),
                                               ('outputnode.std2anat_transforms', 'preprocessed.anat.transforms2mni.@std2anat_transforms')])
                            ])
    #struct_preproc.write_graph(dotfilename='struct_preproc.dot', graph2use='colored', format='pdf', simple_form=True)
    struct_preproc.run()
Example #25
experiment_dir = '/home/in/aeed/poldrack_gabmling/'

# subject_list = [
#                 'sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05', 'sub-06', 'sub-08', 'sub-09',
#                 'sub-10', 'sub-11', 'sub-13', 'sub-14', 'sub-15', 'sub-16']

subject_list = ['sub-02']

output_dir = '/home/in/aeed/poldrack_gabmling/output_MGT_poldrack_proc_2nd_level'
working_dir = '/home/in/aeed/poldrack_gabmling/workingdir_MGT_poldrack_proc_2nd_level'

no_runs = 3

proc_2nd_level = Workflow(name='proc_2nd_level')
proc_2nd_level.base_dir = opj(experiment_dir, working_dir)

#==========================================================================================================================================================
# In[3]:
# to prevent nipype from iterating over the anat image with each func run, you need separate
# nodes to select the files,
# and this solves a problem I had for almost 6 months;
# but note that for the sessions you also have to iterate over subject_id to get the {subject_id} var

# Infosource - a function free node to iterate over the list of subject names

infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = [('subject_id', subject_list)]

#==========================================================================================================================================================
# In[4]:
# cutoff volumes = 1/(2*TR*cutoff in Hz)
# https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1205&L=FSL&P=R57592&1=FSL&9=A&I=-3&J=on&d=No+Match%3BMatch%3BMatches&z=4
lp = 1.0 / (2 * 1.4 * 0.1)
hp = 1.0 / (2 * 1.4 * 0.01)
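
# Worked example of the conversion above (an illustration; TR = 1.4 s as in this
# script): a 0.1 Hz low-pass cutoff gives lp = 1/(2*1.4*0.1) ~ 3.57 volumes and a
# 0.01 Hz high-pass cutoff gives hp = 1/(2*1.4*0.01) ~ 35.7 volumes; these are the
# sigmas handed to fsl.TemporalFilter below.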

bandpass_filter = Node(fsl.TemporalFilter(lowpass_sigma=lp, highpass_sigma=hp), name="bandpass_filter")
bandpass_filter.plugin_args = {"initial_specs": "request_memory = 30000"}


preproc.connect(remove_noise, "out_file", bandpass_filter, "in_file")
preproc.connect(bandpass_filter, "out_file", outputnode, "filtered_file")


###################################################################################################################################
# in and out
preproc.base_dir = "/scr/kansas1/huntenburg/"
preproc.config["execution"] = {"remove_unnecessary_outputs": "False"}
out_dir = "/scr/"
data_dir = "/scr/"
subjects = ["LEMON006"]  # ,'LEMON001','LEMON087','LEMON030','LEMON044','LEMON071']


# infosource to iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="subject_infosource")
subject_infosource.iterables = ("subject_id", subjects)


# infosource to iterate over coregistration methods
cor_method_infosource = Node(util.IdentityInterface(fields=["cor_method"]), name="cor_method_infosource")
cor_method_infosource.iterables = ("cor_method", ["lin_ts"])  # , 'lin_ts', 'nonlin_ts', 'fmap_ts', 'topup_ts'])

subjects = list(subjects['DB'])
subjects.remove('KSMT')

sessions = ['rest1_1', 'rest1_2', 'rest2_1', 'rest2_2']

# directories
working_dir = '/scr/ilz3/myelinconnect/working_dir/' 
data_dir= '/scr/ilz3/myelinconnect/'
final_dir = '/scr/ilz3/myelinconnect/mappings/rest2highres/'

# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

# main workflow
mapping2struct = Workflow(name='mapping2struct')
mapping2struct.base_dir = working_dir
mapping2struct.config['execution']['crashdump_dir'] = mapping2struct.base_dir + "/crash_files"

# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=['subject']), 
                  name='subject_infosource')
subject_infosource.iterables=[('subject', subjects)]

# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=['session']), 
                  name='session_infosource')
session_infosource.iterables=[('session', sessions)]

# select files
templates={'mapping': 'mappings/rest/fixed_hdr/corr_{subject}_{session}_roi_detrended_median_corrected_mapping_fixed.nii.gz',
           'epi2highres_lin_itk' : 'resting/preprocessed/{subject}/{session}/registration/epi2highres_lin.txt',

def create_rsfMRI_preproc_pipeline(working_dir, freesurfer_dir, ds_dir, use_fs_brainmask, name='rsfMRI_preprocessing'):
    # initiate workflow
    rsfMRI_preproc_wf = Workflow(name=name)
    rsfMRI_preproc_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting')
    ds_dir = os.path.join(ds_dir, name)

    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['epi',
                                                    't1w',
                                                    'subject_id',
                                                    'TR_ms',
                                                    'vols_to_drop',
                                                    'lat_ventricle_mask_MNI',
                                                    'lp_cutoff_freq',
                                                    'hp_cutoff_freq']),
                     name='inputnode')

    # outputnode
    outputnode = Node(util.IdentityInterface(fields=['epi_moco',
                                                     'rs_preprocessed',
                                                     'epi_2_MNI_warp']),
                      name='outputnode')


    # MOCO
    moco = create_moco_pipeline(working_dir, ds_dir, 'motion_correction')
    rsfMRI_preproc_wf.connect(inputnode, 'epi', moco, 'inputnode.epi')
    rsfMRI_preproc_wf.connect(inputnode, 'vols_to_drop', moco, 'inputnode.vols_to_drop')



    # STRUCT PREPROCESSING
    struct_preproc = create_struct_preproc_pipeline(working_dir, freesurfer_dir, ds_dir, use_fs_brainmask, 'struct_preproc')
    rsfMRI_preproc_wf.connect(inputnode, 't1w', struct_preproc, 'inputnode.t1w')
    rsfMRI_preproc_wf.connect(inputnode, 'subject_id', struct_preproc, 'inputnode.subject_id')



    # REGISTRATIONS
    reg = create_registration_pipeline(working_dir, freesurfer_dir, ds_dir, 'registration')
    rsfMRI_preproc_wf.connect(moco, 'outputnode.initial_mean_epi_moco', reg, 'inputnode.initial_mean_epi_moco')
    rsfMRI_preproc_wf.connect(inputnode, 't1w', reg, 'inputnode.t1w')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.t1w_brain', reg, 'inputnode.t1w_brain')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.wm_mask_4_bbr', reg, 'inputnode.wm_mask_4_bbr')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.struct_brain_mask', reg, 'inputnode.struct_brain_mask')
    rsfMRI_preproc_wf.connect(inputnode, 'subject_id', reg, 'inputnode.subject_id')
    rsfMRI_preproc_wf.connect(reg, 'outputnode.epi_2_MNI_warp', outputnode, 'epi_2_MNI_warp')



    # DESKULL EPI
    deskull = create_deskull_pipeline(working_dir, ds_dir, 'deskull')
    rsfMRI_preproc_wf.connect(moco, 'outputnode.epi_moco', deskull, 'inputnode.epi_moco')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.struct_brain_mask', deskull, 'inputnode.struct_brain_mask')
    rsfMRI_preproc_wf.connect(reg, 'outputnode.struct_2_epi_mat', deskull, 'inputnode.struct_2_epi_mat')




    # DENOISE
    denoise = create_denoise_pipeline(working_dir, ds_dir, 'denoise')
    rsfMRI_preproc_wf.connect(inputnode, 'TR_ms', denoise, 'inputnode.TR_ms')
    rsfMRI_preproc_wf.connect(inputnode, 'subject_id', denoise, 'inputnode.subject_id')
    rsfMRI_preproc_wf.connect(inputnode, 'lat_ventricle_mask_MNI', denoise, 'inputnode.lat_ventricle_mask_MNI')
    rsfMRI_preproc_wf.connect(moco, 'outputnode.par_moco', denoise, 'inputnode.par_moco')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.epi_deskulled', denoise, 'inputnode.epi')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.mean_epi', denoise, 'inputnode.mean_epi')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.brain_mask_epiSpace', denoise, 'inputnode.brain_mask_epiSpace')
    rsfMRI_preproc_wf.connect(reg, 'outputnode.struct_2_epi_mat', denoise, 'inputnode.struct_2_epi_mat')
    rsfMRI_preproc_wf.connect(reg, 'outputnode.MNI_2_epi_warp', denoise, 'inputnode.MNI_2_epi_warp')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.wm_mask', denoise, 'inputnode.wm_mask')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.csf_mask', denoise, 'inputnode.csf_mask')
    rsfMRI_preproc_wf.connect(inputnode, 'lp_cutoff_freq', denoise, 'inputnode.lp_cutoff_freq')
    rsfMRI_preproc_wf.connect(inputnode, 'hp_cutoff_freq', denoise, 'inputnode.hp_cutoff_freq')

    rsfMRI_preproc_wf.connect(denoise, 'outputnode.rs_preprocessed', outputnode, 'rs_preprocessed')



    # QC
    qc = create_qc_pipeline(working_dir, ds_dir, 'qc')
    rsfMRI_preproc_wf.connect(inputnode, 'subject_id', qc, 'inputnode.subject_id')
    rsfMRI_preproc_wf.connect(moco, 'outputnode.par_moco', qc, 'inputnode.par_moco')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.epi_deskulled', qc, 'inputnode.epi_deskulled')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.brain_mask_epiSpace', qc, 'inputnode.brain_mask_epiSpace')
    rsfMRI_preproc_wf.connect([(struct_preproc, qc, [('outputnode.t1w_brain', 'inputnode.t1w_brain'),
                                                     ('outputnode.struct_brain_mask', 'inputnode.struct_brain_mask')])])
    rsfMRI_preproc_wf.connect([(reg, qc, [('outputnode.mean_epi_structSpace', 'inputnode.mean_epi_structSpace'),
                                          ('outputnode.mean_epi_MNIspace', 'inputnode.mean_epi_MNIspace'),
                                          ('outputnode.struct_MNIspace', 'inputnode.struct_MNIspace'),
                                          ('outputnode.struct_2_MNI_warp', 'inputnode.struct_2_MNI_warp')])])
    rsfMRI_preproc_wf.connect(denoise, 'outputnode.outlier_files', qc, 'inputnode.outlier_files')
    rsfMRI_preproc_wf.connect(denoise, 'outputnode.rs_preprocessed', qc, 'inputnode.rs_preprocessed')

    rsfMRI_preproc_wf.write_graph(dotfilename=rsfMRI_preproc_wf.name, graph2use='orig', format='pdf')
    rsfMRI_preproc_wf.write_graph(dotfilename=rsfMRI_preproc_wf.name, graph2use='colored', format='pdf')

    return rsfMRI_preproc_wf
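
# Illustrative use of the factory above (paths and inputs are made up):
# wf = create_rsfMRI_preproc_pipeline(working_dir="/tmp/wd",
#                                     freesurfer_dir="/data/freesurfer",
#                                     ds_dir="/data/results",
#                                     use_fs_brainmask=True)
# wf.inputs.inputnode.epi = "/data/raw/sub001/rest.nii.gz"
# wf.inputs.inputnode.t1w = "/data/raw/sub001/t1w.nii.gz"
# wf.inputs.inputnode.subject_id = "sub001"
# wf.inputs.inputnode.TR_ms = 2000
# wf.inputs.inputnode.vols_to_drop = 5
# wf.run(plugin="MultiProc", plugin_args={"n_procs": 4})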
    
# field_methodlist=[]
# for field in fields:
#     fieldlist=[]
#     for subject in subjects:
#         fieldlist.append(mask_dir+subject+'/fieldcompare/fields/'+field+'_field.nii.gz')
#     field_methodlist.append(fieldlist)
#         

'''basic workflow
=======================
'''

# create workflow
group = Workflow(name='group')
group.base_dir=working_dir

# sink
sink = Node(nio.DataSink(base_directory=out_dir,
                         parameterization=False), 
             name='sink')
 

'''groupmeans and sdv
=======================
'''

# merge means
merger = MapNode(fsl.Merge(dimension='t'),
                 iterfield=['in_files'],
                 name='merger')

apply_topup = Node(fsl.ApplyWarp(relwarp=True,
                                 out_file='topup_ts.nii.gz',
                                 datatype='float'),
                   name='apply_topup')
   
apply_ts.connect([(inputnode, apply_topup, [('moco_ts', 'in_file'),
                                            ('topup_fullwarp', 'field_file')]),
                 (resamp_anat, apply_topup, [('out_file', 'ref_file')]),
                 (apply_topup, outputnode, [('out_file', 'topup_ts')])
                 ])
apply_topup.plugin_args={'initial_specs': 'request_memory = 8000'}



# set up workflow, in- and output
apply_ts.base_dir='/scr/kansas1/huntenburg/'
data_dir='/scr/jessica2/Schaare/LEMON/'
#out_dir = '/scr/kansas1/huntenburg/timeseries/'
#applywarp_linear.config['execution']={'remove_unnecessary_outputs': 'False'}
apply_ts.config['execution']['crashdump_dir'] = apply_ts.base_dir + "/crash_files"

# reading subjects from file
#subjects=['LEMON003']
subjects=[]
f = open('/scr/jessica2/Schaare/LEMON/done_freesurfer.txt','r')
for line in f:
    subjects.append(line.strip())
subjects.remove('LEMON007')
subjects.remove('LEMON027')

def learning_prepare_data_wf(working_dir,
                             ds_dir,
                             template_lookup_dict,
                             behav_file,
                             qc_file,
                             in_data_name_list,
                             data_lookup_dict,
                             use_n_procs,
                             plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from prepare_data_utils import vectorize_and_aggregate
    from itertools import chain

    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))


    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='learning_prepare_data_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
                                                                       'remove_unnecessary_outputs': False,
                                                                       'job_finished_timeout': 120,
                                                                       'hash_method': 'timestamp'})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]

    ds_X = Node(nio.DataSink(), name='ds_X')
    ds_X.inputs.base_directory = os.path.join(ds_dir, 'vectorized_aggregated_data')

    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False


    #####################################
    # SET ITERATORS
    #####################################
    # SUBJECTS ITERATOR
    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)



    ###############################################################################################################
    # GET SUBJECTS INFO
    # create subjects list based on selection criteria

    def create_df_fct(behav_file, qc_file):
        import pandas as pd
        import os
        df = pd.read_pickle(behav_file)
        qc = pd.read_pickle(qc_file)
        df_all = qc.join(df, how='inner')

        assert df_all.index.is_unique, 'duplicate entries in df index; fix before continuing.'

        df_all_subjects_pickle_file = os.path.abspath('df_all.pkl')
        df_all.to_pickle(df_all_subjects_pickle_file)

        full_subjects_list = df_all.index.values

        return df_all_subjects_pickle_file, full_subjects_list

    create_df = Node(util.Function(input_names=['behav_file', 'qc_file'],
                                   output_names=['df_all_subjects_pickle_file', 'full_subjects_list'],
                                   function=create_df_fct),
                     name='create_df')
    create_df.inputs.behav_file = behav_file
    create_df.inputs.qc_file = qc_file


    ###############################################################################################################
    # CREATE FILE LIST
    # of files that will be aggregated

    def create_file_list_fct(subjects_list, in_data_name, data_lookup_dict, template_lookup_dict):
        entry = data_lookup_dict[in_data_name]

        file_list = [entry['path_str'].format(subject_id=s) for s in subjects_list]

        matrix_name = entry.get('matrix_name')
        parcellation_path = entry.get('parcellation_path')
        # treat fwhm == 0 (or a missing key) as 'no smoothing'
        fwhm = entry.get('fwhm') or None
        mask_path = template_lookup_dict[entry['mask_name']] if 'mask_name' in entry else None
        use_diagonal = entry.get('use_diagonal', False)
        use_fishers_z = entry.get('use_fishers_z', False)
        df_col_names = entry.get('df_col_names')

        return file_list, matrix_name, parcellation_path, fwhm, mask_path, use_diagonal, use_fishers_z, df_col_names

    create_file_list = Node(util.Function(input_names=['subjects_list',
                                                       'in_data_name',
                                                       'data_lookup_dict',
                                                       'template_lookup_dict',
                                                       ],
                                          output_names=['file_list',
                                                        'matrix_name',
                                                        'parcellation_path',
                                                        'fwhm',
                                                        'mask_path',
                                                        'use_diagonal',
                                                        'use_fishers_z',
                                                        'df_col_names'],
                                          function=create_file_list_fct),
                            name='create_file_list')
    wf.connect(create_df, 'full_subjects_list', create_file_list, 'subjects_list')
    wf.connect(in_data_name_infosource, 'in_data_name', create_file_list, 'in_data_name')
    create_file_list.inputs.data_lookup_dict = data_lookup_dict
    create_file_list.inputs.template_lookup_dict = template_lookup_dict




    ###############################################################################################################
    # VECTORIZE AND AGGREGATE SUBJECTS
    # stack single subject np arrays vertically
    vectorize_aggregate_subjects = Node(util.Function(input_names=['in_data_file_list',
                                                                   'mask_file',
                                                                   'matrix_name',
                                                                   'parcellation_path',
                                                                   'fwhm',
                                                                   'use_diagonal',
                                                                   'use_fishers_z',
                                                                   'df_file',
                                                                   'df_col_names'],
                                                      output_names=['vectorized_aggregated_file',
                                                                    'unimodal_backprojection_info_file'],
                                                      function=vectorize_and_aggregate),
                                        name='vectorize_aggregate_subjects')
    wf.connect(create_file_list, 'file_list', vectorize_aggregate_subjects, 'in_data_file_list')
    wf.connect(create_file_list, 'mask_path', vectorize_aggregate_subjects, 'mask_file')
    wf.connect(create_file_list, 'matrix_name', vectorize_aggregate_subjects, 'matrix_name')
    wf.connect(create_file_list, 'parcellation_path', vectorize_aggregate_subjects, 'parcellation_path')
    wf.connect(create_file_list, 'fwhm', vectorize_aggregate_subjects, 'fwhm')
    wf.connect(create_file_list, 'use_diagonal', vectorize_aggregate_subjects, 'use_diagonal')
    wf.connect(create_file_list, 'use_fishers_z', vectorize_aggregate_subjects, 'use_fishers_z')
    wf.connect(create_df, 'df_all_subjects_pickle_file', vectorize_aggregate_subjects, 'df_file')
    wf.connect(create_file_list, 'df_col_names', vectorize_aggregate_subjects, 'df_col_names')

    wf.connect(create_df, 'df_all_subjects_pickle_file', ds_X, 'df_all_subjects_pickle_file')
    wf.connect(vectorize_aggregate_subjects, 'vectorized_aggregated_file', ds_X, 'X_file')
    wf.connect(vectorize_aggregate_subjects, 'unimodal_backprojection_info_file', ds_X, 'unimodal_backprojection_info_file')
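
    # Hypothetical sketch of prepare_data_utils.vectorize_and_aggregate (the real
    # helper is not shown): vectorize each subject's file and stack the rows into
    # one (n_subjects x n_features) array, e.g. for square connectivity matrices:
    #
    # def vectorize_and_aggregate(in_data_file_list, mask_file, matrix_name,
    #                             parcellation_path, fwhm, use_diagonal,
    #                             use_fishers_z, df_file, df_col_names):
    #     import os
    #     import numpy as np
    #     rows = []
    #     for f in in_data_file_list:
    #         mat = np.load(f)
    #         vec = mat[np.triu_indices_from(mat, k=0 if use_diagonal else 1)]
    #         if use_fishers_z:
    #             vec = np.arctanh(vec)
    #         rows.append(vec)
    #     X = np.vstack(rows)
    #     vectorized_aggregated_file = os.path.abspath("X.npy")
    #     np.save(vectorized_aggregated_file, X)
    #     unimodal_backprojection_info_file = os.path.abspath("backprojection_info.npy")
    #     np.save(unimodal_backprojection_info_file, np.array(X.shape))
    #     return vectorized_aggregated_file, unimodal_backprojection_info_file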



    #####################################
    # RUN WF
    #####################################
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example #32
workdir = '/scratch/PSB6351_2017/crash/mattfeld'
workingdir = os.path.join(workdir, 'antsreg')  #working directory
if not os.path.exists(workingdir):
    os.makedirs(workingdir)

fs_skullstrip_wf = create_freesurfer_skullstrip_workflow()
fs_skullstrip_wf.inputs.inputspec.subjects_dir = fs_projdir

sids = [
    'sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05', 'sub-06', 'sub-07',
    'sub-09', 'sub-10', 'sub-11', 'sub-12', 'sub-13', 'sub-14', 'sub-15'
]

# Set up the ANTs registration workflow
antsreg_wf = Workflow(name='antsreg_wf')
antsreg_wf.base_dir = workingdir

subjID_infosource = Node(
    IdentityInterface(fields=['subject_id', 'subjects_dir']),
    name='subjID_infosource')
subjID_infosource.iterables = ('subject_id', sids)

antsreg_wf.connect(subjID_infosource, 'subject_id', fs_skullstrip_wf,
                   'inputspec.subject_id')

# Use a JoinNode to aggregate all of the outputs from the fs_skullstrip_wf
reg = Node(ants.Registration(), name='antsRegister')
reg.inputs.fixed_image = '/scratch/PSB6351_2017/ds008_R2.0.0/template/T1_template/study_template.nii.gz'
reg.inputs.output_transform_prefix = "output_"
reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
Example #33
def test_mapnode(config, moving_image, fixed_image):
    import nipype.interfaces.fsl as fsl
    from nipype.pipeline.engine import Node, Workflow, MapNode
    from nipype.interfaces.io import DataSink, DataGrabber
    from nipype.interfaces.utility import IdentityInterface, Function
    import os

    moving_mse = get_mseid(moving_image[0])
    fixed_mse = get_mseid(fixed_image[0])
    print(moving_mse, fixed_mse)
    seriesNum_moving = get_seriesnum(moving_image)
    seriesNum_fixed = get_seriesnum(fixed_image)
    print("seriesNum for moving and fixed are {}, {} respectively".format(seriesNum_moving, seriesNum_fixed))

    register = Workflow(name="test_mapnode")
    register.base_dir = config["working_directory"]
    inputnode = Node(IdentityInterface(fields=["moving_image", "fixed_image"]),
                     name="inputspec")
    inputnode.inputs.moving_image = moving_image
    inputnode.inputs.fixed_image = fixed_image

    check_len = Node(Function(input_names=["moving_image", "fixed_image"],
                              output_names=["new_moving_image", "new_fixed_image"], function=check_length),
                     name="check_len")
    register.connect(inputnode, 'moving_image', check_len, 'moving_image')
    register.connect(inputnode, 'fixed_image', check_len, 'fixed_image')

    flt_rigid = MapNode(fsl.FLIRT(), iterfield=['in_file', 'reference'], name="FLIRT_RIGID")
    flt_rigid.inputs.dof = 6
    flt_rigid.inputs.output_type = 'NIFTI_GZ'
    register.connect(check_len, 'new_moving_image', flt_rigid, 'in_file')
    register.connect(check_len, 'new_fixed_image', flt_rigid, 'reference')

    sinker = Node(DataSink(), name="DataSink")
    sinker.inputs.base_directory = '/data/henry7/james'
    sinker.inputs.container = 'test_mapnode'


    """
    def getsubs(moving_image, fixed_image, moving_mse, fixed_mse, seriesNum_moving, seriesNum_fixed):

        N = len(moving_image) * len(fixed_image)
        subs = []
        print("N is :" ,N)
        for i in range(N):
            for j in seriesNum_moving:
                seri_moving = ''
                if j != '':
                    seri_moving = '_' + j
                for k in seriesNum_fixed:
                    seri_fixed = ''
                    if k != '':
                        seri_fixed = '_' + k
                    subs += [('_FLIRT_RIGID%d'%i, moving_mse + seri_moving + '__' + fixed_mse + seri_fixed)]
        print("subs are: ", subs)
        return subs
    """

    def getsubs(moving_image, fixed_image, moving_mse, fixed_mse):
        N = len(moving_image) * len(fixed_image)
        subs = [('_flirt', '_trans')]
        if N == 1:
            subs += [('_FLIRT_RIGID%d'%0, moving_mse + '__' + fixed_mse)]
        else:
            for i in range(N):
                subs += [('_FLIRT_RIGID%d'%i, moving_mse + '__' + fixed_mse + '_' + str(i+1))]
        return subs

    get_subs = Node(Function(input_names=["moving_image", "fixed_image", "moving_mse", "fixed_mse"],
                             output_names=["subs"], function=getsubs),
                    name="get_subs")
    get_subs.inputs.moving_mse = moving_mse
    get_subs.inputs.fixed_mse = fixed_mse
    # get_subs.inputs.seriesNum_moving = seriesNum_moving
    # get_subs.inputs.seriesNum_fixed = seriesNum_fixed

    register.connect(inputnode, 'moving_image', get_subs, 'moving_image')
    register.connect(inputnode, 'fixed_image', get_subs, "fixed_image")
    register.connect(get_subs, 'subs', sinker, 'substitutions')
    register.connect(flt_rigid, 'out_file', sinker, '@mapnode_out')

    register.write_graph(graph2use='orig')
    register.config["Execution"] = {"keep_inputs": True, "remove_unnecessary_outputs": False}
    return register
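
# Illustrative call of test_mapnode (the config dict and image paths are made up):
# wf = test_mapnode({"working_directory": "/tmp/wd"},
#                   ["/data/ms0001-mse0001-t1.nii.gz"],
#                   ["/data/ms0002-mse0002-t1.nii.gz"])
# wf.run()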
# mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
# mlab.MatlabCommand.set_default_paths('/home/amr/Documents/MATLAB/toolbox/spm8')

#-----------------------------------------------------------------------------------------------------
# In[2]:
experiment_dir = '/media/amr/HDD/Work/Stimulation'  

subject_list = ['003','005','008','011','018','019','020', '059', '060','062','063','066']
session_list = ['run001', 'run002', 'run003']

                
output_dir  = '10Hz_10s_Task_Based_OutputDir'
working_dir = '10Hz_10s_Task_Based_WorkingDir'

preproc_task = Workflow(name = 'preproc_task')
preproc_task.base_dir = opj(experiment_dir, working_dir)

#-----------------------------------------------------------------------------------------------------
# In[20]:
# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id','session_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list),
                        ('session_id', session_list)]

#-----------------------------------------------------------------------------------------------------
# In[21]:

templates = {

 'Anat_Bias' : '/media/amr/HDD/Work/Stimulation/Registration_Stimulation_WorkingDir/Registration_Stimulation/_subject_id_{subject_id}/BiasFieldCorrection/Anat_{subject_id}_bet_corrected.nii.gz',
Example #35
		copes,varcopes, tstats = [[] for i in range(3)]

		EV_indices = EV_rpe_design_df.index
		
		for c,contrast in enumerate(contrasts):

			if c in EV_indices:

				copes.append(os.path.join(sub_stats_dir,'%s%i.nii.gz'%('cope',contrast)))
				varcopes.append(os.path.join(sub_stats_dir,'%s%i.nii.gz'%('varcope',contrast)))
				tstats.append(os.path.join(sub_stats_dir,'%s%i.nii.gz'%('tstat',contrast)))

		###### NIPYPE WORKFLOW 1: RPE as mean-centered covariate ######

		Parkflow_rpe = Workflow(name='workflow')
		Parkflow_rpe.base_dir = sub_rpe_workflow_dir

		# Create nodes

		copemerge = Node(interface=fsl.Merge(
			dimension='t',
			in_files=copes),
			name='copemerge')
		varcopemerge = Node(interface=fsl.Merge(
			dimension='t',
			in_files=varcopes),
			name='varcopemerge')

		multregmodel = Node(interface=fsl.MultipleRegressDesign(
			contrasts=[],
			regressors={}),
Example #36
"""

# test only the hippocampus part
from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
import nipype.interfaces.fsl as fsl
from hc import create_transform_hc
from get_T1_brainmask import create_get_T1_brainmask

freesurfer_dir = "/data/pt_nro148/3T/restingstate_and_freesurfer/preprocessing/freesurfer"
subject = 'RSV002'
working_dir = "/data/pt_nro148/3T/restingstate_and_freesurfer/wd/hipp_connectivity/left/" + subject

hc_connec = Workflow(name='hc_connec')
hc_connec.base_dir = working_dir

get_T1_brainmask = create_get_T1_brainmask()
get_T1_brainmask.inputs.inputnode.fs_subjects_dir = freesurfer_dir
get_T1_brainmask.inputs.inputnode.fs_subject_id = subject

transform_hc = create_transform_hc()
transform_hc.inputs.inputnode.fs_subjects_dir = freesurfer_dir
transform_hc.inputs.inputnode.fs_subject_id = subject
transform_hc.inputs.inputnode.resolution = 2
transform_hc.inputs.inputnode.working_dir = working_dir

hc_connec.connect([(get_T1_brainmask, transform_hc,
                    [('outputnode.T1', 'inputnode.anat_head')])])

hc_connec.run()
from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
import os

epireg = Workflow(name='epireg')
epireg.base_dir='/scr/ilz1/nonlinear_registration/lemon/working_dir_epireg/'
data_dir = '/scr/ilz1/nonlinear_registration/lemon/results/'
output_dir = '/scr/ilz1/nonlinear_registration/lemon/results/'
echo_space=0.00067 #in sec
te_diff=2.46 #in ms
pe_dir='y-'
flirt_pe_dir=-2
subjects=['LEMON001']
#subjects=os.listdir(data_dir)

# infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id']), 
                  name='infosource')
infosource.iterables=('subject_id', subjects)

# datasource to grab data
datasource = Node(nio.DataGrabber(infields=['subject_id'], 
                              outfields=['anat_head','anat_brain','epi_mean', 'fmap', 'mag_brain', 'mag_head'],
                              base_directory = os.path.abspath(data_dir),
                              template = '%s/%s/%s/%s',
                              template_args=dict(anat_head=[['subject_id','realignment', 'subject_id', '?']], 
                                                 anat_brain=[['subject_id','realignment', 'subject_id', '?']],
                                                 epi_mean=[['subject_id','realignment', 'subject_id', 'cmrrmbep2drestings007a001_mcf_mean.nii.gz']],
                                                 fmap=[['subject_id','fieldmap', 'subject_id', 'grefieldmappings004a2001_fslprepared.nii.gz']],
# In[ ]:

if sequence=='mux6':
    prepreprocflow = Workflow(name='prepreprocflow')
    prepreprocflow.connect([(infosource,select_pes, [('subject_id','subject_id')]),
                            (infosource,select_func, [('subject_id','subject_id')]),
                            (select_pes,trim_PEs, [('pes','in_file')]),
                            (trim_PEs,sort_pe_list, [('roi_file','pes')]),
                            (sort_pe_list,topup, [('merged_pes','in_file')]),
                            (topup, apply_topup, [('out_fieldcoef','in_topup_fieldcoef'),
                                                  ('out_movpar','in_topup_movpar')]),
                            (select_func, apply_topup, [('func','in_files')]),
                            (apply_topup, datasink, [('out_corrected','unwarped_funcs')])
                           ])

    prepreprocflow.base_dir = workflow_dir
    #prepreprocflow.write_graph(graph2use='flat')
    prepreprocflow.run('MultiProc', plugin_args={'n_procs': proc_cores, 'memory_gb':10})


# ### Basic Preprocessing

# In[ ]:

if sequence=='spiral':
    func_template = {'func': raw_data + '/%s/rest*.nii.gz'}
    select_func = Node(DataGrabber(sort_filelist=True,
                                   template = raw_data + '/%s/rest*.nii.gz',
                                   field_template = func_template,
                                   base_directory=raw_data,
                                   infields=['subject_id'],
Example #39
scans = ['4', '27']
recons = ['mag']

vol_to_remove = 10
motion_norm = 0.75
z_thr = 3
TR = 1

# directories
working_dir = '/home/julia/projects/real_data/working_dir/'
data_dir = '/home/julia/projects/real_data/mouse_visual/%s/' % dataset
out_dir = '/home/julia/projects/real_data/mouse_visual/%s/processed/func/' % dataset

# main workflow
preproc_func = Workflow(name='preproc_func')
preproc_func.base_dir = working_dir
preproc_func.config['execution'][
    'crashdump_dir'] = preproc_func.base_dir + "/crash_files"

# iterate over scans
scan_infosource = Node(util.IdentityInterface(fields=['scan']),
                       name='scan_infosource')
scan_infosource.iterables = [('scan', scans)]

# iterate over recons
recon_infosource = Node(util.IdentityInterface(fields=['recon']),
                        name='recon_infosource')
recon_infosource.iterables = [('recon', recons)]

# select files
templates = {
Example #40
def create_registration_pipeline(working_dir, freesurfer_dir, ds_dir, name='registration'):
    """
    find transformations between struct, funct, and MNI
    """

    # initiate workflow
    reg_wf = Workflow(name=name)
    reg_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting', 'rsfMRI_preprocessing')

    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['initial_mean_epi_moco',
                                                    't1w',
                                                    't1w_brain',
                                                    'subject_id',
                                                    'wm_mask_4_bbr',
                                                    'struct_brain_mask']),
                     name='inputnode')

    outputnode = Node(util.IdentityInterface(fields=['struct_2_MNI_warp',
                                                     'struct_2_MNI_mat_flirt',
                                                     'epi_2_struct_mat',
                                                     'struct_2_epi_mat',
                                                     'epi_2_MNI_warp',
                                                     'MNI_2_epi_warp',
                                                     'fs_2_struct_mat',
                                                     'mean_epi_structSpace',
                                                     'mean_epi_MNIspace',
                                                     'struct_MNIspace']),
                      name='outputnode')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.substitutions = [('_TR_id_', 'TR_')]




    ##########################################
    # TOC REGISTRATION MATS AND WARPS
    ##########################################
    # I. STRUCT -> MNI
    ## 1. STRUCT -> MNI with FLIRT
    ## 2. CALC. WARP STRUCT -> MNI with FNIRT

    # II.EPI -> STRUCT
    ## 3. calc EPI->STRUCT initial registration
    ## 4. run EPI->STRUCT via bbr
    ## 5. INVERT to get: STRUCT -> EPI

    # III. COMBINE I. & II.: EPI -> MNI
    ## 6. COMBINE MATS: EPI -> MNI
    ## 7. MNI -> EPI


    ##########################################
    # CREATE REGISTRATION MATS AND WARPS
    ##########################################

    # I. STRUCT -> MNI
    ##########################################
    # 1. REGISTER STRUCT -> MNI with FLIRT
    struct_2_MNI_mat = Node(fsl.FLIRT(dof=12), name='struct_2_MNI_mat')
    struct_2_MNI_mat.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')

    reg_wf.connect(inputnode, 't1w_brain', struct_2_MNI_mat, 'in_file')
    reg_wf.connect(struct_2_MNI_mat, 'out_matrix_file', outputnode, 'struct_2_MNI_mat_flirt')



    # 2. CALC. WARP STRUCT -> MNI with FNIRT
    # cf. wrt. 2mm
    # https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1311&L=FSL&P=R86108&1=FSL&9=A&J=on&d=No+Match%3BMatch%3BMatches&z=4
    struct_2_MNI_warp = Node(fsl.FNIRT(), name='struct_2_MNI_warp')
    struct_2_MNI_warp.inputs.config_file = 'T1_2_MNI152_2mm'
    struct_2_MNI_warp.inputs.ref_file = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
    struct_2_MNI_warp.inputs.field_file = 'struct_2_MNI_warp.nii.gz'
    struct_2_MNI_warp.plugin_args = {'submit_specs': 'request_memory = 4000'}


    reg_wf.connect(inputnode, 't1w', struct_2_MNI_warp, 'in_file')
    reg_wf.connect(struct_2_MNI_mat, 'out_matrix_file', struct_2_MNI_warp, 'affine_file')
    reg_wf.connect(struct_2_MNI_warp, 'field_file', ds, 'registration.struct_2_MNI_warp')
    reg_wf.connect(struct_2_MNI_warp, 'field_file', outputnode, 'struct_2_MNI_warp')
    reg_wf.connect(struct_2_MNI_warp, 'warped_file', outputnode, 'struct_MNIspace')
    reg_wf.connect(struct_2_MNI_warp, 'warped_file', ds, 'registration.struct_MNIspace')


    # II.EPI -> STRUCT (via bbr)
    ##########################################

    # 3. calc EPI->STRUCT initial registration with flirt dof=6 and corratio
    epi_2_struct_flirt6_mat = Node(fsl.FLIRT(dof=6, cost='corratio'), name='epi_2_struct_flirt6_mat')
    epi_2_struct_flirt6_mat.inputs.out_file = 'epi_structSpace_flirt6.nii.gz'
    reg_wf.connect(inputnode, 't1w_brain', epi_2_struct_flirt6_mat, 'reference')
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', epi_2_struct_flirt6_mat, 'in_file')

    # 4. run EPI->STRUCT via bbr
    bbr_schedule = os.path.join(os.getenv('FSLDIR'), 'etc/flirtsch/bbr.sch')
    epi_2_struct_bbr_mat = Node(interface=fsl.FLIRT(dof=6, cost='bbr'), name='epi_2_struct_bbr_mat')
    epi_2_struct_bbr_mat.inputs.schedule = bbr_schedule
    epi_2_struct_bbr_mat.inputs.out_file = 'epi_structSpace.nii.gz'
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', epi_2_struct_bbr_mat, 'in_file')
    reg_wf.connect(inputnode, 't1w_brain', epi_2_struct_bbr_mat, 'reference')
    reg_wf.connect(epi_2_struct_flirt6_mat, 'out_matrix_file', epi_2_struct_bbr_mat, 'in_matrix_file')
    reg_wf.connect(inputnode, 'wm_mask_4_bbr', epi_2_struct_bbr_mat, 'wm_seg')
    reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', ds, 'registration.epi_2_struct_mat')
    reg_wf.connect(epi_2_struct_bbr_mat, 'out_file', outputnode, 'mean_epi_structSpace')


    # 5. INVERT to get: STRUCT -> EPI
    struct_2_epi_mat = Node(fsl.ConvertXFM(invert_xfm=True), name='struct_2_epi_mat')
    reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', struct_2_epi_mat, 'in_file')
    reg_wf.connect(struct_2_epi_mat, 'out_file', outputnode, 'struct_2_epi_mat')


    # III. COMBINE I. & II.: EPI -> MNI
    ##########################################
    # 6. COMBINE MATS: EPI -> MNI
    epi_2_MNI_warp = Node(fsl.ConvertWarp(), name='epi_2_MNI_warp')
    epi_2_MNI_warp.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
    reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', epi_2_MNI_warp, 'premat')  # epi2struct
    reg_wf.connect(struct_2_MNI_warp, 'field_file', epi_2_MNI_warp, 'warp1')  # struct2mni
    reg_wf.connect(epi_2_MNI_warp, 'out_file', outputnode, 'epi_2_MNI_warp')
    reg_wf.connect(epi_2_MNI_warp, 'out_file', ds, 'registration.epi_2_MNI_warp')


    # output: out_file

    # 7. MNI -> EPI
    MNI_2_epi_warp = Node(fsl.InvWarp(), name='MNI_2_epi_warp')
    MNI_2_epi_warp.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
    reg_wf.connect(epi_2_MNI_warp, 'out_file', MNI_2_epi_warp, 'warp')
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', MNI_2_epi_warp, 'reference')
    reg_wf.connect(MNI_2_epi_warp, 'inverse_warp', outputnode, 'MNI_2_epi_warp')
    # output: inverse_warp




    ##########################################
    # TRANSFORM VOLUMES
    ##########################################

    # CREATE STRUCT IN EPI SPACE FOR DEBUGGING
    struct_epiSpace = Node(fsl.ApplyXfm(), name='struct_epiSpace')
    struct_epiSpace.inputs.out_file = 'struct_brain_epiSpace.nii.gz'
    reg_wf.connect(inputnode, 't1w_brain', struct_epiSpace, 'in_file')
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', struct_epiSpace, 'reference')
    reg_wf.connect(struct_2_epi_mat, 'out_file', struct_epiSpace, 'in_matrix_file')
    reg_wf.connect(struct_epiSpace, 'out_file', ds, 'QC.struct_brain_epiSpace')

    # CREATE EPI IN MNI SPACE
    mean_epi_MNIspace = Node(fsl.ApplyWarp(), name='mean_epi_MNIspace')
    mean_epi_MNIspace.inputs.ref_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
    mean_epi_MNIspace.inputs.out_file = 'mean_epi_MNIspace.nii.gz'
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', mean_epi_MNIspace, 'in_file')
    reg_wf.connect(epi_2_MNI_warp, 'out_file', mean_epi_MNIspace, 'field_file')
    reg_wf.connect(mean_epi_MNIspace, 'out_file', ds, 'registration.mean_epi_MNIspace')
    reg_wf.connect(mean_epi_MNIspace, 'out_file', outputnode, 'mean_epi_MNIspace')



    # CREATE MNI IN EPI SPACE FOR DEBUGGING
    MNI_epiSpace = Node(fsl.ApplyWarp(), name='MNI_epiSpace')
    MNI_epiSpace.inputs.in_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
    MNI_epiSpace.inputs.out_file = 'MNI_epiSpace.nii.gz'
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', MNI_epiSpace, 'ref_file')
    reg_wf.connect(MNI_2_epi_warp, 'inverse_warp', MNI_epiSpace, 'field_file')
    reg_wf.connect(MNI_epiSpace, 'out_file', ds, 'registration.MNI_epiSpace')



    reg_wf.write_graph(dotfilename=reg_wf.name, graph2use='flat', format='pdf')

    return reg_wf
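
# A hypothetical usage sketch for create_registration_pipeline; the paths, the
# parent workflow, and the upstream/downstream nodes are placeholders, not part
# of the original code:
#
# reg = create_registration_pipeline('/tmp/work', '/data/freesurfer', '/tmp/ds')
# main_wf = Workflow(name='main')
# main_wf.connect(converter, 'outputnode.t1w', reg, 'inputnode.t1w')
# main_wf.connect(reg, 'outputnode.epi_2_MNI_warp', warp_consumer, 'field_file')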
onesamplettestdes = Node(OneSampleTTestDesign(), name="onesampttestdes")

# EstimateModel - estimate the parameters of the model
level2estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                      name="level2estimate")

# EstimateContrast - estimates simple group contrast
level2conestimate = Node(EstimateContrast(group_contrast=True),
                         name="level2conestimate")
cont1 = ['Group', 'T', ['mean'], [1]]
level2conestimate.inputs.contrasts = [cont1]

###
# Specify 2nd-Level Analysis Workflow & Connect Nodes
l2analysis = Workflow(name='l2analysis')
l2analysis.base_dir = opj(experiment_dir, working_dir)

# Connect up the 2nd-level analysis components
l2analysis.connect([
    (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level2estimate, level2conestimate, [('spm_mat_file', 'spm_mat_file'),
                                         ('beta_images', 'beta_images'),
                                         ('residual_image', 'residual_image')
                                         ]),
])

###
# Input & Output Stream

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['contrast_id']), name="infosource")
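
# The contrast iterator presumably feeds a SelectFiles node that grabs each
# subject's first-level contrast images for the one-sample t-test; a
# hypothetical sketch (contrast_list and the template path are placeholders):
#
# infosource.iterables = [('contrast_id', contrast_list)]
# templates = {'cons': opj(output_dir, '1stLevel', '*', '{contrast_id}.nii')}
# selectfiles = Node(SelectFiles(templates, base_directory=experiment_dir),
#                    name="selectfiles")
# l2analysis.connect([(infosource, selectfiles, [('contrast_id', 'contrast_id')]),
#                     (selectfiles, onesamplettestdes, [('cons', 'in_files')])])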
Example #42
def downsample_surfs(subject_id,
                     working_dir,
                     freesurfer_dir,
                     ds_dir,
                     plugin_name,
                     use_n_procs):
    '''
    Workflow resamples e.g. native thickness maps to fsaverage5 space
    '''

    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    fs.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='freesurfer_downsample')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 15})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.parameterization = False



    #####################
    # ITERATORS
    #####################
    # HEMISPHERE / MEASURE / FWHM / TARGET ITERATOR
    infosource = Node(util.IdentityInterface(fields=['hemi', 'surf_measure', 'fwhm', 'target']), name='infosource')
    infosource.iterables = [('hemi', ['lh', 'rh']),
                            ('surf_measure', ['thickness', 'area']),
                            ('fwhm', [0, 5, 10, 20]),
                            ('target', ['fsaverage3', 'fsaverage4', 'fsaverage5']),
                            ]

    downsample = Node(fs.model.MRISPreproc(), name='downsample')
    downsample.inputs.subjects = [subject_id]
    wf.connect(infosource, 'target', downsample, 'target')
    wf.connect(infosource, 'hemi', downsample, 'hemi')
    wf.connect(infosource, 'surf_measure', downsample, 'surf_measure')
    wf.connect(infosource, 'fwhm', downsample, 'fwhm_source')

    rename = Node(util.Rename(format_string='%(hemi)s.%(surf_measure)s.%(target)s.%(fwhm)smm'), name='rename')
    rename.inputs.keep_ext = True
    wf.connect(infosource, 'target', rename, 'target')
    wf.connect(infosource, 'hemi', rename, 'hemi')
    wf.connect(infosource, 'surf_measure', rename, 'surf_measure')
    wf.connect(infosource, 'fwhm', rename, 'fwhm')
    wf.connect(downsample, 'out_file', rename, 'in_file')

    wf.connect(rename, 'out_file', ds, 'surfs.@surfs')





    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
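
# Hypothetical invocation (paths are placeholders); the function writes the
# graph PDFs and runs the workflow itself, so there is no return value to wire up:
#
# downsample_surfs(subject_id='sub001',
#                  working_dir='/tmp/work',
#                  freesurfer_dir='/data/freesurfer',
#                  ds_dir='/tmp/ds',
#                  plugin_name='MultiProc',
#                  use_n_procs=4)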
Example #43
def builder(subject_id,
            subId,
            project_dir,
            data_dir,
            output_dir,
            output_final_dir,
            output_interm_dir,
            layout,
            anat=None,
            funcs=None,
            fmaps=None,
            task_name='',
            session=None,
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False,
            write_logs=True):
    """
    Core function that returns a workflow. See wfmaker for more details.

    Args:
        subject_id: subject name used for the final outputted sub-folder
        subId: abbreviated subject name used for the intermediate outputted sub-folder
        project_dir: full path to root of project
        data_dir: full path to raw data files
        output_dir: upper level output dir (others will be nested within this)
        output_final_dir: final preprocessed sub-dir name
        output_interm_dir: intermediate preprocess sub-dir name
        layout: BIDS layout instance
    """

    ##################
    ### PATH SETUP ###
    ##################
    if session is not None:
        session = str(int(session)).zfill(2)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update the nipype global config because setting workflow.config[...] doesn't seem to work
    # We can't store a nipype config/rc file in the container anyway, so set these globally before importing and setting up the workflow, as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file

    # Create the subject's intermediate directory before configuring nipype and the workflow, because log files are saved there alongside intermediate files
    if not os.path.exists(os.path.join(output_interm_dir, subId, 'logs')):
        os.makedirs(os.path.join(output_interm_dir, subId, 'logs'))
    log_dir = os.path.join(output_interm_dir, subId, 'logs')
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': write_logs
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################

    # Turn the functional file list into an iterable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################

    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab the number of measurements (for some reason pyBIDS doesn't expose the dcm_meta... fields from the side-car json file, and json.load doesn't either; so instead read the header with nibabel to determine the number of volumes)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result in some problems, causing big chunks of cortex with little fine-grained spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should optimizer average to compute threshold?
    # sampling_strategy
    # what strategy should ANTs use to initialize the transform. Regular here refers to approximately random sampling around the center of the image mass

    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[8, 4, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    # NEW SETTINGS (need to be adjusted; specifically shrink_factors and smoothing_sigmas need to be the same length)
    # normalization = Node(Registration(), name='normalization')
    # normalization.inputs.float = False
    # normalization.inputs.collapse_output_transforms = True
    # normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    # normalization.inputs.convergence_window_size = [10]
    # normalization.inputs.dimension = 3
    # normalization.inputs.fixed_image = MNItemplate
    # normalization.inputs.initial_moving_transform_com = True
    # normalization.inputs.metric = ['MI', 'MI', 'CC']
    # normalization.inputs.metric_weight = [1.0]*3
    # normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
    #                                              [1000, 500, 250, 100],
    #                                              [100, 70, 50, 20]]
    # normalization.inputs.num_threads = ants_threads
    # normalization.inputs.output_transform_prefix = 'anat2template'
    # normalization.inputs.output_inverse_warped_image = True
    # normalization.inputs.output_warped_image = True
    # normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    # normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    # normalization.inputs.sampling_strategy = ['Regular',
    #                                           'Regular',
    #                                           'None']
    # normalization.inputs.shrink_factors = [[4, 3, 2, 1]]*3
    # normalization.inputs.sigma_units = ['vox']*3
    # normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    # normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    # normalization.inputs.transform_parameters = [(0.1,),
    #                                              (0.1,),
    #                                              (0.1, 3.0, 0.0)]
    # normalization.inputs.use_histogram_matching = True
    # normalization.inputs.winsorize_lower_quantile = 0.005
    # normalization.inputs.winsorize_upper_quantile = 0.995
    # normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2), name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(), name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = ants_threads
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = ants_threads
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    if session:
        datasink.inputs.base_directory = os.path.join(output_final_dir,
                                                      subject_id)
        datasink.inputs.container = 'ses-' + session
    else:
        datasink.inputs.base_directory = output_final_dir
        datasink.inputs.container = subject_id

    # Build DataSink substitutions that strip the folder-derived additions from output file names
    data_dir_parts = data_dir.split('/')[1:]
    if session:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + [
            'ses-' + session
        ] + ['func']
    else:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace
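    # Illustrative example (hypothetical values): with data_dir='/data/bids',
    # subject_id='sub-01', no session, and a scan file
    # 'sub-01_task-rest_bold.nii.gz', the substitution tuple maps
    # '_scan_..data..bids..sub-01..func..sub-01_task-rest_bold.nii.gz'
    # to 'task-rest_bold', stripping the prefix DataSink builds from the
    # iterable's parameterization.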

    #####################
    ### INIT WORKFLOW ###
    #####################
    # If we have sessions, provide the full path to the subject's intermediate directory
    # and rely on workflow init only to create the session container *within* that directory.
    # Otherwise just point to the intermediate directory and let workflow init create the subject container within it.
    if session:
        workflow = Workflow(name='ses_' + session)
        workflow.base_dir = os.path.join(output_interm_dir, subId)
    else:
        workflow = Workflow(name=subId)
        workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup, [('encoding_file',
                                                          'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################

    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################

    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])

        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    # realignment parms -> save
    ##########################################

    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')]),
        (realign_fsl, datasink, [('par_file', 'functional.@motionparams')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads != 8:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
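
# A hypothetical call sketch (all arguments are placeholders; in the original
# package this function is wrapped by wfmaker):
#
# wf = builder(subject_id='sub-01', subId='s01',
#              project_dir='/proj', data_dir='/proj/raw',
#              output_dir='/proj/out', output_final_dir='/proj/out/final',
#              output_interm_dir='/proj/out/interm', layout=bids_layout,
#              anat=anat_file, funcs=func_files)
# wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})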
Example #44
subject_list = [
    '229', '230', '232', '233', '234', '235', '237', '242', '243', '244',
    '245', '252', '253', '255', '261', '262', '263', '264', '273', '274',
    '281', '282', '286', '287', '362', '363', '364', '365', '366', '236',
    '271', '272'
]

# subject_list = ['229', '230', '365', '274']

# subject_list = ['230', '365']

output_dir = 'Plus_Maze_output'
working_dir = 'Plus_Maze_workingdir'

Plus_Maze_workflow = Workflow(name='Plus_Maze_workflow')
Plus_Maze_workflow.base_dir = opj(experiment_dir, working_dir)

# -----------------------------------------------------------------------------------------------------
# In[3]:

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# -----------------------------------------------------------------------------------------------------
# In[4]:

templates = {'plus_maze': 'Data/{subject_id}/plus_maze_{subject_id}.avi'}

selectfiles = Node(SelectFiles(templates, base_directory=experiment_dir),
                   name="selectfiles")
Example #45
# session_list = ['run001', 'run002', 'run003']
session_list = [
    'run001', 'run002', 'run003', 'run004', 'run005', 'run006', 'run007',
    'run008'
]

# session_list = ['run001', 'run002']

frequency_list = ['05Hz', '10Hz', '20Hz', '40Hz']
# frequency_list = ['40Hz']

output_dir = 'Stimulation_Preproc_OutputDir_CA3'
working_dir = 'Stimulation_Preproc_WorkingDir_CA3'

stimulation_preproc = Workflow(name='stimulation_preproc_CA3')
stimulation_preproc.base_dir = opj(experiment_dir, working_dir)

# =====================================================================================================
# In[3]:
# to prevent nipype from iterating over the anat image with each func run, you need separate
# nodes to select the files
# and this solves the problem I have had for almost 6 months
# but notice that for the sessions you also have to iterate over subject_id to get the {subject_id} var

# Infosource - a function free node to iterate over the list of subject names
infosource_anat = Node(IdentityInterface(fields=['subject_id']),
                       name="infosource_anat")
infosource_anat.iterables = [('subject_id', subject_list)]

infosource_func = Node(
    IdentityInterface(fields=['subject_id', 'session_id', 'frequency_id']),
Example #46
def preprocessing_pipeline(cfg):
    import os

    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as freesurfer

    # LeiCA modules
    from utils import zip_and_save_running_scripts
    from preprocessing.rsfMRI_preprocessing import create_rsfMRI_preproc_pipeline
    from preprocessing.converter import create_converter_structural_pipeline, create_converter_functional_pipeline, \
        create_converter_diffusion_pipeline

    # INPUT PARAMETERS
    dicom_dir = cfg['dicom_dir']
    working_dir = cfg['working_dir']
    freesurfer_dir = cfg['freesurfer_dir']
    template_dir = cfg['template_dir']
    script_dir = cfg['script_dir']
    ds_dir = cfg['ds_dir']

    subject_id = cfg['subject_id']
    TR_list = cfg['TR_list']

    vols_to_drop = cfg['vols_to_drop']
    lp_cutoff_freq = cfg['lp_cutoff_freq']
    hp_cutoff_freq = cfg['hp_cutoff_freq']
    use_fs_brainmask = cfg['use_fs_brainmask']

    use_n_procs = cfg['use_n_procs']
    plugin_name = cfg['plugin_name']

    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='LeiCA_resting')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={
                          'stop_on_first_crash': True,
                          'remove_unnecessary_outputs': True,
                          'job_finished_timeout': 120
                      })
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(
        working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')

    #####################################
    # SET ITERATORS
    #####################################
    # GET SCAN TR_ID ITERATOR
    scan_infosource = Node(util.IdentityInterface(fields=['TR_id']),
                           name='scan_infosource')
    scan_infosource.iterables = ('TR_id', TR_list)

    #####################################
    # FETCH MRI DATA
    #####################################
    # GET LATERAL VENTRICLE MASK
    templates_atlases = {
        'lat_ventricle_mask_MNI':
        'cpac_image_resources/HarvardOxford-lateral-ventricles-thr25-2mm.nii.gz'
    }
    selectfiles_templates = Node(nio.SelectFiles(templates_atlases,
                                                 base_directory=template_dir),
                                 name="selectfiles_templates")

    use_old_folder_structure = False
    if use_old_folder_structure:  # releases 1-6 with 01... format subject_id
        # GET FUNCTIONAL DATA
        templates_funct = {
            'funct_dicom': '{subject_id}/session_1/RfMRI_*_{TR_id}'
        }

        selectfiles_funct = Node(nio.SelectFiles(templates_funct,
                                                 base_directory=dicom_dir),
                                 name="selectfiles_funct")
        selectfiles_funct.inputs.subject_id = subject_id

        wf.connect(scan_infosource, 'TR_id', selectfiles_funct, 'TR_id')

        # GET STRUCTURAL DATA
        templates_struct = {
            't1w_dicom': '{subject_id}/anat',
            'dMRI_dicom': '{subject_id}/session_1/DTI_mx_137/*.dcm'
        }  # *.dcm for dMRI as Dcm2nii requires this

        selectfiles_struct = Node(nio.SelectFiles(templates_struct,
                                                  base_directory=dicom_dir),
                                  name="selectfiles_struct")
        selectfiles_struct.inputs.subject_id = subject_id

    else:  # starting with release 6, new folder structure
        templates_funct = {
            'funct_dicom': '*/{subject_id}/*_V2/REST_{TR_id}*/*.dcm'
        }

        selectfiles_funct = Node(nio.SelectFiles(templates_funct,
                                                 base_directory=dicom_dir),
                                 name="selectfiles_funct")
        selectfiles_funct.inputs.subject_id = subject_id

        wf.connect(scan_infosource, 'TR_id', selectfiles_funct, 'TR_id')

        # GET STRUCTURAL DATA
        templates_struct = {
            't1w_dicom': '*/{subject_id}/*_V2/MPRAGE_SIEMENS_DEFACED*/*.dcm',
            'dMRI_dicom': '*/{subject_id}/*_V2/DIFF_137_AP*/*.dcm'
        }  # *.dcm for dMRI as Dcm2nii requires this

        selectfiles_struct = Node(nio.SelectFiles(templates_struct,
                                                  base_directory=dicom_dir),
                                  name="selectfiles_struct")
        selectfiles_struct.inputs.subject_id = subject_id

    #####################################
    # COPY RUNNING SCRIPTS
    #####################################
    copy_scripts = Node(util.Function(input_names=['subject_id', 'script_dir'],
                                      output_names=['zip_file'],
                                      function=zip_and_save_running_scripts),
                        name='copy_scripts')
    copy_scripts.inputs.script_dir = script_dir
    copy_scripts.inputs.subject_id = subject_id
    wf.connect(copy_scripts, 'zip_file', ds, 'scripts')

    #####################################
    # CONVERT DICOMs
    #####################################
    # CONVERT STRUCT 2 NIFTI
    converter_struct = create_converter_structural_pipeline(
        working_dir, ds_dir, 'converter_struct')
    wf.connect(selectfiles_struct, 't1w_dicom', converter_struct,
               'inputnode.t1w_dicom')

    # CONVERT dMRI 2 NIFTI
    converter_dMRI = create_converter_diffusion_pipeline(
        working_dir, ds_dir, 'converter_dMRI')
    wf.connect(selectfiles_struct, 'dMRI_dicom', converter_dMRI,
               'inputnode.dMRI_dicom')

    # CONVERT FUNCT 2 NIFTI
    converter_funct = create_converter_functional_pipeline(
        working_dir, ds_dir, 'converter_funct')
    wf.connect(selectfiles_funct, 'funct_dicom', converter_funct,
               'inputnode.epi_dicom')
    wf.connect(scan_infosource, 'TR_id', converter_funct,
               'inputnode.out_format')

    #####################################
    # START RSFMRI PREPROCESSING ANALYSIS
    #####################################
    # rsfMRI PREPROCESSING
    rsfMRI_preproc = create_rsfMRI_preproc_pipeline(working_dir,
                                                    freesurfer_dir, ds_dir,
                                                    use_fs_brainmask,
                                                    'rsfMRI_preprocessing')
    rsfMRI_preproc.inputs.inputnode.vols_to_drop = vols_to_drop
    rsfMRI_preproc.inputs.inputnode.lp_cutoff_freq = lp_cutoff_freq
    rsfMRI_preproc.inputs.inputnode.hp_cutoff_freq = hp_cutoff_freq
    rsfMRI_preproc.inputs.inputnode.subject_id = subject_id

    wf.connect(converter_struct, 'outputnode.t1w', rsfMRI_preproc,
               'inputnode.t1w')
    wf.connect(converter_funct, 'outputnode.epi', rsfMRI_preproc,
               'inputnode.epi')
    wf.connect(converter_funct, 'outputnode.TR_ms', rsfMRI_preproc,
               'inputnode.TR_ms')
    wf.connect(selectfiles_templates, 'lat_ventricle_mask_MNI', rsfMRI_preproc,
               'inputnode.lat_ventricle_mask_MNI')

    #####################################
    # RUN WF
    #####################################
    wf.write_graph(dotfilename=wf.name, graph2use='colored',
                   format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
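
# Hypothetical invocation: the function expects a cfg dict with the keys read
# at the top of the function (all values below are placeholders).
#
# cfg = {'dicom_dir': '/data/dicom', 'working_dir': '/data/work',
#        'freesurfer_dir': '/data/fs', 'template_dir': '/data/templates',
#        'script_dir': '/code', 'ds_dir': '/data/ds',
#        'subject_id': 'sub001', 'TR_list': ['645', '1400'],
#        'vols_to_drop': 5, 'lp_cutoff_freq': 0.1, 'hp_cutoff_freq': 0.01,
#        'use_fs_brainmask': True, 'use_n_procs': 4, 'plugin_name': 'MultiProc'}
# preprocessing_pipeline(cfg)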
map_list = [
    'CHARMED_AD', 'CHARMED_FA', 'CHARMED_FR', 'CHARMED_IAD', 'CHARMED_MD',
    'CHARMED_RD', 'Diffusion_20_AD', 'Diffusion_20_FA', 'Diffusion_20_MD',
    'Diffusion_20_RD', 'Kurtosis_AD', 'Kurtosis_AWF', 'Kurtosis_MD',
    'Kurtosis_RD', 'Kurtosis_TORT', 'Kurtosis_AK', 'Kurtosis_FA',
    'Kurtosis_MK', 'Kurtosis_RK', 'NODDI_FICVF', 'NODDI_ODI'
]


output_dir = 'DTI_TBSS_Wax'
working_dir = 'DTI_TBSS_workingdir_Wax_Template'

DTI_TBSS_Wax = Workflow(name='DTI_TBSS_Wax')
DTI_TBSS_Wax.base_dir = opj(experiment_dir, working_dir)

#-----------------------------------------------------------------------------------------------------
# In[3]:

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['map_id']), name="infosource")
infosource.iterables = [('map_id', map_list)]

#-----------------------------------------------------------------------------------------------------
# In[4]:

templates = {
        

# apply warp to timeseries
# applytransform_ts = Node(ants.WarpTimeSeriesImageMultiTransform(dimension=4),
#                          name='applytransform_ts')
#    
#    
# applywarp_linear.connect([(inputnode, applytransform_ts, [('epi_moco_ts','input_image')]),
#                           (transformlist, applytransform_ts, [('transformlist', 'transformation_series')]),
#                           (inputnode, applytransform_ts, [('mni', 'reference_image')]),
#                           (applytransform_ts, outputnode, [('output_image','lin_ts_fullwarped')])
#                           ])


# set up workflow, in- and output
applywarp_linear.base_dir='/scr/kansas1/huntenburg/lemon_missing/working_dir/'
data_dir='/scr/jessica2/Schaare/LEMON/'
out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
#applywarp_linear.config['execution']={'remove_unnecessary_outputs': 'False'}
applywarp_linear.config['execution']['crashdump_dir'] = applywarp_linear.base_dir + "/crash_files"

# read subjects from file
subjects = []
with open('/scr/jessica2/Schaare/LEMON/missing_subjects.txt') as f:
    for line in f:
        subjects.append(line.strip())


# create infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id']), 
                  name='infosource')
Example #49
                                    terminal_output='file'),
                    name='apply2con', iterfield=['input_image'])

# Apply Transformation - applies the normalization matrix to the mean image
apply2mean = Node(ApplyTransforms(args='--float',
                                  input_image_type=3,
                                  interpolation='Linear',
                                  invert_transform_flags=[False],
                                  num_threads=1,
                                  reference_image=template,
                                  terminal_output='file'),
                  name='apply2mean')

# Initiation of the ANTS normalization workflow
normflow = Workflow(name='normflow')
normflow.base_dir = opj(experiment_dir, working_dir)

# Connect up ANTS normalization components
normflow.connect([(antsreg, apply2con, [('composite_transform', 'transforms')]),
                  (antsreg, apply2mean, [('composite_transform',
                                          'transforms')])
                  ])

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - to grab the data (alternativ to DataGrabber)
anat_file = opj('freesurfer', '{subject_id}', 'mri/brain.mgz')
func_file = opj(input_dir_1st, 'contrasts', '{subject_id}',
Example #50
0
def calc_centrality_metrics(cfg):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as freesurfer

    import CPAC.network_centrality.resting_state_centrality as cpac_centrality
    import CPAC.network_centrality.z_score as cpac_centrality_z_score


    # INPUT PARAMETERS
    dicom_dir = cfg['dicom_dir']
    preprocessed_data_dir = cfg['preprocessed_data_dir']

    working_dir = cfg['working_dir']
    freesurfer_dir = cfg['freesurfer_dir']
    template_dir = cfg['template_dir']
    script_dir = cfg['script_dir']
    ds_dir = cfg['ds_dir']

    subjects_list = cfg['subjects_list']
    TR_list = cfg['TR_list']

    use_n_procs = cfg['use_n_procs']
    plugin_name = cfg['plugin_name']



    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='LeiCA_metrics')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.substitutions = [('_TR_id_', 'TR_')]
    ds.inputs.regexp_substitutions = [('_subject_id_[A0-9]*/', ''),
                                      ('_z_score[0-9]*/', '')]
    # (previously also considered: ('dc/_TR_id_[0-9]*/', ''), ('evc/_TR_id_[0-9]*/', ''))
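    # e.g. a path component like '_subject_id_A00123/' (hypothetical ID) is
    # dropped, and '_TR_id_645' is rewritten to 'TR_645'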

    #####################################
    # SET ITERATORS
    #####################################
    # GET SCAN TR_ID ITERATOR
    scan_infosource = Node(util.IdentityInterface(fields=['TR_id']), name='scan_infosource')
    scan_infosource.iterables = ('TR_id', TR_list)

    subjects_infosource = Node(util.IdentityInterface(fields=['subject_id']), name='subjects_infosource')
    subjects_infosource.iterables = ('subject_id', subjects_list)

    def add_subject_id_to_ds_dir_fct(subject_id, ds_path):
        import os
        out_path = os.path.join(ds_path, subject_id)
        return out_path

    add_subject_id_to_ds_dir = Node(util.Function(input_names=['subject_id', 'ds_path'],
                                                  output_names=['out_path'],
                                                  function=add_subject_id_to_ds_dir_fct),
                                    name='add_subject_id_to_ds_dir')
    wf.connect(subjects_infosource, 'subject_id', add_subject_id_to_ds_dir, 'subject_id')
    add_subject_id_to_ds_dir.inputs.ds_path = ds_dir

    wf.connect(add_subject_id_to_ds_dir, 'out_path', ds, 'base_directory')


    # get atlas data
    templates_atlases = {'GM_mask_MNI_2mm': 'SPM_GM/SPM_GM_mask_2mm.nii.gz',
                         'GM_mask_MNI_3mm': 'SPM_GM/SPM_GM_mask_3mm.nii.gz',
                         'FSL_MNI_3mm_template': 'MNI152_T1_3mm_brain.nii.gz',
                         'vmhc_symm_brain': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_symmetric.nii.gz',
                         'vmhc_symm_brain_3mm': 'cpac_image_resources/symmetric/MNI152_T1_3mm_brain_symmetric.nii.gz',
                         'vmhc_symm_skull': 'cpac_image_resources/symmetric/MNI152_T1_2mm_symmetric.nii.gz',
                         'vmhc_symm_brain_mask_dil': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz',
                         'vmhc_config_file_2mm': 'cpac_image_resources/symmetric/T1_2_MNI152_2mm_symmetric.cnf'
                         }

    selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
                                                      base_directory=template_dir),
                                      name="selectfiles_anat_templates")


    # GET SUBJECT SPECIFIC FUNCTIONAL AND STRUCTURAL DATA
    selectfiles_templates = {
        'epi_2_MNI_warp': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_MNI_warp/TR_{TR_id}/*.nii.gz',
        'epi_mask': '{subject_id}/rsfMRI_preprocessing/masks/brain_mask_epiSpace/TR_{TR_id}/*.nii.gz',
        'preproc_epi_full_spectrum': '{subject_id}/rsfMRI_preprocessing/epis/01_denoised/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp': '{subject_id}/rsfMRI_preprocessing/epis/02_denoised_BP/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp_tNorm': '{subject_id}/rsfMRI_preprocessing/epis/03_denoised_BP_tNorm/TR_{TR_id}/*.nii.gz',
        'epi_2_struct_mat': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_struct_mat/TR_{TR_id}/*.mat',
        't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
        't1w_brain': '{subject_id}/rsfMRI_preprocessing/struct_prep/t1w_brain/t1w_reoriented_maths.nii.gz',
        'epi_bp_tNorm_MNIspace_3mm': '{subject_id}/rsfMRI_preprocessing/epis_MNI_3mm/03_denoised_BP_tNorm/TR_645/residual_filt_norm_warp.nii.gz'
    }

    selectfiles = Node(nio.SelectFiles(selectfiles_templates,
                                       base_directory=preprocessed_data_dir),
                       name="selectfiles")
    wf.connect(scan_infosource, 'TR_id', selectfiles, 'TR_id')
    wf.connect(subjects_infosource, 'subject_id', selectfiles, 'subject_id')
    # selectfiles.inputs.subject_id = subject_id

    # CREATE TRANSFORMATIONS
    # create MNI-to-EPI warp by inverting the EPI-to-MNI warp
    MNI_2_epi_warp = Node(fsl.InvWarp(), name='MNI_2_epi_warp')
    # 'reference' must be an image in the warp's source (EPI) space, so it is
    # connected below rather than set statically to the MNI template
    wf.connect(selectfiles, 'epi_mask', MNI_2_epi_warp, 'reference')
    wf.connect(selectfiles, 'epi_2_MNI_warp', MNI_2_epi_warp, 'warp')
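    # the resulting 'inverse_warp' output can then bring MNI-space images into
    # EPI space; a minimal sketch (node name assumed):
    # mask_epiSpace = Node(fsl.ApplyWarp(interp='nn'), name='mask_epiSpace')
    # wf.connect(MNI_2_epi_warp, 'inverse_warp', mask_epiSpace, 'field_file')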

    #####################
    # CALCULATE METRICS
    #####################

    # DEGREE
    # fixme
    # a_mem = 5
    # fixme
    a_mem = 20
    dc = cpac_centrality.create_resting_state_graphs(allocated_memory=a_mem,
                                                     wf_name='dc')
    # dc.plugin_args = {'submit_specs': 'request_memory = 6000'}
    # fixme
    dc.plugin_args = {'submit_specs': 'request_memory = 20000'}

    dc.inputs.inputspec.method_option = 0  # 0 for degree centrality, 1 for eigenvector centrality, 2 for lFCD
    dc.inputs.inputspec.threshold_option = 0  # 0 for probability p_value, 1 for sparsity threshold, any other for threshold value
    dc.inputs.inputspec.threshold = 0.0001
    dc.inputs.inputspec.weight_options = [True,
                                          True]  # list of two booleans for binarize and weighted options respectively
    wf.connect(selectfiles, 'epi_bp_tNorm_MNIspace_3mm', dc, 'inputspec.subject')
    wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', dc, 'inputspec.template')
    wf.connect(dc, 'outputspec.centrality_outputs', ds, 'metrics.centrality.dc.@centrality_outputs')
    wf.connect(dc, 'outputspec.correlation_matrix', ds, 'metrics.centrality.dc.@correlation_matrix')
    wf.connect(dc, 'outputspec.graph_outputs', ds, 'metrics.centrality.dc.@graph_outputs')

    # DC Z-SCORE
    dc_Z = cpac_centrality_z_score.get_cent_zscore(wf_name='dc_Z')
    wf.connect(dc, 'outputspec.centrality_outputs', dc_Z, 'inputspec.input_file')
    wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', dc_Z, 'inputspec.mask_file')
    wf.connect(dc_Z, 'outputspec.z_score_img', ds, 'metrics.centrality.dc_z.@output')

    a_mem = 20
    evc = cpac_centrality.create_resting_state_graphs(allocated_memory=a_mem, wf_name='evc')
    evc.plugin_args = {'submit_specs': 'request_memory = 20000'}

    evc.inputs.inputspec.method_option = 1  # 0 for degree centrality, 1 for eigenvector centrality, 2 for lFCD
    evc.inputs.inputspec.threshold_option = 0  # 0 for probability p_value, 1 for sparsity threshold, any other for threshold value
    evc.inputs.inputspec.threshold = 0.0001
    evc.inputs.inputspec.weight_options = [True,
                                           True]  # list of two booleans for binarize and weighted options respectively
    wf.connect(selectfiles, 'epi_bp_tNorm_MNIspace_3mm', evc, 'inputspec.subject')
    wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', evc, 'inputspec.template')
    wf.connect(evc, 'outputspec.centrality_outputs', ds, 'metrics.centrality.evc.@centrality_outputs')
    wf.connect(evc, 'outputspec.correlation_matrix', ds, 'metrics.centrality.evc.@correlation_matrix')
    wf.connect(evc, 'outputspec.graph_outputs', ds, 'metrics.centrality.evc.@graph_outputs')

    # EVC Z-SCORE
    evc_Z = cpac_centrality_z_score.get_cent_zscore(wf_name='evc_Z')
    wf.connect(evc, 'outputspec.centrality_outputs', evc_Z, 'inputspec.input_file')
    wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', evc_Z, 'inputspec.mask_file')
    wf.connect(evc_Z, 'outputspec.z_score_img', ds, 'metrics.centrality.evc_z.@output')

    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example #51
0
subjects_db = list(df['DB'])

# sessions to loop over
sessions = ['rest1_1']  # ,'rest1_2', 'rest2_1', 'rest2_2'

# directories
working_dir = '/scr/ilz3/myelinconnect/working_dir/'
data_dir = '/scr/ilz3/myelinconnect/'
out_dir = '/scr/ilz3/myelinconnect/final_struct_space/rest1_1_trans'

# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

# main workflow
smooth = Workflow(name='smooth')
smooth.base_dir = working_dir
smooth.config['execution']['crashdump_dir'] = smooth.base_dir + "/crash_files"

# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=['subject']),
                          name='subject_infosource')
subject_infosource.iterables = [('subject', subjects_db)]

# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=['session']),
                          name='session_infosource')
session_infosource.iterables = [('session', sessions)]

# select files
templates = {'rest': 'final_struct_space/rest1_1_trans/{subject}_{session}_denoised_trans.nii.gz'
             }
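# a possible continuation (sketch; node and field names assumed, following the
# SelectFiles pattern used in the other examples):
# selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
#                    name='selectfiles')
# smooth.connect([(subject_infosource, selectfiles, [('subject', 'subject')]),
#                 (session_infosource, selectfiles, [('session', 'session')])])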
Example #52
0
def calc_local_metrics(cfg):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as freesurfer

    import CPAC.alff.alff as cpac_alff
    import CPAC.reho.reho as cpac_reho
    import CPAC.utils.utils as cpac_utils
    import CPAC.vmhc.vmhc as cpac_vmhc
    import CPAC.registration.registration as cpac_registration
    import CPAC.network_centrality.z_score as cpac_centrality_z_score

    import utils as calc_metrics_utils


    # INPUT PARAMETERS
    dicom_dir = cfg['dicom_dir']
    preprocessed_data_dir = cfg['preprocessed_data_dir']

    working_dir = cfg['working_dir']
    freesurfer_dir = cfg['freesurfer_dir']
    template_dir = cfg['template_dir']
    script_dir = cfg['script_dir']
    ds_dir = cfg['ds_dir']

    subject_id = cfg['subject_id']
    TR_list = cfg['TR_list']

    vols_to_drop = cfg['vols_to_drop']
    rois_list = cfg['rois_list']
    lp_cutoff_freq = cfg['lp_cutoff_freq']
    hp_cutoff_freq = cfg['hp_cutoff_freq']
    use_fs_brainmask = cfg['use_fs_brainmask']

    use_n_procs = cfg['use_n_procs']
    plugin_name = cfg['plugin_name']



    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='LeiCA_metrics')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.substitutions = [('_TR_id_', 'TR_')]
    ds.inputs.regexp_substitutions = [('_variabilty_MNIspace_3mm[0-9]*/', ''), ('_z_score[0-9]*/', '')]


    #####################################
    # SET ITERATORS
    #####################################
    # GET SCAN TR_ID ITERATOR
    scan_infosource = Node(util.IdentityInterface(fields=['TR_id']), name='scan_infosource')
    scan_infosource.iterables = ('TR_id', TR_list)



    # get atlas data
    templates_atlases = {  # 'GM_mask_MNI_2mm': 'SPM_GM/SPM_GM_mask_2mm.nii.gz',
                           # 'GM_mask_MNI_3mm': 'SPM_GM/SPM_GM_mask_3mm.nii.gz',
                           'FSL_MNI_3mm_template': 'MNI152_T1_3mm_brain.nii.gz',
                           'vmhc_symm_brain': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_symmetric.nii.gz',
                           'vmhc_symm_brain_3mm': 'cpac_image_resources/symmetric/MNI152_T1_3mm_brain_symmetric.nii.gz',
                           'vmhc_symm_skull': 'cpac_image_resources/symmetric/MNI152_T1_2mm_symmetric.nii.gz',
                           'vmhc_symm_brain_mask_dil': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz',
                           'vmhc_config_file_2mm': 'cpac_image_resources/symmetric/T1_2_MNI152_2mm_symmetric.cnf'
                           }

    selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
                                                      base_directory=template_dir),
                                      name="selectfiles_anat_templates")


    # GET SUBJECT SPECIFIC FUNCTIONAL AND STRUCTURAL DATA
    selectfiles_templates = {
        'epi_2_MNI_warp': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_MNI_warp/TR_{TR_id}/*.nii.gz',
        'epi_mask': '{subject_id}/rsfMRI_preprocessing/masks/brain_mask_epiSpace/TR_{TR_id}/*.nii.gz',
        'preproc_epi_full_spectrum': '{subject_id}/rsfMRI_preprocessing/epis/01_denoised/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp': '{subject_id}/rsfMRI_preprocessing/epis/02_denoised_BP/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp_tNorm': '{subject_id}/rsfMRI_preprocessing/epis/03_denoised_BP_tNorm/TR_{TR_id}/*.nii.gz',
        'epi_2_struct_mat': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_struct_mat/TR_{TR_id}/*.mat',
        't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
        't1w_brain': '{subject_id}/rsfMRI_preprocessing/struct_prep/t1w_brain/t1w_reoriented_maths.nii.gz',
    }

    selectfiles = Node(nio.SelectFiles(selectfiles_templates,
                                       base_directory=preprocessed_data_dir),
                       name="selectfiles")
    wf.connect(scan_infosource, 'TR_id', selectfiles, 'TR_id')
    selectfiles.inputs.subject_id = subject_id



    # CREATE TRANSFORMATIONS
    # create MNI-to-EPI warp by inverting the EPI-to-MNI warp
    MNI_2_epi_warp = Node(fsl.InvWarp(), name='MNI_2_epi_warp')
    # 'reference' must be an image in the warp's source (EPI) space, so it is
    # connected below rather than set statically to the MNI template
    wf.connect(selectfiles, 'epi_mask', MNI_2_epi_warp, 'reference')
    wf.connect(selectfiles, 'epi_2_MNI_warp', MNI_2_epi_warp, 'warp')


    # # CREATE GM MASK IN EPI SPACE
    # GM_mask_epiSpace = Node(fsl.ApplyWarp(), name='GM_mask_epiSpace')
    # GM_mask_epiSpace.inputs.out_file = 'GM_mask_epiSpace.nii.gz'
    #
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_2mm', GM_mask_epiSpace, 'in_file')
    # wf.connect(selectfiles, 'epi_mask', GM_mask_epiSpace, 'ref_file')
    # wf.connect(MNI_2_epi_warp, 'inverse_warp', GM_mask_epiSpace, 'field_file')
    # wf.connect(GM_mask_epiSpace, 'out_file', ds, 'GM_mask_epiSpace')



    # fixme
    # # CREATE TS IN MNI SPACE
    # # is it ok to apply the 2mm warpfield to the 3mm template?
    # # seems ok: https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind0904&L=FSL&P=R14011&1=FSL&9=A&J=on&d=No+Match%3BMatch%3BMatches&z=4
    # epi_bp_MNIspace_3mm = Node(fsl.ApplyWarp(), name='epi_bp_MNIspace_3mm')
    # epi_bp_MNIspace_3mm.inputs.interp = 'spline'
    # epi_bp_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    # wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', epi_bp_MNIspace_3mm, 'ref_file')
    # wf.connect(selectfiles, 'preproc_epi_bp', epi_bp_MNIspace_3mm, 'in_file')
    # wf.connect(selectfiles, 'epi_2_MNI_warp', epi_bp_MNIspace_3mm, 'field_file')


    # CREATE EPI MASK IN MNI SPACE
    epi_mask_MNIspace_3mm = Node(fsl.ApplyWarp(), name='epi_mask_MNIspace_3mm')
    epi_mask_MNIspace_3mm.inputs.interp = 'nn'
    epi_mask_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', epi_mask_MNIspace_3mm, 'ref_file')
    wf.connect(selectfiles, 'epi_mask', epi_mask_MNIspace_3mm, 'in_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', epi_mask_MNIspace_3mm, 'field_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', ds, 'epi_mask_MNIspace_3mm')


    #####################
    # CALCULATE METRICS
    #####################

    # f/ALFF
    alff = cpac_alff.create_alff('alff')
    alff.inputs.hp_input.hp = 0.01
    alff.inputs.lp_input.lp = 0.1
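    # 0.01-0.1 Hz is the standard resting-state low-frequency band: ALFF sums
    # the power in this band, fALFF normalizes it by the full-spectrum power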
    wf.connect(selectfiles, 'preproc_epi_full_spectrum', alff, 'inputspec.rest_res')
    # wf.connect(GM_mask_epiSpace, 'out_file', alff, 'inputspec.rest_mask')
    wf.connect(selectfiles, 'epi_mask', alff, 'inputspec.rest_mask')
    wf.connect(alff, 'outputspec.alff_img', ds, 'alff.alff')
    wf.connect(alff, 'outputspec.falff_img', ds, 'alff.falff')



    # f/ALFF 2 MNI
    # fixme spline or default?
    alff_MNIspace_3mm = Node(fsl.ApplyWarp(), name='alff_MNIspace_3mm')
    alff_MNIspace_3mm.inputs.interp = 'spline'
    alff_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', alff_MNIspace_3mm, 'ref_file')
    wf.connect(alff, 'outputspec.alff_img', alff_MNIspace_3mm, 'in_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', alff_MNIspace_3mm, 'field_file')
    wf.connect(alff_MNIspace_3mm, 'out_file', ds, 'alff.alff_MNI_3mm')

    falff_MNIspace_3mm = Node(fsl.ApplyWarp(), name='falff_MNIspace_3mm')
    falff_MNIspace_3mm.inputs.interp = 'spline'
    falff_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', falff_MNIspace_3mm, 'ref_file')
    wf.connect(alff, 'outputspec.falff_img', falff_MNIspace_3mm, 'in_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', falff_MNIspace_3mm, 'field_file')
    wf.connect(falff_MNIspace_3mm, 'out_file', ds, 'alff.falff_MNI_3mm')



    # f/ALFF_MNI Z-SCORE
    alff_MNIspace_3mm_Z = cpac_utils.get_zscore(input_name='alff_MNIspace_3mm', wf_name='alff_MNIspace_3mm_Z')
    wf.connect(alff_MNIspace_3mm, 'out_file', alff_MNIspace_3mm_Z, 'inputspec.input_file')
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', alff_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', alff_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(alff_MNIspace_3mm_Z, 'outputspec.z_score_img', ds, 'alff.alff_MNI_3mm_Z')

    falff_MNIspace_3mm_Z = cpac_utils.get_zscore(input_name='falff_MNIspace_3mm', wf_name='falff_MNIspace_3mm_Z')
    wf.connect(falff_MNIspace_3mm, 'out_file', falff_MNIspace_3mm_Z, 'inputspec.input_file')
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', falff_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', falff_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(falff_MNIspace_3mm_Z, 'outputspec.z_score_img', ds, 'alff.falff_MNI_3mm_Z')


    # f/ALFF_MNI STANDARDIZE BY MEAN
    alff_MNIspace_3mm_standardized_mean = calc_metrics_utils.standardize_divide_by_mean(
        wf_name='alff_MNIspace_3mm_standardized_mean')
    wf.connect(alff_MNIspace_3mm, 'out_file', alff_MNIspace_3mm_standardized_mean, 'inputnode.in_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', alff_MNIspace_3mm_standardized_mean, 'inputnode.mask_file')
    wf.connect(alff_MNIspace_3mm_standardized_mean, 'outputnode.out_file', ds, 'alff.alff_MNI_3mm_standardized_mean')

    falff_MNIspace_3mm_standardized_mean = calc_metrics_utils.standardize_divide_by_mean(
        wf_name='falff_MNIspace_3mm_standardized_mean')
    wf.connect(falff_MNIspace_3mm, 'out_file', falff_MNIspace_3mm_standardized_mean, 'inputnode.in_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', falff_MNIspace_3mm_standardized_mean, 'inputnode.mask_file')
    wf.connect(falff_MNIspace_3mm_standardized_mean, 'outputnode.out_file', ds, 'alff.falff_MNI_3mm_standardized_mean')





    # REHO
    reho = cpac_reho.create_reho()
    reho.inputs.inputspec.cluster_size = 27
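    # cluster_size 27 = the full 3x3x3 neighborhood (face, edge and corner
    # neighbors); CPAC's ReHo also accepts 7 (faces) and 19 (faces + edges)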
    wf.connect(selectfiles, 'preproc_epi_bp', reho, 'inputspec.rest_res_filt')
    # wf.connect(GM_mask_epiSpace, 'out_file', reho, 'inputspec.rest_mask')
    wf.connect(selectfiles, 'epi_mask', reho, 'inputspec.rest_mask')
    wf.connect(reho, 'outputspec.raw_reho_map', ds, 'reho.reho')



    # REHO 2 MNI
    # fixme spline or default?
    reho_MNIspace_3mm = Node(fsl.ApplyWarp(), name='reho_MNIspace_3mm')
    reho_MNIspace_3mm.inputs.interp = 'spline'
    reho_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', reho_MNIspace_3mm, 'ref_file')
    wf.connect(reho, 'outputspec.raw_reho_map', reho_MNIspace_3mm, 'in_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', reho_MNIspace_3mm, 'field_file')
    wf.connect(reho_MNIspace_3mm, 'out_file', ds, 'reho.reho_MNI_3mm')



    # REHO_MNI Z-SCORE
    reho_MNIspace_3mm_Z = cpac_utils.get_zscore(input_name='reho_MNIspace_3mm', wf_name='reho_MNIspace_3mm_Z')
    wf.connect(reho_MNIspace_3mm, 'out_file', reho_MNIspace_3mm_Z, 'inputspec.input_file')
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', reho_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', reho_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(reho_MNIspace_3mm_Z, 'outputspec.z_score_img', ds, 'reho.reho_MNI_3mm_Z')



    # REHO_MNI STANDARDIZE BY MEAN
    reho_MNIspace_3mm_standardized_mean = calc_metrics_utils.standardize_divide_by_mean(
        wf_name='reho_MNIspace_3mm_standardized_mean')
    wf.connect(reho_MNIspace_3mm, 'out_file', reho_MNIspace_3mm_standardized_mean, 'inputnode.in_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', reho_MNIspace_3mm_standardized_mean, 'inputnode.mask_file')
    wf.connect(reho_MNIspace_3mm_standardized_mean, 'outputnode.out_file', ds, 'reho.reho_MNI_3mm_standardized_mean')



    # VMHC
    # create registration to symmetrical MNI template
    struct_2_MNI_symm = cpac_registration.create_nonlinear_register(name='struct_2_MNI_symm')
    wf.connect(selectfiles_anat_templates, 'vmhc_config_file_2mm', struct_2_MNI_symm, 'inputspec.fnirt_config')
    wf.connect(selectfiles_anat_templates, 'vmhc_symm_brain', struct_2_MNI_symm, 'inputspec.reference_brain')
    wf.connect(selectfiles_anat_templates, 'vmhc_symm_skull', struct_2_MNI_symm, 'inputspec.reference_skull')
    wf.connect(selectfiles_anat_templates, 'vmhc_symm_brain_mask_dil', struct_2_MNI_symm, 'inputspec.ref_mask')
    wf.connect(selectfiles, 't1w', struct_2_MNI_symm, 'inputspec.input_skull')
    wf.connect(selectfiles, 't1w_brain', struct_2_MNI_symm, 'inputspec.input_brain')

    wf.connect(struct_2_MNI_symm, 'outputspec.output_brain', ds, 'vmhc.symm_reg.@output_brain')
    wf.connect(struct_2_MNI_symm, 'outputspec.linear_xfm', ds, 'vmhc.symm_reg.@linear_xfm')
    wf.connect(struct_2_MNI_symm, 'outputspec.invlinear_xfm', ds, 'vmhc.symm_reg.@invlinear_xfm')
    wf.connect(struct_2_MNI_symm, 'outputspec.nonlinear_xfm', ds, 'vmhc.symm_reg.@nonlinear_xfm')



    # fixme
    vmhc = cpac_vmhc.create_vmhc(use_ants=False, name='vmhc')
    vmhc.inputs.fwhm_input.fwhm = 4
    wf.connect(selectfiles_anat_templates, 'vmhc_symm_brain_3mm', vmhc, 'inputspec.standard_for_func')
    wf.connect(selectfiles, 'preproc_epi_bp_tNorm', vmhc, 'inputspec.rest_res')
    wf.connect(selectfiles, 'epi_2_struct_mat', vmhc, 'inputspec.example_func2highres_mat')
    wf.connect(struct_2_MNI_symm, 'outputspec.nonlinear_xfm', vmhc, 'inputspec.fnirt_nonlinear_warp')
    # wf.connect(GM_mask_epiSpace, 'out_file', vmhc, 'inputspec.rest_mask')
    wf.connect(selectfiles, 'epi_mask', vmhc, 'inputspec.rest_mask')

    wf.connect(vmhc, 'outputspec.rest_res_2symmstandard', ds, 'vmhc.rest_res_2symmstandard')
    wf.connect(vmhc, 'outputspec.VMHC_FWHM_img', ds, 'vmhc.VMHC_FWHM_img')
    wf.connect(vmhc, 'outputspec.VMHC_Z_FWHM_img', ds, 'vmhc.VMHC_Z_FWHM_img')
    wf.connect(vmhc, 'outputspec.VMHC_Z_stat_FWHM_img', ds, 'vmhc.VMHC_Z_stat_FWHM_img')



    # VARIABILITY SCORES
    variability = Node(util.Function(input_names=['in_file'],
                                     output_names=['out_file_list'],
                                     function=calc_metrics_utils.calc_variability),
                       name='variability')
    wf.connect(selectfiles, 'preproc_epi_bp', variability, 'in_file')
    wf.connect(variability, 'out_file_list', ds, 'variability.subjectSpace.@out_files')


    # #fixme spline?
    variabilty_MNIspace_3mm = MapNode(fsl.ApplyWarp(), iterfield=['in_file'], name='variabilty_MNIspace_3mm')
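    # MapNode fans out over 'iterfield': one ApplyWarp run per file in the
    # variability output list, yielding a matching list of warped files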
    variabilty_MNIspace_3mm.inputs.interp = 'spline'
    variabilty_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', variabilty_MNIspace_3mm, 'ref_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', variabilty_MNIspace_3mm, 'field_file')
    wf.connect(variability, 'out_file_list', variabilty_MNIspace_3mm, 'in_file')
    wf.connect(variabilty_MNIspace_3mm, 'out_file', ds, 'variability.MNI_3mm.@out_file')


    # CALC Z SCORE
    variabilty_MNIspace_3mm_Z = cpac_centrality_z_score.get_cent_zscore(wf_name='variabilty_MNIspace_3mm_Z')
    wf.connect(variabilty_MNIspace_3mm, 'out_file', variabilty_MNIspace_3mm_Z, 'inputspec.input_file')
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', variabilty_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', variabilty_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(variabilty_MNIspace_3mm_Z, 'outputspec.z_score_img', ds, 'variability.MNI_3mm_Z.@out_file')



    # STANDARDIZE BY MEAN
    variabilty_MNIspace_3mm_standardized_mean = calc_metrics_utils.standardize_divide_by_mean(
        wf_name='variabilty_MNIspace_3mm_standardized_mean')
    wf.connect(variabilty_MNIspace_3mm, 'out_file', variabilty_MNIspace_3mm_standardized_mean, 'inputnode.in_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', variabilty_MNIspace_3mm_standardized_mean, 'inputnode.mask_file')
    wf.connect(variabilty_MNIspace_3mm_standardized_mean, 'outputnode.out_file', ds,
               'variability.MNI_3mm_standardized_mean.@out_file')

    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example #53
0
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 11:39:05 2015

@author: craigmoodie
"""

from nipype.pipeline.engine import Workflow, Node, MapNode
from variables import data_dir, work_dir, subject_list, plugin, plugin_args


surface_workflow = Workflow(name="qc_workflow")

surface_workflow.base_dir = work_dir


from nipype import SelectFiles
templates = dict(T1="*_{subject_id}_*/T1w_MPR_BIC_v1/*00001.nii*")
file_list = Node(SelectFiles(templates), name="EPI_and_T1_File_Selection")
file_list.inputs.base_directory = data_dir
file_list.iterables = [("subject_id", subject_list)]


from nipype.interfaces.freesurfer import ReconAll
reconall = Node(ReconAll(), name="Recon_All")
# note: this assigns the literal string "subject_id"; the actual subject ID
# (e.g. from the file_list iterable) should be wired in here instead
reconall.inputs.subject_id = "subject_id"
reconall.inputs.directive = 'all'
reconall.inputs.subjects_dir = data_dir
#reconall.inputs.T1_files = "T1"
#reconall.run()
Example #54
0
                self.inputs.output_prefix + '1InverseWarp.nii.gz')
        return outputs


if __name__ == '__main__':
    ####
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.io as nio
    #from nipype.interfaces.ants.registration import RegistrationSynQuick

    import os

    os.chdir('/Users/franzliem/Desktop/antstest')

    wf = Workflow(name='normalize')
    wf.base_dir = os.path.join('/Users/franzliem/Desktop/antstest/wf')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = '/Users/franzliem/Desktop/antstest/ds'

    s = Node(RegistrationSynQuick(), name='s_rigid')
    s.base_dir = '/Users/franzliem/Desktop/antstest/node_test'
    s.inputs.fixed_image = '/Applications/FSL/data/standard/FMRIB58_FA_1mm.nii.gz'
    s.inputs.moving_image = '/Users/franzliem/Desktop/antstest/dtifit__FA_ero.nii.gz'
    s.inputs.output_prefix = 'ants_mni'
    s.inputs.num_threads = 4
    # s.inputs.use_histogram_matching = True
    # s.inputs.transform_type = 'r'
    # s.inputs.histogram_bins = 44
    # s.inputs.spline_distance = 15
    # s.inputs.precision_type = 'double'
Example #55
0
                                          ('t1map', 'inQuantitative'),
                                          ('uni', 'inT1weighted')]),
                 (background, strip, [('outMasked2','inInput')]),
                 (background, outputnode, [('outMasked2','uni_masked'),
                                           ('outMasked','t1map_masked'),
                                           ('outSignal2','background_mask')]),
                 (strip, outputnode, [('outStripped','uni_stripped'),
                                      ('outMask', 'skullstrip_mask'),
                                      ('outOriginal','uni_reoriented')])
                 ])


#### in and out ####################################################################################

#mp2rage.base_dir='/scr/ilz1/nonlinear_registration/lemon/testing/dicom_start/'
mp2rage.base_dir = '/scr/kansas1/huntenburg/'
data_dir = '/scr/litauen1/lsd/pilot_140521/dicoms/DL1T/'
#data_dir='/scr/ilz1/nonlinear_registration/lemon/testing/dicom_start/'
#data_dir = '/scr/jessica2/Schaare/LEMON/raw/'
#out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
#mp2rage.config['execution']={'remove_unnecessary_outputs': 'False'}
subjects = ['LEMON064', 'LEMON065', 'LEMON096']
# subjects=os.listdir(data_dir)
# subjects.remove('LEMON025')
# subjects.remove('LEMON065')

# infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id']),
                  name='infosource')
infosource.iterables = ('subject_id', subjects)
Example #56
0
def learning_predict_data_2samp_wf(working_dir,
                                   ds_dir,
                                   in_data_name_list,
                                   subjects_selection_crit_dict,
                                   subjects_selection_crit_names_list,
                                   aggregated_subjects_dir,
                                   target_list,
                                   use_n_procs,
                                   plugin_name,
                                   confound_regression=[False, True],
                                   run_cv=False,
                                   n_jobs_cv=1,
                                   run_tuning=False,
                                   run_2sample_training=False,
                                   aggregated_subjects_dir_nki=None,
                                   subjects_selection_crit_dict_nki=None,
                                   subjects_selection_crit_name_nki=None,
                                   reverse_split=False,
                                   random_state_nki=666,
                                   run_learning_curve=False,
                                   life_test_size=0.5):
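    """Set up and run the prediction workflow.

    Aggregates unimodal feature arrays into multimodal matrices, selects
    subjects by the given criteria, trains one prediction model per target
    and confound-regression setting (optionally with cross-validation,
    tuning, learning curves and a second NKI training sample), and
    backprojects model weights into the original data space.
    """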
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_split_fct, \
        backproject_and_split_weights_fct, select_subjects_fct, select_multimodal_X_fct, learning_curve_plot
    import pandas as pd



    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_2samp_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={'stop_on_first_crash': False,
                                 'remove_unnecessary_outputs': False,
                                 'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False



    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if isinstance(i, list) else [i] for i in in_data_name_list]
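    # e.g. ['alff', ['alff', 'reho']] becomes [['alff'], ['alff', 'reho']];
    # the flattened unique list below then holds 'alff' and 'reho' once each
    # (in arbitrary order, since it passes through a set)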
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))



    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)

    subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
                                        name='subject_selection_infosource')
    subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']), name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)



    ###############################################################################################################
    # COMPILE LIFE DATA
    ###############################################################################################################

    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
    unimodal_lookup_dict = {}
    for k in in_data_name_list_unique:
        unimodal_lookup_dict[k] = {'X_file': os.path.join(aggregated_subjects_dir, X_file_template.format(
            in_data_name=k)),
                                   'unimodal_backprojection_info_file': os.path.join(aggregated_subjects_dir,
                                                                                     info_file_template.format(
                                                                                         in_data_name=k))
                                   }



    ###############################################################################################################
    # AGGREGATE MULTIMODAL METRICS
    # stack single modality arrays horizontally
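    # (conceptually: X1 of shape (n_subjects, p1) and X2 of shape
    # (n_subjects, p2) are combined into X of shape (n_subjects, p1 + p2))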
    aggregate_multimodal_metrics = Node(util.Function(input_names=['multimodal_list', 'unimodal_lookup_dict'],
                                                      output_names=['X_multimodal_file',
                                                                    'multimodal_backprojection_info',
                                                                    'multimodal_name'],
                                                      function=aggregate_multimodal_metrics_fct),
                                        name='aggregate_multimodal_metrics')
    wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name', aggregate_multimodal_metrics,
               'multimodal_list')
    aggregate_multimodal_metrics.inputs.unimodal_lookup_dict = unimodal_lookup_dict



    ###############################################################################################################
    # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
    select_subjects = Node(util.Function(input_names=['df_all_subjects_pickle_file',
                                                      'subjects_selection_crit_dict',
                                                      'selection_criterium'],
                                         output_names=['df_use_file',
                                                       'df_use_pickle_file',
                                                       'subjects_selection_index'],
                                         function=select_subjects_fct),
                           name='select_subjects')

    select_subjects.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file
    select_subjects.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict
    wf.connect(subject_selection_infosource, 'selection_criterium', select_subjects, 'selection_criterium')



    ###############################################################################################################
    # SELECT MULITMODAL X
    # select subjects (rows) from multimodal X according indexer
    select_multimodal_X = Node(util.Function(input_names=['X_multimodal_file', 'subjects_selection_index',
                                                          'selection_criterium'],
                                             output_names=['X_multimodal_selected_file'],
                                             function=select_multimodal_X_fct),
                               name='select_multimodal_X')
    wf.connect(aggregate_multimodal_metrics, 'X_multimodal_file', select_multimodal_X, 'X_multimodal_file')
    wf.connect(select_subjects, 'subjects_selection_index', select_multimodal_X, 'subjects_selection_index')






    ###############################################################################################################
    # COMPILE NKI DATA
    ###############################################################################################################
    if run_2sample_training:

        ###############################################################################################################
        # GET INFO AND SELECT FILES
        df_all_subjects_pickle_file_nki = os.path.join(aggregated_subjects_dir_nki,
                                                       'df_all_subjects_pickle_file/df_all.pkl')
        df_nki = pd.read_pickle(df_all_subjects_pickle_file_nki)

        # build lookup dict for unimodal data
        X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
        info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
        unimodal_lookup_dict_nki = {}
        for k in in_data_name_list_unique:
            unimodal_lookup_dict_nki[k] = {'X_file': os.path.join(aggregated_subjects_dir_nki, X_file_template.format(
                in_data_name=k)),
                                           'unimodal_backprojection_info_file': os.path.join(
                                               aggregated_subjects_dir_nki,
                                               info_file_template.format(
                                                   in_data_name=k))
                                           }



        ###############################################################################################################
        # AGGREGATE MULTIMODAL METRICS
        # stack single modality arrays horizontally
        aggregate_multimodal_metrics_nki = Node(util.Function(input_names=['multimodal_list', 'unimodal_lookup_dict'],
                                                              output_names=['X_multimodal_file',
                                                                            'multimodal_backprojection_info',
                                                                            'multimodal_name'],
                                                              function=aggregate_multimodal_metrics_fct),
                                                name='aggregate_multimodal_metrics_nki')
        wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name', aggregate_multimodal_metrics_nki,
                   'multimodal_list')
        aggregate_multimodal_metrics_nki.inputs.unimodal_lookup_dict = unimodal_lookup_dict_nki



        ###############################################################################################################
        # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
        select_subjects_nki = Node(util.Function(input_names=['df_all_subjects_pickle_file',
                                                              'subjects_selection_crit_dict',
                                                              'selection_criterium'],
                                                 output_names=['df_use_file',
                                                               'df_use_pickle_file',
                                                               'subjects_selection_index'],
                                                 function=select_subjects_fct),
                                   name='select_subjects_nki')

        select_subjects_nki.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file_nki
        select_subjects_nki.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict_nki
        select_subjects_nki.inputs.selection_criterium = subjects_selection_crit_name_nki



        ###############################################################################################################
        # SELECT MULITMODAL X
        # select subjects (rows) from multimodal X according indexer
        select_multimodal_X_nki = Node(util.Function(input_names=['X_multimodal_file', 'subjects_selection_index',
                                                                  'selection_criterium'],
                                                     output_names=['X_multimodal_selected_file'],
                                                     function=select_multimodal_X_fct),
                                       name='select_multimodal_X_nki')
        wf.connect(aggregate_multimodal_metrics_nki, 'X_multimodal_file', select_multimodal_X_nki, 'X_multimodal_file')
        wf.connect(select_subjects_nki, 'subjects_selection_index', select_multimodal_X_nki, 'subjects_selection_index')





    ###############################################################################################################
    # RUN PREDICTION
    #
    prediction_node_dict = {}
    backprojection_node_dict = {}

    prediction_split = Node(util.Function(input_names=['X_file',
                                                       'target_name',
                                                       'selection_criterium',
                                                       'df_file',
                                                       'data_str',
                                                       'regress_confounds',
                                                       'run_cv',
                                                       'n_jobs_cv',
                                                       'run_tuning',
                                                       'X_file_nki',
                                                       'df_file_nki',
                                                       'reverse_split',
                                                       'random_state_nki',
                                                       'run_learning_curve',
                                                       'life_test_size'],
                                          output_names=['scatter_file',
                                                        'brain_age_scatter_file',
                                                        'df_life_out_file',
                                                        'df_nki_out_file',
                                                        'df_big_out_file',
                                                        'model_out_file',
                                                        'df_res_out_file',
                                                        'tuning_curve_file',
                                                        'scatter_file_cv',
                                                        'learning_curve_plot_file',
                                                        'learning_curve_df_file'],
                                          function=run_prediction_split_fct),
                            name='prediction_split')

    backproject_and_split_weights = Node(util.Function(input_names=['trained_model_file',
                                                                    'multimodal_backprojection_info',
                                                                    'data_str',
                                                                    'target_name'],
                                                       output_names=['out_file_list',
                                                                     'out_file_render_list'],
                                                       function=backproject_and_split_weights_fct),
                                         name='backproject_and_split_weights')

    i = 0

    for reg in confound_regression:
        the_out_node_str = 'single_source_model_reg_%s_' % (reg)
        prediction_node_dict[i] = prediction_split.clone(the_out_node_str)
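        # clone() creates an independent, renamed copy of the node, so each
        # confound_regression setting gets its own node (and sink outputs)
        # instead of rewiring a single shared node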
        the_in_node = prediction_node_dict[i]
        the_in_node.inputs.regress_confounds = reg
        the_in_node.inputs.run_cv = run_cv
        the_in_node.inputs.n_jobs_cv = n_jobs_cv
        the_in_node.inputs.run_tuning = run_tuning
        the_in_node.inputs.reverse_split = reverse_split
        the_in_node.inputs.random_state_nki = random_state_nki
        the_in_node.inputs.run_learning_curve = run_learning_curve
        the_in_node.inputs.life_test_size = life_test_size

        wf.connect(select_multimodal_X, 'X_multimodal_selected_file', the_in_node, 'X_file')
        wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')
        wf.connect(subject_selection_infosource, 'selection_criterium', the_in_node, 'selection_criterium')
        wf.connect(select_subjects, 'df_use_pickle_file', the_in_node, 'df_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')

        wf.connect(the_in_node, 'model_out_file', ds, the_out_node_str + 'trained_model')
        wf.connect(the_in_node, 'scatter_file', ds_pdf, the_out_node_str + 'scatter')
        wf.connect(the_in_node, 'brain_age_scatter_file', ds_pdf, the_out_node_str + 'brain_age_scatter')
        wf.connect(the_in_node, 'df_life_out_file', ds_pdf, the_out_node_str + 'predicted_life')
        wf.connect(the_in_node, 'df_nki_out_file', ds_pdf, the_out_node_str + 'predicted_nki')
        wf.connect(the_in_node, 'df_big_out_file', ds_pdf, the_out_node_str + 'predicted')

        wf.connect(the_in_node, 'df_res_out_file', ds_pdf, the_out_node_str + 'results_error')
        wf.connect(the_in_node, 'tuning_curve_file', ds_pdf, the_out_node_str + 'tuning_curve')
        wf.connect(the_in_node, 'scatter_file_cv', ds_pdf, the_out_node_str + 'scatter_cv')
        wf.connect(the_in_node, 'learning_curve_plot_file', ds_pdf, the_out_node_str + 'learning_curve_plot_file.@plot')
        wf.connect(the_in_node, 'learning_curve_df_file', ds_pdf, the_out_node_str + 'learning_curve_df_file.@df')

        # NKI
        if run_2sample_training:
            wf.connect(select_multimodal_X_nki, 'X_multimodal_selected_file', the_in_node, 'X_file_nki')
            wf.connect(select_subjects_nki, 'df_use_pickle_file', the_in_node, 'df_file_nki')

        else:
            the_in_node.inputs.df_file_nki = None
            the_in_node.inputs.X_file_nki = None

        # BACKPROJECT PREDICTION WEIGHTS
        # map weights back to single modality original format (e.g., nifti or matrix)
        the_out_node_str = 'backprojection_single_source_model_reg_%s_' % (reg)
        backprojection_node_dict[i] = backproject_and_split_weights.clone(the_out_node_str)
        the_from_node = prediction_node_dict[i]
        the_in_node = backprojection_node_dict[i]
        wf.connect(the_from_node, 'model_out_file', the_in_node, 'trained_model_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_backprojection_info', the_in_node,
                   'multimodal_backprojection_info')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')
        wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')

        wf.connect(the_in_node, 'out_file_list', ds_pdf, the_out_node_str + '.@weights')
        wf.connect(the_in_node, 'out_file_render_list', ds_pdf, the_out_node_str + 'renders.@renders')

        i += 1



    ###############################################################################################################
    # #  RUN WF
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example #57
0
def create_lemon_resting(subject, working_dir, data_dir, freesurfer_dir,
                         out_dir, vol_to_remove, TR, epi_resolution, highpass,
                         lowpass, echo_space, te_diff, pe_dir, standard_brain,
                         standard_brain_resampled, standard_brain_mask,
                         standard_brain_mask_resampled, fwhm_smoothing):
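    """Build the resting-state preprocessing workflow for one subject:
    volume removal, motion correction, fieldmap-based unwarping and
    coregistration, slice-timing correction, conversion to percent signal
    change, ANTs registration to MNI space, smoothing and QC visualization.
    """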
    # set fsl output type to nii.gz
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # main workflow
    func_preproc = Workflow(name='lemon_resting')
    func_preproc.base_dir = working_dir
    func_preproc.config['execution'][
        'crashdump_dir'] = func_preproc.base_dir + "/crash_files"
    # select files
    templates = {
        'func': 'func/EPI_t2.nii',
        'fmap_phase': 'unwarp/B0_ph.nii',
        'fmap_mag': 'unwarp/B0_mag.nii',
        'anat_head':
        'preprocessed/mod/anat/T1.nii.gz',  #either with mod or without
        'anat_brain':
        'preprocessed/mod/anat/brain.nii.gz',  #new version with brain_extraction from freesurfer  #T1_brain_brain.nii.gz',
        'brain_mask':
        'preprocessed/mod/anat/T1_brain_mask.nii.gz',  #T1_brain_brain_mask.nii.gz',
        'ants_affine':
        'preprocessed/mod/anat/transforms2mni/transform0GenericAffine.mat',
        'ants_warp':
        'preprocessed/mod/anat/transforms2mni/transform1Warp.nii.gz'
    }

    selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                       name="selectfiles")

    # node to remove first volumes
    remove_vol = Node(util.Function(input_names=['in_file', 't_min'],
                                    output_names=["out_file"],
                                    function=strip_rois_func),
                      name='remove_vol')
    remove_vol.inputs.t_min = vol_to_remove
    # workflow for motion correction
    moco = create_moco_pipeline()
    # workflow for fieldmap correction and coregistration
    fmap_coreg = create_fmap_coreg_pipeline()
    fmap_coreg.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    fmap_coreg.inputs.inputnode.fs_subject_id = subject
    fmap_coreg.inputs.inputnode.echo_space = echo_space
    fmap_coreg.inputs.inputnode.te_diff = te_diff
    fmap_coreg.inputs.inputnode.pe_dir = pe_dir
    # workflow for applying transformations to timeseries
    transform_ts = create_transform_pipeline()
    transform_ts.inputs.inputnode.resolution = epi_resolution

    #workflow to convert signal into percent signal change
    normalize = create_normalize_pipeline()
    normalize.inputs.inputnode.tr = TR

    #workflow to transform timeseries to MNI
    ants_registration = create_ants_registration_pipeline()
    ants_registration.inputs.inputnode.ref = standard_brain_resampled

    #workflow to smooth
    smoothing = create_smoothing_pipeline()
    smoothing.inputs.inputnode.fwhm = fwhm_smoothing

    #workflow to correct slice timing
    slicetiming = create_slice_timing_pipeline()

    #visualize registration results
    visualize = create_visualize_pipeline()
    visualize.inputs.inputnode.mni_template = standard_brain_resampled

    #sink to store files
    sink = Node(nio.DataSink(
        parameterization=False,
        base_directory=out_dir,
        substitutions=[
            ('fmap_phase_fslprepared', 'fieldmap'),
            ('fieldmap_fslprepared_fieldmap_unmasked_vsm', 'shiftmap'),
            ('plot.rest_coregistered', 'outlier_plot'),
            ('filter_motion_comp_norm_compcor_art_dmotion',
             'nuissance_matrix'),
            ('rest_realigned.nii.gz_abs.rms', 'rest_realigned_abs.rms'),
            ('rest_realigned.nii.gz.par', 'rest_realigned.par'),
            ('rest_realigned.nii.gz_rel.rms', 'rest_realigned_rel.rms'),
            ('rest_realigned.nii.gz_abs_disp', 'abs_displacement_plot'),
            ('rest_realigned.nii.gz_rel_disp', 'rel_displacment_plot'),
            ('art.rest_coregistered_outliers', 'outliers'),
            ('global_intensity.rest_coregistered', 'global_intensity'),
            ('norm.rest_coregistered', 'composite_norm'),
            ('stats.rest_coregistered', 'stats'),
            ('rest_denoised_bandpassed_norm.nii.gz',
             'rest_preprocessed.nii.gz'),
            ('rest_denoised_bandpassed_norm_trans.nii.gz', 'rest_mni.nii.gz'),
            ('rest2anat_masked_st_norm_trans_smooth.nii',
             'rest_mni_smoothed.nii')
        ]),
                name='sink')

    # connections
    func_preproc.connect([
        #remove the first volumes
        (selectfiles, remove_vol, [('func', 'in_file')]),

        #align volumes and motion correction
        (remove_vol, moco, [('out_file', 'inputnode.epi')]),

        #prepare field map
        (selectfiles, fmap_coreg, [('fmap_phase', 'inputnode.phase'),
                                   ('fmap_mag', 'inputnode.mag'),
                                   ('anat_head', 'inputnode.anat_head'),
                                   ('anat_brain', 'inputnode.anat_brain')]),
        (moco, fmap_coreg, [('outputnode.epi_mean', 'inputnode.epi_mean')]),
        (remove_vol, transform_ts, [('out_file', 'inputnode.orig_ts')]),
        (selectfiles, transform_ts, [('anat_head', 'inputnode.anat_head')]),
        (selectfiles, transform_ts, [('brain_mask', 'inputnode.brain_mask')]),
        (moco, transform_ts, [('outputnode.mat_moco', 'inputnode.mat_moco')]),
        (fmap_coreg, transform_ts, [('outputnode.fmap_fullwarp',
                                     'inputnode.fullwarp')]),

        # apply slice-timing correction after motion realignment + unwarping
        (transform_ts, slicetiming, [('outputnode.trans_ts_masked',
                                      'inputnode.ts')]),
        (slicetiming, normalize, [('outputnode.ts_slicetcorrected',
                                   'inputnode.epi_coreg')]),
        (normalize, ants_registration, [('outputnode.normalized_file',
                                         'inputnode.denoised_ts')]),

        # registration to MNI space
        (selectfiles, ants_registration, [('ants_affine', 'inputnode.ants_affine'),
                                          ('ants_warp', 'inputnode.ants_warp')]),
        (ants_registration, smoothing, [('outputnode.ants_reg_ts',
                                         'inputnode.ts_transformed')]),
        (smoothing, visualize, [('outputnode.ts_smoothed',
                                 'inputnode.ts_transformed')]),

        # all the outputs
        (moco, sink, [
            # ('outputnode.epi_moco', 'realign.@realigned_ts'),
            ('outputnode.par_moco', 'realign.@par'),
            ('outputnode.rms_moco', 'realign.@rms'),
            ('outputnode.mat_moco', 'realign.MAT.@mat'),
            ('outputnode.epi_mean', 'realign.@mean'),
            ('outputnode.rotplot', 'realign.plots.@rotplot'),
            ('outputnode.transplot', 'realign.plots.@transplot'),
            ('outputnode.dispplots', 'realign.plots.@dispplots'),
            ('outputnode.tsnr_file', 'realign.@tsnr')
        ]),
        (fmap_coreg, sink, [
            ('outputnode.fmap', 'coregister.transforms2anat.@fmap'),
            # ('outputnode.unwarpfield_epi2fmap', 'coregister.@unwarpfield_epi2fmap'),
            ('outputnode.unwarped_mean_epi2fmap', 'coregister.@unwarped_mean_epi2fmap'),
            ('outputnode.epi2fmap', 'coregister.@epi2fmap'),
            # ('outputnode.shiftmap', 'coregister.@shiftmap'),
            ('outputnode.fmap_fullwarp', 'coregister.transforms2anat.@fmap_fullwarp'),
            ('outputnode.epi2anat', 'coregister.@epi2anat'),
            ('outputnode.epi2anat_mat', 'coregister.transforms2anat.@epi2anat_mat'),
            ('outputnode.epi2anat_dat', 'coregister.transforms2anat.@epi2anat_dat'),
            ('outputnode.epi2anat_mincost', 'coregister.@epi2anat_mincost')
        ]),
        (transform_ts, sink, [
            # ('outputnode.trans_ts', 'coregister.@full_transform_ts'),
            ('outputnode.trans_ts_mean', 'coregister.@full_transform_mean'),
            ('outputnode.resamp_brain', 'coregister.@resamp_brain')
        ]),
        (ants_registration, sink, [('outputnode.ants_reg_ts',
                                    'ants.@antsnormalized')]),
        (smoothing, sink, [('outputnode.ts_smoothed', '@smoothed.FWHM6')]),
    ])

    #func_preproc.write_graph(dotfilename='func_preproc.dot', graph2use='colored', format='pdf', simple_form=True)
    func_preproc.run()
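    # nipype could parallelize this graph via a plugin instead of the default
    # serial run; a usage sketch (not in the original):
    #   func_preproc.run(plugin='MultiProc', plugin_args={'n_procs': 4})
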
# The opening of this ID list is missing from the excerpt (as may be earlier
# IDs); the variable name 'subjects' is assumed from the infosource below.
subjects = ['26723', '26724', '26728', '26753', '26782', '26789', '26793',
            '26801', '26802', '26803', '26804', '26805', '26806', '26820',
            '26839', '26841', '26842', '26843', '26844', '26856', '26857',
            '26858', '26926', '27696', '27834', '27954']



######################
# WF
######################

wd_dir = '/scr/kansas1/data/lsd-lemon/lemon_wd_meanDist_%s' % distype
ds_dir = '/scr/kansas1/data/lsd-lemon/lemon_results_meanDist_%s' % distype

wf = Workflow(name='distconnect_meanDist_%s' % distype)
wf.base_dir = os.path.join(wd_dir)
nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
                                                                   'remove_unnecessary_outputs': False,
                                                                   'job_finished_timeout': 120})
config.update_config(nipype_cfg)
wf.config['execution']['crashdump_dir'] = os.path.join(wd_dir, 'crash')
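
# With stop_on_first_crash=False, failing nodes drop .pklz crash dumps into
# this directory; nipype ships a small CLI for inspecting them, e.g.:
#   nipype_display_crash crash-<timestamp>-<node>.pklz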

ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')


######################
# GET DATA
######################
# SUBJECTS ITERATOR
subjects_infosource = Node(util.IdentityInterface(fields=['subject_id']), name='subjects_infosource')
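
# An infosource like this is normally iterated over the subject list above;
# a sketch of the standard nipype pattern (the original wiring may differ):
#   subjects_infosource.iterables = ('subject_id', subjects)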