Example #1
def create_reconall_pipeline(name='reconall'):
    reconall = Workflow(name=name)
    #inputnode
    inputnode = Node(util.IdentityInterface(
        fields=['anat', 'fs_subjects_dir', 'fs_subject_id']),
                     name='inputnode')
    outputnode = Node(
        util.IdentityInterface(fields=['fs_subjects_dir', 'fs_subject_id']),
        name='outputnode')
    # run reconall
    recon_all = Node(
        fs.ReconAll(
            args='-all -hippo-subfields -no-isrunning'
        ),  #for RSV152 took out s because of preprocessing with version 6.0
        name="recon_all")
    #recon_all.inputs.directive= 'autorecon2-wm' # -autorecon3
    recon_all.plugin_args = {'submit_specs': 'request_memory = 9000'}

    # function to replace / in subject id string with a _
    def sub_id(sub_id):
        return sub_id.replace('/', '_')

    reconall.connect([
        (inputnode, recon_all, [('fs_subjects_dir', 'subjects_dir'),
                                ('anat', 'T1_files'),
                                (('fs_subject_id', sub_id), 'subject_id')]),
        (recon_all, outputnode, [('subject_id', 'fs_subject_id'),
                                 ('subjects_dir', 'fs_subjects_dir')])
    ])
    return reconall
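
A minimal usage sketch for this factory; the working directory, scan path, and subject ID below are hypothetical placeholders:

wf = create_reconall_pipeline()
wf.base_dir = '/tmp/work'                                 # hypothetical working directory
wf.inputs.inputnode.anat = '/data/sub01/T1w.nii.gz'       # hypothetical T1 scan
wf.inputs.inputnode.fs_subjects_dir = '/data/freesurfer'  # hypothetical SUBJECTS_DIR
wf.inputs.inputnode.fs_subject_id = 'sub01'
wf.run()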
Example #2
def create_workflow_allin_slices(name='motion_correction', iterfield=['in_file']):
    workflow = Workflow(name=name)
    inputs = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',

        'ref_func', 
        'ref_func_weights',

        'funcs',
        'funcs_masks',

        'mc_method',
    ]), name='in')
    inputs.iterables = [
        ('mc_method', ['afni:3dAllinSlices'])
    ]

    mc = MapNode(
        AFNIAllinSlices(),
        iterfield=iterfield,  
        name='mc')
    workflow.connect(
        [(inputs, mc,
          [('funcs', 'in_file'),
           ('ref_func_weights', 'in_weight_file'),
           ('ref_func', 'ref_file'),
           ])])
    return workflow
Example #3
def freesurfer_nifti():
    '''
    Simple method to convert freesurfer mgz files to nifti format
    '''

    # define workflow
    flow = Workflow(name='freesurfer_nifti')

    inputnode = Node(
        util.IdentityInterface(fields=['mgz_image', 'anatomical']),
        name='inputnode')

    outputnode = Node(util.IdentityInterface(fields=['aparc_aseg_nifti']),
                      name='outputnode')

    #define nodes
    convert_aparc_aseg = Node(interface=freesurfer.MRIConvert(),
                              name='aparc_aseg_nifti')
    convert_aparc_aseg.inputs.out_type = 'nii'

    anatomical = Node(interface=freesurfer.MRIConvert(),
                      name='anatomical_ready')
    anatomical.inputs.out_type = 'nii'

    # connect nodes
    flow.connect(inputnode, 'mgz_image', convert_aparc_aseg, 'in_file')
    flow.connect(inputnode, 'anatomical', anatomical, 'in_file')
    flow.connect(convert_aparc_aseg, 'out_file', outputnode, 'aparc_aseg_nifti')

    return flow
Example #4
def func2mni_wf():

    mni_skull_2mm = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm.nii.gz'
    mni_brain_2mm   = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz'

    flow  = Workflow('func2mni_nonlinear')

    inputnode  = Node(util.IdentityInterface(fields=['func_image',
                                                     'reference_image',
                                                     'func2anat_affine',
                                                     'anat2mni_warp']),name = 'inputnode')

    outputnode = Node(util.IdentityInterface(fields=['func2mni_2mm',
                                                     'func2mni_4mm']),name = 'outputnode')

    applywarp = Node(fsl.ApplyWarp(), name = 'apply_warp')
    applywarp.inputs.ref_file            = mni_brain_2mm

    flirt4mm = Node(fsl.FLIRT(), name = 'resample_4mm')
    flirt4mm.inputs.reference         = mni_brain_2mm
    flirt4mm.inputs.apply_isoxfm      = 4.0

    flow.connect(inputnode, 'func_image'        , applywarp,  'in_file')
    flow.connect(inputnode, 'anat2mni_warp'     , applywarp,  'field_file')
    flow.connect(inputnode, 'func2anat_affine'  , applywarp,  'premat')
    flow.connect(applywarp, 'out_file'          , flirt4mm,   'in_file')

    flow.connect(applywarp, 'out_file'          , outputnode, 'func2mni_2mm')
    flow.connect(flirt4mm,  'out_file'          , outputnode, 'func2mni_4mm')

    return flow
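
A hedged usage sketch; the three inputs would normally come from upstream coregistration and normalization nodes, and the file names here are hypothetical:

wf = func2mni_wf()
wf.base_dir = '/tmp/work'                                        # hypothetical
wf.inputs.inputnode.func_image = 'rest_preprocessed.nii.gz'      # hypothetical functional image
wf.inputs.inputnode.func2anat_affine = 'func2anat.mat'           # hypothetical FLIRT affine
wf.inputs.inputnode.anat2mni_warp = 'anat2mni_fieldwarp.nii.gz'  # hypothetical FNIRT warp field
wf.run()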
Example #5
def create_ants_registration_pipeline(name='ants_registration'):
    # set fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # initiate workflow
    ants_registration = Workflow(name=name)
    # inputnode
    inputnode = Node(util.IdentityInterface(
        fields=['corr_Z', 'ants_affine', 'ants_warp', 'ref']),
                     name='inputnode')
    # outputnode
    outputnode = Node(util.IdentityInterface(fields=[
        'ants_reg_corr_Z',
    ]),
                      name='outputnode')

    #also transform to mni space
    collect_transforms = Node(interface=util.Merge(2),
                              name='collect_transforms')

    ants_reg = MapNode(ants.ApplyTransforms(input_image_type=3,
                                            dimension=3,
                                            interpolation='Linear'),
                       name='ants_reg',
                       iterfield='input_image')

    ants_registration.connect([
        (inputnode, ants_reg, [('corr_Z', 'input_image')]),
        (inputnode, ants_reg, [('ref', 'reference_image')]),
        (inputnode, collect_transforms, [('ants_affine', 'in1')]),
        (inputnode, collect_transforms, [('ants_warp', 'in2')]),
        (collect_transforms, ants_reg, [('out', 'transforms')]),
        (ants_reg, outputnode, [('output_image', 'ants_reg_corr_Z')])
    ])

    return ants_registration
Example #6
        def test_nipype_srtm_zhou2003_roi(self):
            from temporalimage import Quantity
            from temporalimage.t4d import _csvwrite_frameTiming
            import pandas as pd
            from .generate_test_data import generate_fakeTAC_SRTM

            self.t, self.dt, self.TAC, self.refTAC = generate_fakeTAC_SRTM(BP=0.5, R1=0.7)

            frameStart = self.t - self.dt/2
            frameEnd = self.t + self.dt/2
            csvfilename = os.path.join(self.tmpdirname,'srtm_roi_timing.csv')
            _csvwrite_frameTiming(frameStart, frameEnd, csvfilename, time_unit='min')

            roiTACcsvfile = os.path.join(self.tmpdirname,'roi_tacs.csv')
            roiTACs = pd.DataFrame({'target': self.TAC,
                                    'ref': self.refTAC})
            roiTACs.T.to_csv(roiTACcsvfile, index_label='ROI')

            infosource = Node(IdentityInterface(fields=['in_file']),
                              name="infosource")
            infosource.iterables = ('in_file', [roiTACcsvfile])

            km = Node(KineticModelROI(model='SRTM_Zhou2003',
                                      #roiTACcsvFile=roiTACcsvfile,
                                      frameTimingFile=csvfilename,
                                      refRegion='ref',
                                      startActivity=self.startActivity,
                                      weights=self.weights), name="km")

            km_workflow = Workflow(name="km_workflow", base_dir=self.tmpdirname)
            km_workflow.connect([
                (infosource, km, [('in_file', 'roiTACcsvFile')])
            ])
            km_workflow.run()
Example #7
def create_workflow_hrfpattern_spm():

    # input/output nodes and SPM model specification (field names and parameter
    # values here are assumptions inferred from the connections below)
    input_node = Node(IdentityInterface(fields=['bold', 'events']), name='input')
    output_node = Node(IdentityInterface(fields=['T_image']), name='output')

    model = Node(interface=SpecifySPMModel(), name='model')
    model.inputs.input_units = 'secs'
    model.inputs.time_repetition = .85           # assumed to match the interscan interval
    model.inputs.high_pass_filter_cutoff = 128.  # assumed cutoff

    # GLM
    design = Node(interface=spm_design(), name='design_glm')
    design.inputs.timing_units = 'secs'
    design.inputs.interscan_interval = .85
    design.inputs.bases = {'hrf': {'derivs': [0, 0]}}

    estimate = Node(interface=EstimateModel(), name="estimate")
    estimate.inputs.estimation_method = {'Classical': 1}

    contrastestimate = Node(interface=EstimateContrast(), name="contrast")
    contrastestimate.inputs.contrasts = [('Visual', 'T', [
        '1',
    ], [
        1,
    ])]

    w = Workflow(name='hrfpattern_spm')
    w.connect(input_node, 'bold', model, 'functional_runs')
    w.connect(input_node, 'events', model, 'bids_event_file')
    w.connect(model, 'session_info', design, 'session_info')
    w.connect(design, 'spm_mat_file', estimate, 'spm_mat_file')
    w.connect(estimate, 'spm_mat_file', contrastestimate, 'spm_mat_file')
    w.connect(estimate, 'beta_images', contrastestimate, 'beta_images')
    w.connect(estimate, 'residual_image', contrastestimate, 'residual_image')
    w.connect(contrastestimate, 'spmT_images', output_node, 'T_image')
    return w
Example #8
def ica_aroma_workflow():

    flow = Workflow('denoise_ica_aroma')

    inputnode = Node(util.IdentityInterface(fields=[
        'fslDir', 'inFile', 'mask', 'dim', 'TR', 'mc', 'denType', 'affmat',
        'warp'
    ]),
                     name='inputnode')

    outputnode = Node(util.IdentityInterface(fields=['denoised']),
                      name='outputnode')

    aroma = Node(util.Function(input_names=[
        'fslDir', 'inFile', 'mask', 'dim', 'TR', 'mc', 'denType', 'affmat',
        'warp'
    ],
                               output_names=['denoised'],
                               function=ica_aroma_denoise),
                 name='ICA_AROMA')

    flow.connect(inputnode, 'fslDir', aroma, 'fslDir')
    flow.connect(inputnode, 'inFile', aroma, 'inFile')
    flow.connect(inputnode, 'mask', aroma, 'mask')
    flow.connect(inputnode, 'dim', aroma, 'dim')
    flow.connect(inputnode, 'TR', aroma, 'TR')
    flow.connect(inputnode, 'mc', aroma, 'mc')
    flow.connect(inputnode, 'denType', aroma, 'denType')
    flow.connect(inputnode, 'affmat', aroma, 'affmat')
    flow.connect(inputnode, 'warp', aroma, 'warp')
    flow.connect(aroma, 'denoised', outputnode, 'denoised')

    return flow
Example #9
def create_normalize_pipeline(name='normalize'):
    # workflow
    normalize = Workflow(name=name)
    # Define nodes
    inputnode = Node(interface=util.IdentityInterface(fields=['epi_coreg',
                                                              'tr']),
                     name='inputnode')
    outputnode = Node(interface=util.IdentityInterface(fields=[
        'normalized_file']),
        name='outputnode')

    # time-normalize scans
    normalize_time = Node(util.Function(input_names=['in_file', 'tr'],
                                        output_names=['out_file'],
                                        function=time_normalizer),
                          name='normalize_time')
    normalize_time.plugin_args = {'submit_specs': 'request_memory = 17000'}
    normalize.connect([(inputnode, normalize_time, [('tr', 'tr')]),
                       (inputnode, normalize_time, [('epi_coreg', 'in_file')]),
                       (normalize_time, outputnode, [('out_file', 'normalized_file')])
                       ])

    return normalize
Example #10
def create_reconall_pipeline(name='reconall_wf'):
    reconall_wf = Workflow(name=name)
    #inputnode
    inputnode = Node(util.IdentityInterface(
        fields=['anat', 'fs_subjects_dir', 'fs_subject_id']),
                     name='inputnode')
    outputnode = Node(
        util.IdentityInterface(fields=['fs_subjects_dir', 'fs_subject_id']),
        name='outputnode')

    # run reconall
    reconall = Node(
        fs.ReconAll(args='-all -no-isrunning',
                    openmp=8),  #subfield segmentation after recon-all
        name="reconall")
    #recon_all.inputs.directive= 'autorecon2-wm' # -autorecon3
    reconall.plugin_args = {'submit_specs': 'request_memory = 9000'}
    reconall_wf.connect([
        (inputnode, reconall, [('fs_subject_id', 'subject_id')]),
        (inputnode, reconall, [('fs_subjects_dir', 'subjects_dir'),
                               ('anat', 'T1_files')]),
        (reconall, outputnode, [('subject_id', 'fs_subject_id'),
                                ('subjects_dir', 'fs_subjects_dir')])
    ])
    return reconall_wf
Example #11
def make_workflow():

    flairs = [os.path.abspath(i) for i in glob.glob(args.flair)]
    weights = [os.path.abspath(i) for i in glob.glob(args.weights)]
    weights_source = Node(interface=IdentityInterface(fields=['weights']),
                          name='weights_source')
    weights_source.inputs.weights = weights

    data_source = Node(IdentityInterface(fields=['flairs']),
                       name='data_source')
    data_source.iterables = ('flairs', flairs)

    sink = Node(interface=DataSink(), name='sink')
    sink.inputs.base_directory = wmh_dir
    sink.inputs.substitutions = [
        ('_flairs_', ''),
        ('_FLAIR.nii.gz/', '/'),
    ]
    sink.inputs.regexp_substitutions = [
        (r'\.\..*\.\.', ''),
    ]

    test_wf = ibbmTum_wf.get_test_wf(row_st=192, cols_st=192, thres_mask=10)

    wmh = Workflow(name='wmh', base_dir=wf_temp)

    wmh.connect(weights_source, 'weights', test_wf, 'inputspec.weights')
    wmh.connect(data_source, 'flairs', test_wf, 'inputspec.flair')
    wmh.connect(test_wf, 'outputspec.wmh_mask', sink, '@pred')

    return wmh
Example #12
def create_reconall_pipeline(name='reconall'):

    reconall = Workflow(name=name)

    #inputnode
    inputnode = Node(util.IdentityInterface(
        fields=['anat', 'fs_subjects_dir', 'fs_subject_id']),
                     name='inputnode')

    outputnode = Node(
        util.IdentityInterface(fields=['fs_subjects_dir', 'fs_subject_id']),
        name='outputnode')

    # run reconall
    recon_all = Node(fs.ReconAll(args='-nuiterations 7 -no-isrunning'),
                     name="recon_all")
    recon_all.plugin_args = {'submit_specs': 'request_memory = 9000'}

    # function to replace / in subject id string with a _
    def sub_id(sub_id):
        return sub_id.replace('/', '_')

    reconall.connect([
        (inputnode, recon_all, [('fs_subjects_dir', 'subjects_dir'),
                                ('anat', 'T1_files'),
                                (('fs_subject_id', sub_id), 'subject_id')]),
        (recon_all, outputnode, [('subject_id', 'fs_subject_id'),
                                 ('subjects_dir', 'fs_subjects_dir')])
    ])

    return reconall
Example #13
def create_converter_structural_pipeline(working_dir, ds_dir, name="converter_struct"):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")

    # set fsl output
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=["t1w_dicom"]), name="inputnode")

    outputnode = Node(util.IdentityInterface(fields=["t1w"]), name="outputnode")

    niftisink = Node(nio.DataSink(), name="niftisink")
    niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")

    # convert to nifti
    # todo check if geometry bugs attac. use dcm2nii?
    converter_t1w = Node(DcmStack(embed_meta=True), name="converter_t1w")
    converter_t1w.plugin_args = {"submit_specs": "request_memory = 2000"}
    converter_t1w.inputs.out_format = "t1w"

    converter_wf.connect(inputnode, "t1w_dicom", converter_t1w, "dicom_files")

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
    converter_wf.connect(converter_t1w, "out_file", reor_2_std, "in_file")

    converter_wf.connect(reor_2_std, "out_file", outputnode, "t1w")

    # save original niftis
    converter_wf.connect(reor_2_std, "out_file", niftisink, "sMRI")

    converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
    return converter_wf
Example #14
def preprocess(input_file,
               output_dir,
               conform=True,
               bias_correct=True,
               skullstrip=True):

    preprocess_flow = Workflow(name='preprocess', base_dir=output_dir)

    conform_node = Node(MRIConvert(conform=True,
                                   out_type='niigz',
                                   out_file='conformed.nii.gz'),
                        name='conform')
    n4 = Node(N4BiasFieldCorrection(dimension=3,
                                    bspline_fitting_distance=300,
                                    shrink_factor=3,
                                    n_iterations=[50, 50, 30, 20],
                                    output_image='n4.nii.gz'),
              name='n4')
    robex = Node(ROBEX(seed=1729, stripped_image='brain.nii.gz'), name='robex')

    preprocess_flow.connect([(conform_node, n4, [('out_file', 'input_image')]),
                             (n4, robex, [('output_image', 'input_image')])])

    preprocess_flow.write_graph(graph2use='orig')
    conform_node.inputs.in_file = input_file
    preprocess_flow.run('MultiProc', plugin_args={'n_procs': 5})
Example #15
def create_ants_registration_pipeline(name='ants_registration'):
    # set fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # initiate workflow
    ants_registration = Workflow(name=name)
    # inputnode
    inputnode = Node(util.IdentityInterface(
        fields=['denoised_ts', 'composite_transform', 'ref']),
                     name='inputnode')
    # outputnode
    outputnode = Node(util.IdentityInterface(fields=[
        'ants_reg_ts',
    ]),
                      name='outputnode')

    ants_reg = Node(ants.ApplyTransforms(input_image_type=3,
                                         dimension=3,
                                         interpolation='Linear'),
                    name='ants_reg')

    ants_registration.connect([
        (inputnode, ants_reg, [('denoised_ts', 'input_image')]),
        (inputnode, ants_reg, [('ref', 'reference_image')]),
        (inputnode, ants_reg, [('composite_transform', 'transforms')]),
        (ants_reg, outputnode, [('output_image', 'ants_reg_ts')])
    ])

    return ants_registration
Example #16
def create_smoothing_pipeline(name='smoothing'):
    # set fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI')
    # initiate workflow
    smoothing = Workflow(name=name)
    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['ts_transformed',
                                                    'fwhm']),
                     name='inputnode')
    # outputnode
    outputnode = Node(util.IdentityInterface(fields=['ts_smoothed']),
                      name='outputnode')

    # apply smoothing
    smooth = MapNode(fsl.Smooth(), name='smooth', iterfield='in_file')

    smoothing.connect([
        (inputnode, smooth, [('ts_transformed', 'in_file'),
                             ('fwhm', 'fwhm')]),
        (smooth, outputnode, [('smoothed_file', 'ts_smoothed')])
    ])

    return smoothing
Example #17
    def create(self):  #, **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print("=" * 80)
        print(csvOut.outputs.__dict__)
        print("=" * 80)

        iters = {}
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)
        iters['tests'], iters['trains'] = subsample_crossValidationSet(
            result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'trainindex', 'testindex']
        inputsND = Node(interface=IdentityInterface(fields=out_fields),
                        run_without_submitting=True,
                        name='inputs')
        inputsND.iterables = [('trainindex', iters['trains']),
                              ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__['t1']
            inputsND.inputs.Label = csvOut.outputs.__dict__['label']
            inputsND.inputs.T2 = csvOut.outputs.__dict__['t2']
            pass  #TODO
        metaflow = Workflow(name='metaflow')
        metaflow.config['execution'] = {
            'plugin': 'Linear',
            'stop_on_first_crash': 'false',
            'stop_on_first_rerun':
            'false',  # This stops at first attempt to rerun, before running, and before deleting previous results.
            'hash_method': 'timestamp',
            'single_thread_matlab':
            'true',  # Multi-core 2011a  multi-core for matrix multiplication.
            'remove_unnecessary_outputs': 'true',
            'use_relative_paths':
            'false',  # relative paths should be on, require hash update when changed.
            'remove_node_directories': 'false',  # Experimental
            'local_hash_check': 'false'
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect([
            (metaflow, fusionflow, [('inputs.trainindex', 'trainT1s.index'),
                                    ('inputs.T1', 'trainT1s.inlist')]),
            (metaflow, fusionflow, [('inputs.trainindex', 'trainLabels.index'),
                                    ('inputs.Label', 'trainLabels.inlist')]),
            (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'),
                                    ('inputs.T1', 'testT1s.inlist')])
        ])
Example #18
def create_dcmconvert_pipeline(name='dcmconvert'):

    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    from nipype.interfaces.dcmstack import DcmStack

    # workflow
    dcmconvert = Workflow(name='dcmconvert')

    #inputnode
    inputnode = Node(util.IdentityInterface(fields=['dicoms', 'filename']),
                     name='inputnode')

    # outputnode
    outputnode = Node(util.IdentityInterface(fields=['nifti']),
                      name='outputnode')

    # conversion node
    converter = Node(DcmStack(embed_meta=True), name='converter')

    # connections
    dcmconvert.connect([(inputnode, converter, [('dicoms', 'dicom_files'),
                                                ('filename', 'out_format')]),
                        (converter, outputnode, [('out_file', 'nifti')])])

    return dcmconvert
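
A short usage sketch with hypothetical DICOM file names; 'filename' feeds DcmStack's out_format, so it becomes the stem of the resulting NIfTI:

dcmconvert = create_dcmconvert_pipeline()
dcmconvert.base_dir = '/tmp/work'                                # hypothetical
dcmconvert.inputs.inputnode.dicoms = ['sl001.dcm', 'sl002.dcm']  # hypothetical DICOM files
dcmconvert.inputs.inputnode.filename = 'rest'
dcmconvert.run()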
Example #19
def create_reconall_pipeline(name='reconall'):
    reconall = Workflow(name=name)
    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['anat',
                                                    'fs_subjects_dir',
                                                    'fs_subject_id'
                                                    ]),
                     name='inputnode')
    outputnode = Node(util.IdentityInterface(fields=['fs_subjects_dir',
                                                     'fs_subject_id']),
                      name='outputnode')
    # run reconall
    recon_all = Node(fs.ReconAll(args='-autorecon2 -nuiterations 7 -no-isrunning -hippo-subfields'),
                     name="recon_all")
    # recon_all.inputs.directive= 'autorecon2-wm' # -autorecon3
    recon_all.plugin_args = {'submit_specs': 'request_memory = 9000'}
    # function to replace / in subject id string with a _
    def sub_id(sub_id):
        return sub_id.replace('/', '_')

    reconall.connect([(inputnode, recon_all, [('fs_subjects_dir', 'subjects_dir'),
                                              ('anat', 'T1_files'),
                                              (('fs_subject_id', sub_id), 'subject_id')]),
                      (recon_all, outputnode, [('subject_id', 'fs_subject_id'),
                                               ('subjects_dir', 'fs_subjects_dir')])
                      ])
    return reconall
Example #20
def make_w_coreg_3T_afni():

    n_in = Node(IdentityInterface(fields=[
        'mean',
        'T1w',
    ]), name='input')

    n_out = Node(IdentityInterface(fields=[
        'mat_func2struct',
    ]),
                 name='output')

    n_allineate = Node(interface=Allineate(), name='allineate')
    n_allineate.inputs.one_pass = True
    n_allineate.inputs.args = '-master BASE'
    n_allineate.inputs.warp_type = 'shift_rotate'
    n_allineate.inputs.cost = 'nmi'
    n_allineate.inputs.outputtype = 'NIFTI'
    n_allineate.inputs.out_file = 'afni_realigned.nii.gz'
    n_allineate.inputs.out_matrix = 'afni_realigned.aff12.1D'

    w = Workflow('coreg_afni')
    w.connect(n_in, 'mean', n_allineate, 'in_file')
    w.connect(n_in, 'T1w', n_allineate, 'reference')
    w.connect(n_allineate, 'out_matrix', n_out, 'mat_func2struct')

    return w
Example #21
    def __call__(self, **kwargs):
        kwargs = modify_paths(kwargs, relative=False)
        interface = self.interface()
        # Set the inputs early to get some argument checking
        interface.inputs.set(**kwargs)
        # Make a name for our node
        inputs = interface.inputs.get_hashval()
        hasher = hashlib.new('md5')
        hasher.update(pickle.dumps(inputs))
        dir_name = '%s-%s' % (interface.__class__.__module__.replace('.', '-'),
                              interface.__class__.__name__)
        job_name = hasher.hexdigest()
        node = Node(interface, name=job_name)
        node.base_dir = os.path.join(self.base_dir, dir_name)

        cwd = os.getcwd()
        try:
            out = node.run()
        finally:
            # node.run() changes to the node directory - if something goes wrong
            # before it cds back you would end up in strange places
            os.chdir(cwd)
        if self.callback is not None:
            self.callback(dir_name, job_name)
        return out
    def make_segment(self):
        # Ref: http://nipype.readthedocs.io/en/0.12.1/interfaces/generated/nipype.interfaces.fsl.utils.html#reorient2std
        ro = Node(interface=fsl.Reorient2Std(), name='ro')

        # Ref: http://nipype.readthedocs.io/en/latest/interfaces/generated/interfaces.spm/preprocess.html#segment
        seg = Node(interface=spm.NewSegment(channel_info=(0.0001, 60, (True,
                                                                       True))),
                   name="seg")

        spm_tissues_split = Node(Function(['in_list'], ['gm', 'wm', 'csf'],
                                          self.spm_tissues),
                                 name='spm_tissues_split')

        gzip = Node(Function(['in_list'], ['out_list'], self.gzip_spm),
                    name='gzip')

        segment = Workflow(name='Segment', base_dir=self.temp_dir)

        gunzip = Node(interface=Gunzip(), name='gunzip')
        # for new segment
        segment.connect(ro, 'out_file', gunzip, 'in_file')
        segment.connect(gunzip, 'out_file', seg, 'channel_files')
        segment.connect(seg, 'native_class_images', spm_tissues_split,
                        'in_list')
        return segment
Example #23
File: memory.py Project: wanderine/nipype
    def __call__(self, **kwargs):
        kwargs = modify_paths(kwargs, relative=False)
        interface = self.interface()
        # Set the inputs early to get some argument checking
        interface.inputs.set(**kwargs)
        # Make a name for our node
        inputs = interface.inputs.get_hashval()
        hasher = hashlib.new('md5')
        hasher.update(pickle.dumps(inputs))
        dir_name = '%s-%s' % (interface.__class__.__module__.replace(
            '.', '-'), interface.__class__.__name__)
        job_name = hasher.hexdigest()
        node = Node(interface, name=job_name)
        node.base_dir = os.path.join(self.base_dir, dir_name)

        cwd = os.getcwd()
        try:
            out = node.run()
        finally:
            # node.run() changes to the node directory - if something goes wrong
            # before it cds back you would end up in strange places
            os.chdir(cwd)
        if self.callback is not None:
            self.callback(dir_name, job_name)
        return out
Example #24
def create_normalize_pipeline(name='normalize'):
    # workflow
    normalize = Workflow(name=name)
    # Define nodes
    inputnode = Node(interface=util.IdentityInterface(fields=['epi_coreg',
                                                              'tr']),
                     name='inputnode')
    outputnode = Node(interface=util.IdentityInterface(fields=[
        'normalized_file']),
        name='outputnode')

    # time-normalize scans
    normalize_time = Node(util.Function(input_names=['in_file', 'tr'],
                                        output_names=['out_file'],
                                        function=time_normalizer),
                          name='normalize_time')
    normalize_time.plugin_args = {'submit_specs': 'request_memory = 17000'}
    normalize.connect([(inputnode, normalize_time, [('tr', 'tr')]),
                       (inputnode, normalize_time, [('epi_coreg', 'in_file')]),
                       (normalize_time, outputnode, [('out_file', 'normalized_file')])
                       ])

    return normalize
Example #25
def create_reconall_pipeline(name='reconall'):

    reconall = Workflow(name=name)

    #inputnode
    inputnode = Node(util.IdentityInterface(
        fields=['anat', 'fs_subjects_dir', 'fs_subject_id']),
                     name='inputnode')

    outputnode = Node(
        util.IdentityInterface(fields=['fs_subjects_dir', 'fs_subject_id']),
        name='outputnode')

    # run reconall
    recon_all = create_skullstripped_recon_flow()

    # function to replace / in subject id string with a _
    def sub_id(sub_id):
        return sub_id.replace('/', '_')

    reconall.connect([(inputnode, recon_all,
                       [('fs_subjects_dir', 'inputspec.subjects_dir'),
                        ('anat', 'inputspec.T1_files'),
                        (('fs_subject_id', sub_id), 'inputspec.subject_id')]),
                      (recon_all, outputnode,
                       [('outputspec.subject_id', 'fs_subject_id'),
                        ('outputspec.subjects_dir', 'fs_subjects_dir')])])

    return reconall
Example #26
def make_full_workflow(session='7TGE', n_fmap=10):
    n_in = Node(IdentityInterface(fields=[
        'T1w',
        'func',
        'fmap',
        'subject',
    ]),
                name='input')

    n_out = Node(IdentityInterface(fields=[
        'func1',
        'func2',
        'filtered1',
        'filtered2',
        'mat_func2struct',
    ]),
                 name='output')

    n_merge = Node(interface=Merge(), name='merge')
    n_merge.inputs.dimension = 't'

    w_preproc = make_workflow(n_fmap)

    w_smooth1 = make_w_smooth('1')
    w_smooth2 = make_w_smooth('2')

    w = Workflow(session)

    w.connect(n_in, 'func', n_merge, 'in_files')
    w.connect(n_merge, 'merged_file', w_preproc, 'input.func')
    w.connect(n_in, 'fmap', w_preproc, 'input.fmap')
    w.connect(w_preproc, 'output.func1', n_out, 'func1')
    w.connect(w_preproc, 'output.func2', n_out, 'func2')
    w.connect(w_preproc, 'output.func1', w_smooth1, 'input.func')
    w.connect(w_preproc, 'output.func2', w_smooth2, 'input.func')
    w.connect(w_smooth1, 'output.func', n_out, 'filtered1')
    w.connect(w_smooth2, 'output.func', n_out, 'filtered2')

    if session.startswith('7T'):
        w_coreg_7T = make_w_coreg_7T()
        w.connect(n_in, 'T1w', w_coreg_7T, 'input.T1w')
        w.connect(w_preproc, 'output.mean', w_coreg_7T, 'input.mean')
        w.connect(w_coreg_7T, 'output.mat_ants', n_out, 'mat_func2struct')

    else:
        w_coreg = make_w_freesurfer2func()
        w_coreg_3T = make_w_coreg_3T_ants()
        """
        w.connect(n_in, 'T1w', w_coreg, 'input.T1w')
        w.connect(n_in, 'subject', w_coreg, 'input.subject')
        w.connect(w_preproc, 'output.mean', w_coreg, 'input.mean')
        """

        w.connect(n_in, 'T1w', w_coreg_3T, 'input.T1w')
        w.connect(w_preproc, 'output.mean', w_coreg_3T, 'input.mean')
        w.connect(w_coreg_3T, 'output.mat_func2struct', n_out,
                  'mat_func2struct')

    return w
Example #27
def stub_node_factory(*args, **kwargs):
    if 'name' not in kwargs.keys():
        raise Exception()
    name = kwargs['name']
    if name == 'compcor':
        return Node(*args, **kwargs)
    else:  # replace with an IdentityInterface
        return Node(IdentityInterface(fields=ALL_FIELDS), name=name)
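
This factory is meant for unit tests: every node except the one named 'compcor' is swapped for a pass-through IdentityInterface. A hedged sketch of how it might be applied, where 'mymodule' and build_workflow are hypothetical names:

from unittest import mock

with mock.patch('mymodule.Node', side_effect=stub_node_factory):
    wf = mymodule.build_workflow()  # hypothetical builder that calls Node internally
    wf.run()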
Example #28
def stub_wf(*args, **kwargs):
    wflow = Workflow(name='realigner')
    inputnode = Node(IdentityInterface(fields=['func']), name='inputspec')
    outputnode = Node(
        interface=IdentityInterface(fields=['realigned_file']),
        name='outputspec')
    wflow.connect(inputnode, 'func', outputnode, 'realigned_file')
    return wflow
    def make_infosource(self):

        # Infosource: Iterate through subject names
        infosource = Node(interface=IdentityInterface(fields=['subject_id']),
                          name="infosource")

        infosource.iterables = ('subject_id', self.subject_list)
        return infosource
Example #30
def node_result_file(u: pe.Node) -> Path:
    u._output_dir = None  # reset this just in case

    output_dir = u.output_dir()
    assert isinstance(output_dir, str)

    result_file = Path(output_dir) / f"result_{u.name}.pklz"

    return result_file
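
A sketch of reading such a result file back, assuming the node has already run; loadpkl is nipype's helper for these pickled result files:

from nipype.utils.filemanip import loadpkl

result = loadpkl(node_result_file(some_node))  # some_node: a hypothetical, already-run pe.Node
print(result.outputs)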
Example #31
def create_converter_diffusion_pipeline(working_dir,
                                        ds_dir,
                                        name='converter_diffusion'):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting')

    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['dMRI_dicom']),
                     name='inputnode')

    outputnode = Node(util.IdentityInterface(fields=['dMRI']),
                      name='outputnode')

    niftisink = Node(nio.DataSink(), name='niftisink')
    niftisink.inputs.base_directory = os.path.join(ds_dir, 'raw_niftis')

    #######

    converter_dMRI = Node(Dcm2nii(), name="converter_dMRI")
    converter_dMRI.inputs.gzip_output = True
    converter_dMRI.inputs.nii_output = True
    converter_dMRI.inputs.anonymize = False
    converter_dMRI.plugin_args = {'submit_specs': 'request_memory = 2000'}
    converter_wf.connect(inputnode, 'dMRI_dicom', converter_dMRI,
                         'source_names')

    dMRI_rename = Node(util.Rename(format_string='DTI_mx_137.nii.gz'),
                       name='dMRI_rename')
    converter_wf.connect(converter_dMRI, 'converted_files', dMRI_rename,
                         'in_file')

    bvecs_rename = Node(util.Rename(format_string='DTI_mx_137.bvecs'),
                        name='bvecs_rename')
    converter_wf.connect(converter_dMRI, 'bvecs', bvecs_rename, 'in_file')

    bvals_rename = Node(util.Rename(format_string='DTI_mx_137.bvals'),
                        name='bvals_rename')
    converter_wf.connect(converter_dMRI, "bvals", bvals_rename, 'in_file')

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name='reor_2_std')
    converter_wf.connect(dMRI_rename, 'out_file', reor_2_std, 'in_file')
    converter_wf.connect(reor_2_std, 'out_file', outputnode, 'dMRI')

    # save original niftis
    converter_wf.connect(reor_2_std, 'out_file', niftisink, 'dMRI.@dwi')
    converter_wf.connect(bvals_rename, 'out_file', niftisink, 'dMRI.@bvals')
    converter_wf.connect(bvecs_rename, 'out_file', niftisink, 'dMRI.@bvecs')

    converter_wf.write_graph(dotfilename='converter_struct',
                             graph2use='flat',
                             format='pdf')
    return converter_wf
Example #32
def create_quick_registration(name='quickreg'):
    # workflow
    quickreg = Workflow(name=name)
    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['anat', 'standard']),
                     name='inputnode')
    #MapNode(util.IdentityInterface(fields=['anat', 'standard']),    name='inputnode', iterfield=['anat'])
    # outputnode
    outputnode = Node(util.IdentityInterface(fields=[
        'anat2std_transforms', 'composite', 'anat2std', 'std2anat_transforms',
        'std2anat', 'inverse_composite'
    ]),
                      name='outputnode')

    antsreg = Node(
        ants.Registration(
            dimension=3,
            transforms=['SyN'],
            metric=['CC'],
            metric_weight=[1],
            number_of_iterations=[[70, 50, 30]],
            convergence_threshold=[1e-6],
            convergence_window_size=[10],
            shrink_factors=[[8, 4, 2]],
            smoothing_sigmas=[[3, 2, 1]],
            sigma_units=['vox'],
            initial_moving_transform_com=1,
            transform_parameters=[(0.1, 3.0, 0.0)],
            sampling_strategy=['None'],
            sampling_percentage=[1],
            radius_or_number_of_bins=[4],
            num_threads=1,
            interpolation='Linear',
            winsorize_lower_quantile=0.005,
            winsorize_upper_quantile=0.995,
            collapse_output_transforms=True,
            output_warped_image=True,
            write_composite_transform=True,
            #output_inverse_warped_image=True,
            #output_warped_image=True,
            use_histogram_matching=True,
        ),
        name='antsreg')
    # connections
    quickreg.connect([(inputnode, antsreg, [('anat', 'moving_image'),
                                            ('standard', 'fixed_image')]),
                      (antsreg, outputnode,
                       [('forward_transforms', 'anat2std_transforms'),
                        ('reverse_transforms', 'std2anat_transforms'),
                        ('inverse_composite_transform', 'inverse_composite'),
                        ('warped_image', 'anat2std'),
                        ('composite_transform', 'composite'),
                        ('inverse_warped_image', 'std2anat')])])

    return quickreg
Example #33
def anatomical_preprocessing():
    '''
    Inputs:
        MP2RAGE Skull stripped image using Spectre-2010

    Workflow:
        1. reorient to RPI
        2. create a brain mask

    Returns:
        brain
        brain_mask

    '''
    # define workflow
    flow = Workflow('anat_preprocess')
    inputnode = Node(util.IdentityInterface(
        fields=['anat', 'anat_gm', 'anat_wm', 'anat_csf', 'anat_first']),
                     name='inputnode')
    outputnode = Node(util.IdentityInterface(fields=[
        'brain',
        'brain_gm',
        'brain_wm',
        'brain_csf',
        'brain_first',
        'brain_mask',
    ]),
                      name='outputnode')

    reorient = Node(interface=preprocess.Resample(), name='anat_reorient')
    reorient.inputs.orientation = 'RPI'
    reorient.inputs.outputtype = 'NIFTI'

    erode = Node(interface=fsl.ErodeImage(), name='anat_preproc')

    reorient_gm = reorient.clone('anat_preproc_gm')
    reorient_wm = reorient.clone('anat_preproc_wm')
    reorient_cm = reorient.clone('anat_preproc_csf')
    reorient_first = reorient.clone('anat_preproc_first')

    make_mask = Node(interface=fsl.UnaryMaths(), name='anat_preproc_mask')
    make_mask.inputs.operation = 'bin'

    # connect workflow nodes
    flow.connect(inputnode, 'anat', reorient, 'in_file')
    flow.connect(inputnode, 'anat_gm', reorient_gm, 'in_file')
    flow.connect(inputnode, 'anat_wm', reorient_wm, 'in_file')
    flow.connect(inputnode, 'anat_csf', reorient_cm, 'in_file')
    flow.connect(inputnode, 'anat_first', reorient_first, 'in_file')
    flow.connect(reorient, 'out_file', erode, 'in_file')
    flow.connect(erode, 'out_file', make_mask, 'in_file')
    flow.connect(make_mask, 'out_file', outputnode, 'brain_mask')

    flow.connect(erode, 'out_file', outputnode, 'brain')
    flow.connect(reorient_gm, 'out_file', outputnode, 'brain_gm')
    flow.connect(reorient_wm, 'out_file', outputnode, 'brain_wm')
    flow.connect(reorient_cm, 'out_file', outputnode, 'brain_csf')
    flow.connect(reorient_first, 'out_file', outputnode, 'brain_first')

    return flow
Example #34
def make_workflow_roi(region):
    """
    Benson_ROI_Names = {'V1', 'V2', 'V3', 'hV4', 'VO1', 'VO2', 'LO1', 'LO2', 'TO1', 'TO2', 'V3B', 'V3A'};

    Wang_ROI_Names = [
        'V1v', 'V1d', 'V2v', 'V2d', 'V3v', 'V3d', 'hV4', 'VO1', 'VO2', 'PHC1', 'PHC2',
        'TO2', 'TO1', 'LO2', 'LO1', 'V3B', 'V3A', 'IPS0', 'IPS1', 'IPS2', 'IPS3', 'IPS4' ,
        'IPS5', 'SPL1', 'FEF'];
    """

    w = Workflow(f'roi_{region}')

    n_in = Node(IdentityInterface(fields=[
        'atlas',
        'func_to_struct',
        'struct_to_freesurfer',
        'ref',
    ]),
                name='input')

    n_out = Node(IdentityInterface(fields=[
        'mask_file',
    ]), name='output')

    n_m = Node(Merge(2), 'merge')

    n_v = Node(MathsCommand(), region)
    n_v.inputs.out_file = 'roi.nii.gz'
    n_v.inputs.nan2zeros = True

    if region == 'V1':
        n_v.inputs.args = '-uthr 1 -bin'
    elif region == 'V2':
        n_v.inputs.args = '-thr 2 -uthr 3 -bin'
    elif region == 'V3':
        n_v.inputs.args = '-thr 4 -uthr 5 -bin'
    else:
        raise ValueError(f'Unknown region {region}. It should be V1, V2, V3')

    at = Node(ApplyTransforms(), 'applytransform')
    at.inputs.dimension = 3
    at.inputs.output_image = 'roi_func.nii.gz'
    at.inputs.interpolation = 'Linear'
    at.inputs.default_value = 0
    at.inputs.invert_transform_flags = [True, True]

    w.connect(n_in, 'atlas', n_v, 'in_file')
    w.connect(n_v, 'out_file', at, 'input_image')
    w.connect(n_in, 'ref', at, 'reference_image')
    w.connect(n_in, 'struct_to_freesurfer', n_m, 'in1')
    w.connect(n_in, 'func_to_struct', n_m, 'in2')
    w.connect(n_m, 'out', at, 'transforms')
    w.connect(at, 'output_image', n_out, 'mask_file')

    return w
Example #35
File: utils.py Project: Yaqiongxiao/LeiCA
    def _merge_nii(file_list, out_filename):
        import os
        from nipype.pipeline.engine import Node, Workflow
        import nipype.interfaces.fsl as fsl

        merge = Node(fsl.Merge(dimension='t'), name='merge')
        merge.base_dir = os.getcwd()
        merge.inputs.in_files = file_list
        merge.inputs.merged_file = out_filename
        result = merge.run()

        return result.outputs.merged_file
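
A hedged usage sketch with hypothetical file names; the merge runs in the current working directory and returns the path of the merged 4D file:

merged = _merge_nii(['run1.nii.gz', 'run2.nii.gz'],
                    out_filename='rest_merged.nii.gz')  # hypothetical inputs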
Example #36
    def create(self):  # , **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print(("=" * 80))
        print((csvOut.outputs.__dict__))
        print(("=" * 80))

        iters = OrderedDict()
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)
        iters['tests'], iters['trains'] = subsample_crossValidationSet(result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'trainindex', 'testindex']
        inputsND = Node(interface=IdentityInterface(fields=out_fields),
                        run_without_submitting=True, name='inputs')
        inputsND.iterables = [('trainindex', iters['trains']),
                              ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__['t1']
            inputsND.inputs.Label = csvOut.outputs.__dict__['label']
            inputsND.inputs.T2 = csvOut.outputs.__dict__['t2']
            pass  # TODO
        metaflow = Workflow(name='metaflow')
        metaflow.config['execution'] = {
            'plugin': 'Linear',
            'stop_on_first_crash': 'false',
            'stop_on_first_rerun': 'false',
        # This stops at first attempt to rerun, before running, and before deleting previous results.
            'hash_method': 'timestamp',
            'single_thread_matlab': 'true',  # Multi-core 2011a  multi-core for matrix multiplication.
            'remove_unnecessary_outputs': 'true',
            'use_relative_paths': 'false',  # relative paths should be on, require hash update when changed.
            'remove_node_directories': 'false',  # Experimental
            'local_hash_check': 'false'
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect(
            [(metaflow, fusionflow, [('inputs.trainindex', 'trainT1s.index'), ('inputs.T1', 'trainT1s.inlist')]),
             (metaflow, fusionflow,
              [('inputs.trainindex', 'trainLabels.index'), ('inputs.Label', 'trainLabels.inlist')]),
             (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'), ('inputs.T1', 'testT1s.inlist')])
             ])
Example #37
def func_preprocess(name = 'func_preproc'):

    '''
    Method to preprocess functional data after warping to anatomical space.

    Accomplished after one step Distortion Correction, Motion Correction and Boundary based linear registration to
    anatomical space.

    Procedure includes:
    # 1- skull strip
    # 2- Normalize the image intensity values.
    # 3- Calculate Mean of Skull stripped image
    # 4- Create brain mask from Normalized data.
    '''

    # Define Workflow
    flow        = Workflow(name=name)
    inputnode   = Node(util.IdentityInterface(fields=['func_in']),
                           name='inputnode')
    outputnode  = Node(util.IdentityInterface(fields=['func_preproc',
                                                      'func_preproc_mean',
                                                      'func_preproc_mask']),
                           name = 'outputnode')


    # 2- Normalize the image intensity values.
    norm                               = Node(interface = fsl.ImageMaths(),       name = 'func_normalized')
    norm.inputs.op_string              = '-ing 1000'
    norm.inputs.out_data_type          = 'float'
    norm.inputs.output_type            = 'NIFTI'

    # 4- Create brain mask from Normalized data.
    mask                               = Node(interface = fsl.BET(),  name = 'func_preprocessed')
    mask.inputs.functional             = True
    mask.inputs.mask                   = True
    mask.inputs.frac                   = 0.5
    mask.inputs.vertical_gradient      = 0
    mask.inputs.threshold              = True

    # 3- Calculate Mean of Skull stripped image
    mean                          = Node(interface = preprocess.TStat(),     name = 'func_preprocessed_mean')
    mean.inputs.options           = '-mean'
    mean.inputs.outputtype        = 'NIFTI'


    flow.connect( inputnode  ,   'func_in'           ,   norm,        'in_file'     )
    flow.connect( norm       ,   'out_file'          ,   mask,        'in_file'     )
    flow.connect( norm       ,   'out_file'          ,   mean,        'in_file'     )
    flow.connect( mask       ,   'out_file'          ,   outputnode,  'func_preproc')
    flow.connect( mask       ,   'mask_file'         ,   outputnode,  'func_preproc_mask')
    flow.connect( mean       ,   'out_file'          ,   outputnode,  'func_preproc_mean')

    return flow
Example #38
def create_conversion(
    name, subject, scans, working_dir, out_dir, folder, xnat_server, xnat_user, xnat_pass, project_id, exp_id
):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config["execution"]["crashdump_dir"] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(util.IdentityInterface(fields=["scan_key", "scan_val"]), name="scan_infosource")
    scan_infosource.iterables = [("scan_key", list(scans.keys())), ("scan_val", list(scans.values()))]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(
        nio.XNATSource(
            infields=["project_id", "subject_id", "exp_id", "scan_id"],
            outfields=["dicom"],
            server=xnat_server,
            user=xnat_user,
            pwd=xnat_pass,
            cache_dir=working_dir,
        ),
        name="xnatsource",
    )

    xnatsource.inputs.query_template = (
        "/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files"
    )
    xnatsource.inputs.query_template_args["dicom"] = [["project_id", "subject_id", "exp_id", "scan_id"]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [("scan_val", "scan_id")])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect(
        [
            (scan_infosource, dcmconvert, [("scan_key", "inputnode.filename")]),
            (xnatsource, dcmconvert, [("dicom", "inputnode.dicoms")]),
        ]
    )

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False), name="sink")

    convert.connect([(dcmconvert, sink, [("outputnode.nifti", folder)])])

    convert.run()
Example #39
def anatomical_preprocessing():
    '''
    Inputs:
        MP2RAGE Skull stripped image using Spectre-2010

    Workflow:
        1. reorient to RPI
        2. create a brain mask

    Returns:
        brain
        brain_mask

    '''
    # define workflow
    flow = Workflow('anat_preprocess')
    inputnode    = Node(util.IdentityInterface(fields=['anat', 'anat_gm', 'anat_wm', 'anat_csf', 'anat_first']), name = 'inputnode')
    outputnode   = Node(util.IdentityInterface(fields=['brain','brain_gm', 'brain_wm', 'brain_csf', 'brain_first', 'brain_mask',]),  name = 'outputnode')

    reorient   = Node(interface=preprocess.Resample(),                     name = 'anat_reorient')
    reorient.inputs.orientation = 'RPI'
    reorient.inputs.outputtype = 'NIFTI'

    erode = Node(interface=fsl.ErodeImage(),                                 name = 'anat_preproc')

    reorient_gm    = reorient.clone('anat_preproc_gm')
    reorient_wm    = reorient.clone('anat_preproc_wm')
    reorient_cm    = reorient.clone('anat_preproc_csf')
    reorient_first = reorient.clone('anat_preproc_first')

    make_mask    = Node(interface=fsl.UnaryMaths(),                        name = 'anat_preproc_mask')
    make_mask.inputs.operation = 'bin'

    # connect workflow nodes
    flow.connect(inputnode,    'anat'     , reorient,      'in_file'    )
    flow.connect(inputnode,    'anat_gm'  , reorient_gm,   'in_file'    )
    flow.connect(inputnode,    'anat_wm'  , reorient_wm,   'in_file'    )
    flow.connect(inputnode,    'anat_csf' , reorient_cm,   'in_file'    )
    flow.connect(inputnode,    'anat_first' , reorient_first,'in_file'    )
    flow.connect(reorient,     'out_file' , erode,        'in_file'    )
    flow.connect(erode,        'out_file' , make_mask,    'in_file'    )
    flow.connect(make_mask,    'out_file' , outputnode,   'brain_mask' )

    flow.connect(erode,        'out_file' , outputnode,   'brain'      )
    flow.connect(reorient_gm,  'out_file' , outputnode,   'brain_gm'   )
    flow.connect(reorient_wm,  'out_file' , outputnode,   'brain_wm'   )
    flow.connect(reorient_cm,  'out_file' , outputnode,   'brain_csf'  )
    flow.connect(reorient_first,  'out_file' , outputnode,   'brain_first' )

    return flow
Example #40
def create_converter_diffusion_pipeline(working_dir, ds_dir, name="converter_diffusion"):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")

    # set fsl output
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=["dMRI_dicom"]), name="inputnode")

    outputnode = Node(util.IdentityInterface(fields=["dMRI"]), name="outputnode")

    niftisink = Node(nio.DataSink(), name="niftisink")
    niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")

    #######

    converter_dMRI = Node(Dcm2nii(), name="converter_dMRI")
    converter_dMRI.inputs.gzip_output = True
    converter_dMRI.inputs.nii_output = True
    converter_dMRI.inputs.anonymize = False
    converter_dMRI.plugin_args = {"submit_specs": "request_memory = 2000"}
    converter_wf.connect(inputnode, "dMRI_dicom", converter_dMRI, "source_names")

    dMRI_rename = Node(util.Rename(format_string="DTI_mx_137.nii.gz"), name="dMRI_rename")
    converter_wf.connect(converter_dMRI, "converted_files", dMRI_rename, "in_file")

    bvecs_rename = Node(util.Rename(format_string="DTI_mx_137.bvecs"), name="bvecs_rename")
    converter_wf.connect(converter_dMRI, "bvecs", bvecs_rename, "in_file")

    bvals_rename = Node(util.Rename(format_string="DTI_mx_137.bvals"), name="bvals_rename")
    converter_wf.connect(converter_dMRI, "bvals", bvals_rename, "in_file")

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
    converter_wf.connect(dMRI_rename, "out_file", reor_2_std, "in_file")
    converter_wf.connect(reor_2_std, "out_file", outputnode, "dMRI")

    # save original niftis
    converter_wf.connect(reor_2_std, "out_file", niftisink, "dMRI.@dwi")
    converter_wf.connect(bvals_rename, "out_file", niftisink, "dMRI.@bvals")
    converter_wf.connect(bvecs_rename, "out_file", niftisink, "dMRI.@bvecs")

    converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
    return converter_wf
Example #41
File: memory.py Project: Alunisiira/nipype
    def __call__(self, **kwargs):
        kwargs = modify_paths(kwargs, relative=False)
        interface = self.interface()
        # Set the inputs early to get some argument checking
        interface.inputs.set(**kwargs)
        # Make a name for our node
        inputs = interface.inputs.get_hashval()
        hasher = hashlib.new('md5')
        hasher.update(pickle.dumps(inputs))
        dir_name = '%s-%s' % (interface.__class__.__module__.replace('.', '-'),
                              interface.__class__.__name__)
        job_name = hasher.hexdigest()
        node = Node(interface, name=job_name)
        node.base_dir = os.path.join(self.base_dir, dir_name)
        out = node.run()
        if self.callback is not None:
            self.callback(dir_name, job_name)
        return out
Example #42
def smooth_data(name = 'func_smoothed'):
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl as fsl

    flow        = Workflow(name)

    inputnode   = Node(util.IdentityInterface(fields=['func_data']),
                       name = 'inputnode')

    outputnode  =  Node(util.IdentityInterface(fields=['func_smoothed']),
                       name = 'outputnode')

    smooth      = Node(interface=fsl.Smooth(), name='func_smooth_fwhm_4')
    smooth.inputs.fwhm                 = 4.0
    smooth.terminal_output             = 'file'

    flow.connect(inputnode, 'func_data'      , smooth      , 'in_file'    )
    flow.connect(smooth,    'smoothed_file'  , outputnode  , 'func_smoothed'   )


    return flow
#### running directly ############################################################################################################

fmapepi.base_dir='/scr/kansas1/huntenburg/lemon_missing/working_dir/'
#fmapepi.config['execution']={'remove_unnecessary_outputs': 'False'}
data_dir = '/scr/jessica2/Schaare/LEMON/'
fs_subjects_dir='/scr/jessica2/Schaare/LEMON/freesurfer/'
out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
#subjects=['LEMON001']
subjects=[]
with open('/scr/jessica2/Schaare/LEMON/missing_subjects.txt', 'r') as f:
    for line in f:
        subjects.append(line.strip())

# create inforsource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id','fs_subjects_dir']), 
                  name='infosource')
infosource.inputs.fs_subjects_dir=fs_subjects_dir
infosource.iterables=('subject_id', subjects)

# define templates and select files
templates={'epi_mean':'preprocessed/{subject_id}/motion_correction/rest_moco_mean.nii.gz',
           'mag': 'raw/{subject_id}/rest/fmap_mag_1.nii.gz',
           'phase': 'raw/{subject_id}/rest/fmap_phase.nii.gz',
           'anat_head': 'preprocessed/{subject_id}/freesurfer_anatomy/T1_out.nii.gz',
           'anat_brain': 'preprocessed/{subject_id}/freesurfer_anatomy/brain_out.nii.gz',
           'wmseg':'preprocessed/{subject_id}/freesurfer_anatomy/brain_out_wmseg.nii.gz'}

selectfiles = Node(nio.SelectFiles(templates,
                                   base_directory=data_dir),
                   name="selectfiles")
Example #44
# directories
working_dir = '/scr/ilz3/myelinconnect/working_dir/' 
data_dir= '/scr/ilz3/myelinconnect/'
out_dir = '/scr/ilz3/myelinconnect/final_struct_space/rest1_1_trans'

# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

# main workflow
smooth = Workflow(name='smooth')
smooth.base_dir = working_dir
smooth.config['execution']['crashdump_dir'] = smooth.base_dir + "/crash_files"

# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=['subject']), 
                  name='subject_infosource')
subject_infosource.iterables=[('subject', subjects_db)]

# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=['session']), 
                  name='session_infosource')
session_infosource.iterables=[('session', sessions)]

# select files
templates={'rest': 'final_struct_space/rest1_1_trans/{subject}_{session}_denoised_trans.nii.gz'
           }    
selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                   name="selectfiles")

smooth.connect([(subject_infosource, selectfiles, [('subject', 'subject')]),
                (session_infosource, selectfiles, [('session', 'session')])
                ])
Example #45
def create_denoise_pipeline(name='denoise'):
    # workflow
    denoise = Workflow(name='denoise')
    # Define nodes
    inputnode = Node(interface=util.IdentityInterface(fields=['anat_brain',
                                                              'brain_mask',
                                                              'epi2anat_dat',
                                                              'unwarped_mean',
                                                              'epi_coreg',
                                                              'moco_par',
                                                              'highpass_sigma',
                                                              'lowpass_sigma',
                                                              'tr']),
                     name='inputnode')
    outputnode = Node(interface=util.IdentityInterface(fields=['wmcsf_mask',
                                                               'brain_mask_resamp',
                                                               'brain_mask2epi',
                                                               'combined_motion',
                                                               'outlier_files',
                                                               'intensity_files',
                                                               'outlier_stats',
                                                               'outlier_plots',
                                                               'mc_regressor',
                                                               'mc_F',
                                                               'mc_pF',
                                                               'comp_regressor',
                                                               'comp_F',
                                                               'comp_pF',
                                                               # FL added fullspectrum
                                                               'ts_fullspectrum',
                                                               'normalized_file']),
                      name='outputnode')
    # run fast to get tissue probability classes
    fast = Node(fsl.FAST(), name='fast')
    denoise.connect([(inputnode, fast, [('anat_brain', 'in_files')])])
    # functions to select tissue classes
    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(np.array(filename_to_list(files))[idx].tolist())

    def selectsingle(files, idx):
        return files[idx]

    # resample tissue classes
    resample_tissue = MapNode(afni.Resample(resample_mode='NN',
                                            outputtype='NIFTI_GZ'),
                              iterfield=['in_file'],
                              name='resample_tissue')
    denoise.connect([(inputnode, resample_tissue, [('epi_coreg', 'master')]),
                     (fast, resample_tissue, [(('partial_volume_files', selectindex, [0, 2]), 'in_file')]),
                     ])
    # binarize tissue classes
    binarize_tissue = MapNode(fsl.ImageMaths(op_string='-nan -thr 0.99 -ero -bin'),
                              iterfield=['in_file'],
                              name='binarize_tissue')
    denoise.connect([(resample_tissue, binarize_tissue, [('out_file', 'in_file')]),
                     ])
    # combine tissue classes to noise mask
    wmcsf_mask = Node(fsl.BinaryMaths(operation='add',
                                      out_file='wmcsf_mask_lowres.nii.gz'),
                      name='wmcsf_mask')
    denoise.connect([(binarize_tissue, wmcsf_mask, [(('out_file', selectsingle, 0), 'in_file'),
                                                    (('out_file', selectsingle, 1), 'operand_file')]),
                     (wmcsf_mask, outputnode, [('out_file', 'wmcsf_mask')])])
    # resample brain mask
    resample_brain = Node(afni.Resample(resample_mode='NN',
                                        outputtype='NIFTI_GZ',
                                        out_file='T1_brain_mask_lowres.nii.gz'),
                          name='resample_brain')
    denoise.connect([(inputnode, resample_brain, [('brain_mask', 'in_file'),
                                                  ('epi_coreg', 'master')]),
                     (resample_brain, outputnode, [('out_file', 'brain_mask_resamp')])])
    # project brain mask into original epi space for quality assessment
    brainmask2epi = Node(fs.ApplyVolTransform(interp='nearest',
                                              inverse=True,
                                              transformed_file='T1_brain_mask2epi.nii.gz', ),
                         name='brainmask2epi')
    denoise.connect([(inputnode, brainmask2epi, [('brain_mask', 'target_file'),
                                                 ('epi2anat_dat', 'reg_file'),
                                                 ('unwarped_mean', 'source_file')]),
                     (brainmask2epi, outputnode, [('transformed_file', 'brain_mask2epi')])])
    # perform artefact detection
    artefact = Node(ra.ArtifactDetect(save_plot=True,
                                      use_norm=True,
                                      parameter_source='FSL',
                                      mask_type='file',
                                      norm_threshold=1,
                                      zintensity_threshold=3,
                                      use_differences=[True, False]
                                      ),
                    name='artefact')
    artefact.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, artefact, [('epi_coreg', 'realigned_files'),
                                            ('moco_par', 'realignment_parameters')]),
                     (resample_brain, artefact, [('out_file', 'mask_file')]),
                     (artefact, outputnode, [('norm_files', 'combined_motion'),
                                             ('outlier_files', 'outlier_files'),
                                             ('intensity_files', 'intensity_files'),
                                             ('statistic_files', 'outlier_stats'),
                                             ('plot_files', 'outlier_plots')])])
    # Compute motion regressors
    motreg = Node(util.Function(input_names=['motion_params', 'order', 'derivatives'],
                                output_names=['out_files'],
                                function=motion_regressors),
                  name='getmotionregress')
    motreg.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, motreg, [('moco_par', 'motion_params')])])
    # Create a filter to remove motion and art confounds
    createfilter1 = Node(util.Function(input_names=['motion_params', 'comp_norm',
                                                    'outliers', 'detrend_poly'],
                                       output_names=['out_files'],
                                       function=build_filter1),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    createfilter1.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(motreg, createfilter1, [('out_files', 'motion_params')]),
                     (artefact, createfilter1, [  # ('norm_files', 'comp_norm'),
                                                  ('outlier_files', 'outliers')]),
                     (createfilter1, outputnode, [('out_files', 'mc_regressor')])
                     ])
    # regress out motion and art confounds
    filter1 = Node(fsl.GLM(out_f_name='F_mcart.nii.gz',
                           out_pf_name='pF_mcart.nii.gz',
                           out_res_name='rest_mc_denoised.nii.gz',
                           demean=True),
                   name='filtermotion')
    filter1.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, filter1, [('epi_coreg', 'in_file')]),
                     (createfilter1, filter1, [(('out_files', list_to_filename), 'design')]),
                     (filter1, outputnode, [('out_f', 'mc_F'),
                                            ('out_pf', 'mc_pF')])])
    # create filter with compcor components
    createfilter2 = Node(util.Function(input_names=['realigned_file', 'mask_file',
                                                    'num_components',
                                                    'extra_regressors'],
                                       output_names=['out_files'],
                                       function=extract_noise_components),
                         name='makecompcorfilter')
    createfilter2.inputs.num_components = 6
    createfilter2.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(createfilter1, createfilter2, [(('out_files', list_to_filename), 'extra_regressors')]),
                     (filter1, createfilter2, [('out_res', 'realigned_file')]),
                     (wmcsf_mask, createfilter2, [('out_file', 'mask_file')]),
                     (createfilter2, outputnode, [('out_files', 'comp_regressor')]),
                     ])
    # regress compcor and other noise components
    filter2 = Node(fsl.GLM(out_f_name='F_noise.nii.gz',
                           out_pf_name='pF_noise.nii.gz',
                           out_res_name='rest2anat_denoised.nii.gz',
                           demean=True),
                   name='filternoise')
    filter2.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(filter1, filter2, [('out_res', 'in_file')]),
                     (createfilter2, filter2, [('out_files', 'design')]),
                     (resample_brain, filter2, [('out_file', 'mask')]),
                     (filter2, outputnode, [('out_f', 'comp_F'),
                                            ('out_pf', 'comp_pF')])
                     ])
    # bandpass filter denoised file
    bandpass_filter = Node(fsl.TemporalFilter(out_file='rest_denoised_bandpassed.nii.gz'),
                           name='bandpass_filter')
    bandpass_filter.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, bandpass_filter, [('highpass_sigma', 'highpass_sigma'),
                                                   ('lowpass_sigma', 'lowpass_sigma')]),
                     (filter2, bandpass_filter, [('out_res', 'in_file')]),
                     # FL added fullspectrum to outputnode
                     (filter2, outputnode, [('out_res', 'ts_fullspectrum')])
                     ])
    # time-normalize scans
    normalize_time = Node(util.Function(input_names=['in_file', 'tr'],
                                        output_names=['out_file'],
                                        function=time_normalizer),
                          name='normalize_time')
    normalize_time.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, normalize_time, [('tr', 'tr')]),
                     (bandpass_filter, normalize_time, [('out_file', 'in_file')]),
                     (normalize_time, outputnode, [('out_file', 'normalized_file')])
                     ])
    return denoise
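
A hedged instantiation sketch for this pipeline. All paths and values are placeholders, the helper functions it references (motion_regressors, build_filter1, extract_noise_components, time_normalizer, list_to_filename) must be importable in the enclosing module, and in practice the inputs come from upstream coregistration and fieldmap nodes rather than literal files:

denoise = create_denoise_pipeline()
denoise.base_dir = '/tmp/work'
denoise.inputs.inputnode.anat_brain = '/tmp/T1_brain.nii.gz'
denoise.inputs.inputnode.brain_mask = '/tmp/T1_brain_mask.nii.gz'
denoise.inputs.inputnode.epi2anat_dat = '/tmp/epi2anat.dat'
denoise.inputs.inputnode.unwarped_mean = '/tmp/rest_mean_unwarped.nii.gz'
denoise.inputs.inputnode.epi_coreg = '/tmp/rest_coreg.nii.gz'
denoise.inputs.inputnode.moco_par = '/tmp/rest_moco.par'
# FSL expects bandpass cutoffs as sigmas in volumes; for TR = 2.3 s and
# cutoffs of 0.01 / 0.1 Hz: sigma = 1 / (2 * TR * f)
denoise.inputs.inputnode.highpass_sigma = 1. / (2 * 2.3 * 0.01)
denoise.inputs.inputnode.lowpass_sigma = 1. / (2 * 2.3 * 0.1)
denoise.inputs.inputnode.tr = 2.3
denoise.run()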
Example #46
def calc_local_metrics(
    preprocessed_data_dir,
    subject_id,
    parcellations_dict,
    bp_freq_list,
    fd_thresh,
    working_dir,
    ds_dir,
    use_n_procs,
    plugin_name,
):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import utils as calc_metrics_utils

    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    wf = Workflow(name="LeiCA_LIFE_metrics")
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(
        logging=dict(workflow_level="DEBUG"),
        execution={"stop_on_first_crash": True, "remove_unnecessary_outputs": True, "job_finished_timeout": 15},
    )
    config.update_config(nipype_cfg)
    wf.config["execution"]["crashdump_dir"] = os.path.join(working_dir, "crash")

    ds = Node(nio.DataSink(base_directory=ds_dir), name="ds")
    ds.inputs.regexp_substitutions = [
        ("MNI_resampled_brain_mask_calc.nii.gz", "falff.nii.gz"),
        ("residual_filtered_3dT.nii.gz", "alff.nii.gz"),
        ("_parcellation_", ""),
        ("_bp_freqs_", "bp_"),
    ]

    #####################
    # ITERATORS
    #####################
    # PARCELLATION ITERATOR
    parcellation_infosource = Node(util.IdentityInterface(fields=["parcellation"]), name="parcellation_infosource")
    parcellation_infosource.iterables = ("parcellation", parcellations_dict.keys())

    bp_filter_infosource = Node(util.IdentityInterface(fields=["bp_freqs"]), name="bp_filter_infosource")
    bp_filter_infosource.iterables = ("bp_freqs", bp_freq_list)

    selectfiles = Node(
        nio.SelectFiles(
            {
                "parcellation_time_series": "{subject_id}/con_mat/parcellated_time_series/bp_{bp_freqs}/{parcellation}/parcellation_time_series.npy"
            },
            base_directory=preprocessed_data_dir,
        ),
        name="selectfiles",
    )
    selectfiles.inputs.subject_id = subject_id
    wf.connect(parcellation_infosource, "parcellation", selectfiles, "parcellation")
    wf.connect(bp_filter_infosource, "bp_freqs", selectfiles, "bp_freqs")

    fd_file = Node(
        nio.SelectFiles({"fd_p": "{subject_id}/QC/FD_P_ts"}, base_directory=preprocessed_data_dir), name="fd_file"
    )
    fd_file.inputs.subject_id = subject_id

    ##############
    ## CON MATS
    ##############
    ##############
    ## extract ts
    ##############

    get_good_trs = Node(
        util.Function(
            input_names=["fd_file", "fd_thresh"],
            output_names=["good_trs", "fd_scrubbed_file"],
            function=calc_metrics_utils.get_good_trs,
        ),
        name="get_good_trs",
    )
    wf.connect(fd_file, "fd_p", get_good_trs, "fd_file")
    get_good_trs.inputs.fd_thresh = fd_thresh

    parcellated_ts_scrubbed = Node(
        util.Function(
            input_names=["parcellation_time_series_file", "good_trs"],
            output_names=["parcellation_time_series_scrubbed"],
            function=calc_metrics_utils.parcellation_time_series_scrubbing,
        ),
        name="parcellated_ts_scrubbed",
    )

    wf.connect(selectfiles, "parcellation_time_series", parcellated_ts_scrubbed, "parcellation_time_series_file")
    wf.connect(get_good_trs, "good_trs", parcellated_ts_scrubbed, "good_trs")

    ##############
    ## get conmat
    ##############
    con_mat = Node(
        util.Function(
            input_names=["in_data", "extraction_method"],
            output_names=["matrix", "matrix_file"],
            function=calc_metrics_utils.calculate_connectivity_matrix,
        ),
        name="con_mat",
    )
    con_mat.inputs.extraction_method = "correlation"
    wf.connect(parcellated_ts_scrubbed, "parcellation_time_series_scrubbed", con_mat, "in_data")

    ##############
    ## ds
    ##############

    wf.connect(get_good_trs, "fd_scrubbed_file", ds, "QC.@fd_scrubbed_file")
    fd_str = ("%.1f" % fd_thresh).replace(".", "_")
    wf.connect(con_mat, "matrix_file", ds, "con_mat.matrix_scrubbed_%s.@mat" % fd_str)

    # wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    # wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    # wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == "CondorDAGMan":
        wf.run(plugin=plugin_name, plugin_args={"initial_specs": "request_memory = 1500"})
    if plugin_name == "MultiProc":
        wf.run(plugin=plugin_name, plugin_args={"n_procs": use_n_procs})
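A hypothetical invocation; every argument below is a placeholder, and only the keys of parcellations_dict are consumed in this snippet:

calc_local_metrics(
    preprocessed_data_dir='/data/preprocessed',  # placeholder
    subject_id='sub001',
    parcellations_dict={'craddock_200': {}},     # only keys are iterated here
    bp_freq_list=[(0.01, 0.1)],
    fd_thresh=0.5,
    working_dir='/tmp/work',
    ds_dir='/tmp/ds',
    use_n_procs=4,
    plugin_name='MultiProc',
)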
Example #47
                                  reference_image=template,
                                  terminal_output='file'),
                  name='apply2mean')

# Initiation of the ANTS normalization workflow
normflow = Workflow(name='normflow')
normflow.base_dir = opj(experiment_dir, working_dir)

# Connect up ANTS normalization components
normflow.connect([(antsreg, apply2con, [('composite_transform', 'transforms')]),
                  (antsreg, apply2mean, [('composite_transform',
                                          'transforms')])
                  ])

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - to grab the data (alternativ to DataGrabber)
anat_file = opj('freesurfer', '{subject_id}', 'mri/brain.mgz')
func_file = opj(input_dir_1st, 'contrasts', '{subject_id}',
                '_mriconvert*/*_out.nii.gz')
func_orig_file = opj(input_dir_1st, 'contrasts', '{subject_id}', '[ce]*.nii')
mean_file = opj(input_dir_1st, 'preprocout', '{subject_id}', 'mean*.nii')

templates = {'anat': anat_file,
             'func': func_file,
             'func_orig': func_orig_file,
             'mean': mean_file,
             }

Example #48
# resample anatomy
resamp_anat = Node(fsl.FLIRT(apply_isoxfm=2.3),
                   name = 'resample_anat')
apply_ts.connect([(inputnode, resamp_anat, [('anat_head', 'in_file'),
                                            ('anat_head', 'reference'),
                                            ]),
                  ])



# apply fmap fullwarp
apply_fmap = Node(fsl.ApplyWarp(interp='spline',
                               relwarp=True,
                               out_file='fmap_ts.nii.gz', 
                               datatype='float'),
                 name='apply_fmap') 
   
apply_ts.connect([(inputnode, apply_fmap, [('moco_ts', 'in_file'),
                                         ('fmap_fullwarp', 'field_file')]),
                 (resamp_anat, apply_fmap, [('out_file', 'ref_file')]),
                 (apply_fmap, outputnode, [('out_file', 'fmap_ts')])
                 ])
apply_fmap.plugin_args={'initial_specs': 'request_memory = 8000'}


# apply topup fullwarp
apply_topup = Node(fsl.ApplyWarp(interp='spline',
                               relwarp=True,
                               out_file='topup_ts.nii.gz', 
Example #49
def make_func_subcortical_masks(name = 'func_subcortical'):


    # Define Workflow
    flow        = Workflow(name=name)
    inputnode   = Node(util.IdentityInterface(fields=['func_first']),
                           name='inputnode')
    outputnode  = Node(util.IdentityInterface(fields=['left_nacc', 'left_amygdala',  'left_caudate',  'left_hipoocampus',  'left_pallidum',  'left_putamen', 'left_thalamus',
                                                      'right_nacc','right_amygdala', 'right_caudate', 'right_hipoocampus', 'right_pallidum', 'right_putamen','right_thalamus',
                                                      'midbrain', 'right_striatum', 'left_striatum']),
                           name = 'outputnode')

    left_nacc = Node(interface=fsl.ExtractROI(), name = 'left_nacc')
    left_nacc.inputs.t_min  = 0
    left_nacc.inputs.t_size = 1
    left_nacc.inputs.roi_file = 'left_nacc.nii.gz'

    left_amygdala = Node(interface=fsl.ExtractROI(), name = 'left_amygdala')
    left_amygdala.inputs.t_min  = 1
    left_amygdala.inputs.t_size = 1
    left_amygdala.inputs.roi_file = 'left_amygdala.nii.gz'

    left_caudate = Node(interface=fsl.ExtractROI(), name = 'left_caudate')
    left_caudate.inputs.t_min  = 2
    left_caudate.inputs.t_size = 1
    left_caudate.inputs.roi_file = 'left_caudate.nii.gz'

    left_hipoocampus = Node(interface=fsl.ExtractROI(), name = 'left_hipoocampus')
    left_hipoocampus.inputs.t_min  = 3
    left_hipoocampus.inputs.t_size = 1
    left_hipoocampus.inputs.roi_file = 'left_hipoocampus.nii.gz'

    left_pallidum = Node(interface=fsl.ExtractROI(), name = 'left_pallidum')
    left_pallidum.inputs.t_min  = 4
    left_pallidum.inputs.t_size = 1
    left_pallidum.inputs.roi_file = 'left_pallidum.nii.gz'

    left_putamen = Node(interface=fsl.ExtractROI(), name = 'left_putamen')
    left_putamen.inputs.t_min  = 5
    left_putamen.inputs.t_size = 1
    left_putamen.inputs.roi_file = 'left_putamen.nii.gz'

    left_thalamus = Node(interface=fsl.ExtractROI(), name = 'left_thalamus')
    left_thalamus.inputs.t_min  = 6
    left_thalamus.inputs.t_size = 1
    left_thalamus.inputs.roi_file = 'left_thalamus.nii.gz'

    ###############

    right_nacc = Node(interface=fsl.ExtractROI(), name = 'right_nacc')
    right_nacc.inputs.t_min  = 7
    right_nacc.inputs.t_size = 1
    right_nacc.inputs.roi_file = 'right_nacc.nii.gz'

    right_amygdala = Node(interface=fsl.ExtractROI(), name = 'right_amygdala')
    right_amygdala.inputs.t_min  = 8
    right_amygdala.inputs.t_size = 1
    right_amygdala.inputs.roi_file = 'right_amygdala.nii.gz'

    right_caudate = Node(interface=fsl.ExtractROI(), name = 'right_caudate')
    right_caudate.inputs.t_min  = 9
    right_caudate.inputs.t_size = 1
    right_caudate.inputs.roi_file = 'right_caudate.nii.gz'

    right_hipoocampus = Node(interface=fsl.ExtractROI(), name = 'right_hipoocampus')
    right_hipoocampus.inputs.t_min  = 10
    right_hipoocampus.inputs.t_size = 1
    right_hipoocampus.inputs.roi_file = 'right_hipoocampus.nii.gz'

    right_pallidum = Node(interface=fsl.ExtractROI(), name = 'right_pallidum')
    right_pallidum.inputs.t_min  = 11
    right_pallidum.inputs.t_size = 1
    right_pallidum.inputs.roi_file = 'right_pallidum.nii.gz'

    right_putamen = Node(interface=fsl.ExtractROI(), name = 'right_putamen')
    right_putamen.inputs.t_min  = 12
    right_putamen.inputs.t_size = 1
    right_putamen.inputs.roi_file = 'right_putamen.nii.gz'

    right_thalamus = Node(interface=fsl.ExtractROI(), name = 'right_thalamus')
    right_thalamus.inputs.t_min  = 13
    right_thalamus.inputs.t_size = 1
    right_thalamus.inputs.roi_file = 'right_thalamus.nii.gz'

    midbrain = Node(interface=fsl.ExtractROI(), name = 'midbrain')
    midbrain.inputs.t_min  = 14
    midbrain.inputs.t_size = 1
    midbrain.inputs.roi_file = 'midbrain.nii.gz'

    flow.connect( inputnode  ,   'func_first'   ,   left_nacc,       'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_amygdala,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_caudate,    'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_hipoocampus,'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_pallidum,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_putamen,    'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_thalamus,   'in_file'     )

    flow.connect( inputnode  ,   'func_first'   ,   right_nacc,       'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_amygdala,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_caudate,    'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_hipoocampus,'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_pallidum,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_putamen,    'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_thalamus,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   midbrain,         'in_file'     )

    flow.connect( left_nacc        ,   'roi_file'   ,outputnode   ,   'left_nacc'       )
    flow.connect( left_amygdala    ,   'roi_file'   ,outputnode   ,   'left_amygdala'   )
    flow.connect( left_caudate     ,   'roi_file'   ,outputnode   ,   'left_caudate'    )
    flow.connect( left_hipoocampus ,   'roi_file'   ,outputnode   ,   'left_hipoocampus')
    flow.connect( left_pallidum    ,   'roi_file'   ,outputnode   ,   'left_pallidum')
    flow.connect( left_putamen     ,   'roi_file'   ,outputnode   ,   'left_putamen'    )
    flow.connect( left_thalamus    ,   'roi_file'   ,outputnode   ,   'left_thalamus'   )
    flow.connect( right_nacc       ,   'roi_file'   ,outputnode   ,   'right_nacc'       )
    flow.connect( right_amygdala   ,   'roi_file'   ,outputnode   ,   'right_amygdala'   )
    flow.connect( right_caudate    ,   'roi_file'   ,outputnode   ,   'right_caudate'    )
    flow.connect( right_hipoocampus,   'roi_file'   ,outputnode   ,   'right_hipoocampus')
    flow.connect( right_pallidum   ,   'roi_file'   ,outputnode   ,   'right_pallidum')
    flow.connect( right_putamen    ,   'roi_file'   ,outputnode   ,   'right_putamen'    )
    flow.connect( right_thalamus   ,   'roi_file'   ,outputnode   ,   'right_thalamus'   )
    flow.connect( midbrain         ,   'roi_file'   ,outputnode   ,   'midbrain'         )


    # add images together
    right_striatum = Node(interface=fsl.MultiImageMaths(), name='right_striatum')
    right_striatum.inputs.op_string = '-add %s -add %s -bin'
    right_striatum.inputs.out_file = 'right_striatum.nii.gz'
    list_R_str = Node(util.Function(input_names = ['file_1', 'file_2'],
                                    output_names= ['list'],
                                    function    = return_list),
                                    name        = 'list_str_r')

    flow.connect( right_pallidum     ,   'roi_file'   ,list_R_str       ,   'file_1'         )
    flow.connect( right_putamen      ,   'roi_file'   ,list_R_str       ,   'file_2'         )
    flow.connect( right_caudate      ,   'roi_file'   ,right_striatum   ,   'in_file'        )
    flow.connect( list_R_str         ,   'list'       ,right_striatum   ,   'operand_files'  )
    flow.connect( right_striatum     ,   'out_file'   ,outputnode       ,   'right_striatum' )


    left_striatum = Node(interface=fsl.MultiImageMaths(), name='left_striatum')
    left_striatum.inputs.op_string = '-add %s -add %s'
    left_striatum.inputs.out_file = 'left_striatum.nii.gz'
    list_L_str =  list_R_str.clone('list_str_l')

    flow.connect( left_pallidum     ,   'roi_file'   ,list_L_str       ,   'file_1'         )
    flow.connect( left_putamen      ,   'roi_file'   ,list_L_str       ,   'file_2'         )
    flow.connect( left_caudate      ,   'roi_file'   ,left_striatum    ,   'in_file'        )
    flow.connect( list_L_str        ,   'list'       ,left_striatum    ,   'operand_files'  )
    flow.connect( left_striatum     ,   'out_file'   ,outputnode       ,   'left_striatum'  )


    return flow
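
A usage sketch, assuming the input is a 4D image whose volume order matches the t_min indices above (e.g., a merged FSL FIRST segmentation) and that the return_list helper used by the striatum nodes is defined in the module:

masks = make_func_subcortical_masks('func_subcortical')
masks.base_dir = '/tmp/work'  # placeholder working directory
masks.inputs.inputnode.func_first = '/tmp/first_rois_4d.nii.gz'  # placeholder 4D ROI stack
masks.run()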
Example #50
###################################################################################################################################
# # artefact detection
# ad = Node(ra.ArtifactDetect(save_plot=False,
#                             norm_threshold=1,
#                             zintensity_threshold=3,
#                             mask_type='spm_global',
#                             use_differences = [True, False],
#                             parameter_source='FSL'),
#           name='artefactdetect')
#
# #wf.connect(getmask, 'outputspec.mask',ad, 'mask_file') mask_type='file'


###################################################################################################################################
# tsnr (input is timeseries from inputnode)
tsnr = Node(TSNR(regress_poly=2), name="tsnr")
tsnr.plugin_args = {"initial_specs": "request_memory = 30000"}


###################################################################################################################################
# create noise mask file
getthresh = Node(interface=fsl.ImageStats(op_string="-p 98"), name="getthreshold")
getthresh.plugin_args = {"initial_specs": "request_memory = 30000"}

threshold_stddev = Node(fsl.Threshold(), name="threshold")
threshold_stddev.plugin_args = {"initial_specs": "request_memory = 30000"}

preproc.connect(tsnr, "stddev_file", threshold_stddev, "in_file")
preproc.connect(tsnr, "stddev_file", getthresh, "in_file")
preproc.connect(getthresh, "out_stat", threshold_stddev, "thresh")
preproc.connect(threshold_stddev, "out_file", outputnode, "noise_mask_file")
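
This snippet assumes a surrounding script that defines preproc, outputnode, and the time-series input to tsnr. A minimal scaffolding sketch of just those missing pieces (names and paths are placeholders; TSNR is importable from nipype.algorithms.confounds in recent releases, nipype.algorithms.misc in older ones):

from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.utility as util

preproc = Workflow(name='preproc', base_dir='/tmp/work')  # placeholder
inputnode = Node(util.IdentityInterface(fields=['func']), name='inputnode')
inputnode.inputs.func = '/tmp/rest_moco.nii.gz'  # placeholder time series
outputnode = Node(util.IdentityInterface(fields=['noise_mask_file']),
                  name='outputnode')
# after the snippet's own node definitions, feed the time series into tsnr:
# preproc.connect(inputnode, 'func', tsnr, 'in_file')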
Example #51
def create_registration_pipeline(working_dir, freesurfer_dir, ds_dir, name='registration'):
    """
    find transformations between struct, funct, and MNI
    """

    # initiate workflow
    reg_wf = Workflow(name=name)
    reg_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting', 'rsfMRI_preprocessing')

    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['initial_mean_epi_moco',
                                                    't1w',
                                                    't1w_brain',
                                                    'subject_id',
                                                    'wm_mask_4_bbr',
                                                    'struct_brain_mask']),
                     name='inputnode')

    outputnode = Node(util.IdentityInterface(fields=['struct_2_MNI_warp',
                                                     'struct_2_MNI_mat_flirt',
                                                     'epi_2_struct_mat',
                                                     'struct_2_epi_mat',
                                                     'epi_2_MNI_warp',
                                                     'MNI_2_epi_warp',
                                                     'fs_2_struct_mat',
                                                     'mean_epi_structSpace',
                                                     'mean_epi_MNIspace',
                                                     'struct_MNIspace']),
                      name='outputnode')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.substitutions = [('_TR_id_', 'TR_')]




    ##########################################
    # TOC REGISTRATION MATS AND WARPS
    ##########################################
    # I. STRUCT -> MNI
    ## 1. STRUCT -> MNI with FLIRT
    ## 2. CALC. WARP STRUCT -> MNI with FNIRT

    # II. EPI -> STRUCT
    ## 3. calc EPI->STRUCT initial registration
    ## 4. run EPI->STRUCT via bbr
    ## 5. INVERT to get: STRUCT -> EPI

    # III. COMBINE I. & II.: EPI -> MNI
    ## 6. COMBINE MATS: EPI -> MNI
    ## 7. MNI -> EPI


    ##########################################
    # CREATE REGISTRATION MATS AND WARPS
    ##########################################

    # I. STRUCT -> MNI
    ##########################################
    # 1. REGISTER STRUCT -> MNI with FLIRT
    struct_2_MNI_mat = Node(fsl.FLIRT(dof=12), name='struct_2_MNI_mat')
    struct_2_MNI_mat.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')

    reg_wf.connect(inputnode, 't1w_brain', struct_2_MNI_mat, 'in_file')
    reg_wf.connect(struct_2_MNI_mat, 'out_matrix_file', outputnode, 'struct_2_MNI_mat_flirt')



    # 2. CALC. WARP STRUCT -> MNI with FNIRT
    # cf. wrt. 2mm
    # https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1311&L=FSL&P=R86108&1=FSL&9=A&J=on&d=No+Match%3BMatch%3BMatches&z=4
    struct_2_MNI_warp = Node(fsl.FNIRT(), name='struct_2_MNI_warp')
    struct_2_MNI_warp.inputs.config_file = 'T1_2_MNI152_2mm'
    struct_2_MNI_warp.inputs.ref_file = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
    struct_2_MNI_warp.inputs.field_file = 'struct_2_MNI_warp.nii.gz'
    struct_2_MNI_warp.plugin_args = {'submit_specs': 'request_memory = 4000'}


    reg_wf.connect(inputnode, 't1w', struct_2_MNI_warp, 'in_file')
    reg_wf.connect(struct_2_MNI_mat, 'out_matrix_file', struct_2_MNI_warp, 'affine_file')
    reg_wf.connect(struct_2_MNI_warp, 'field_file', ds, 'registration.struct_2_MNI_warp')
    reg_wf.connect(struct_2_MNI_warp, 'field_file', outputnode, 'struct_2_MNI_warp')
    reg_wf.connect(struct_2_MNI_warp, 'warped_file', outputnode, 'struct_MNIspace')
    reg_wf.connect(struct_2_MNI_warp, 'warped_file', ds, 'registration.struct_MNIspace')


    # II. EPI -> STRUCT (via bbr)
    ##########################################

    # 3. calc EPI->STRUCT initial registration with flirt dof=6 and corratio
    epi_2_struct_flirt6_mat = Node(fsl.FLIRT(dof=6, cost='corratio'), name='epi_2_struct_flirt6_mat')
    epi_2_struct_flirt6_mat.inputs.out_file = 'epi_structSpace_flirt6.nii.gz'
    reg_wf.connect(inputnode, 't1w_brain', epi_2_struct_flirt6_mat, 'reference')
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', epi_2_struct_flirt6_mat, 'in_file')

    # 4. run EPI->STRUCT via bbr
    bbr_schedule = os.path.join(os.getenv('FSLDIR'), 'etc/flirtsch/bbr.sch')
    epi_2_struct_bbr_mat = Node(interface=fsl.FLIRT(dof=6, cost='bbr'), name='epi_2_struct_bbr_mat')
    epi_2_struct_bbr_mat.inputs.schedule = bbr_schedule
    epi_2_struct_bbr_mat.inputs.out_file = 'epi_structSpace.nii.gz'
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', epi_2_struct_bbr_mat, 'in_file')
    reg_wf.connect(inputnode, 't1w_brain', epi_2_struct_bbr_mat, 'reference')
    reg_wf.connect(epi_2_struct_flirt6_mat, 'out_matrix_file', epi_2_struct_bbr_mat, 'in_matrix_file')
    reg_wf.connect(inputnode, 'wm_mask_4_bbr', epi_2_struct_bbr_mat, 'wm_seg')
    reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', ds, 'registration.epi_2_struct_mat')
    reg_wf.connect(epi_2_struct_bbr_mat, 'out_file', outputnode, 'mean_epi_structSpace')


    # 5. INVERT to get: STRUCT -> EPI
    struct_2_epi_mat = Node(fsl.ConvertXFM(invert_xfm=True), name='struct_2_epi_mat')
    reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', struct_2_epi_mat, 'in_file')
    reg_wf.connect(struct_2_epi_mat, 'out_file', outputnode, 'struct_2_epi_mat')


    # III. COMBINE I. & II.: EPI -> MNI
    ##########################################
    # 6. COMBINE MATS: EPI -> MNI
    epi_2_MNI_warp = Node(fsl.ConvertWarp(), name='epi_2_MNI_warp')
    epi_2_MNI_warp.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
    reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', epi_2_MNI_warp, 'premat')  # epi2struct
    reg_wf.connect(struct_2_MNI_warp, 'field_file', epi_2_MNI_warp, 'warp1')  # struct2mni
    reg_wf.connect(epi_2_MNI_warp, 'out_file', outputnode, 'epi_2_MNI_warp')
    reg_wf.connect(epi_2_MNI_warp, 'out_file', ds, 'registration.epi_2_MNI_warp')


    # output: out_file

    # 7. MNI -> EPI
    MNI_2_epi_warp = Node(fsl.InvWarp(), name='MNI_2_epi_warp')
    MNI_2_epi_warp.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
    reg_wf.connect(epi_2_MNI_warp, 'out_file', MNI_2_epi_warp, 'warp')
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', MNI_2_epi_warp, 'reference')
    reg_wf.connect(MNI_2_epi_warp, 'inverse_warp', outputnode, 'MNI_2_epi_warp')
    # output: inverse_warp




    ##########################################
    # TRANSFORM VOLUMES
    ##########################################

    # CREATE STRUCT IN EPI SPACE FOR DEBUGGING
    struct_epiSpace = Node(fsl.ApplyXfm(), name='struct_epiSpace')
    struct_epiSpace.inputs.out_file = 'struct_brain_epiSpace.nii.gz'
    reg_wf.connect(inputnode, 't1w_brain', struct_epiSpace, 'in_file')
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', struct_epiSpace, 'reference')
    reg_wf.connect(struct_2_epi_mat, 'out_file', struct_epiSpace, 'in_matrix_file')
    reg_wf.connect(struct_epiSpace, 'out_file', ds, 'QC.struct_brain_epiSpace')

    # CREATE EPI IN MNI SPACE
    mean_epi_MNIspace = Node(fsl.ApplyWarp(), name='mean_epi_MNIspace')
    mean_epi_MNIspace.inputs.ref_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
    mean_epi_MNIspace.inputs.out_file = 'mean_epi_MNIspace.nii.gz'
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', mean_epi_MNIspace, 'in_file')
    reg_wf.connect(epi_2_MNI_warp, 'out_file', mean_epi_MNIspace, 'field_file')
    reg_wf.connect(mean_epi_MNIspace, 'out_file', ds, 'registration.mean_epi_MNIspace')
    reg_wf.connect(mean_epi_MNIspace, 'out_file', outputnode, 'mean_epi_MNIspace')



    # CREATE MNI IN EPI SPACE FOR DEBUGGING
    MNI_epiSpace = Node(fsl.ApplyWarp(), name='MNI_epiSpace')
    MNI_epiSpace.inputs.in_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
    MNI_epiSpace.inputs.out_file = 'MNI_epiSpace.nii.gz'
    reg_wf.connect(inputnode, 'initial_mean_epi_moco', MNI_epiSpace, 'ref_file')
    reg_wf.connect(MNI_2_epi_warp, 'inverse_warp', MNI_epiSpace, 'field_file')
    reg_wf.connect(MNI_epiSpace, 'out_file', ds, 'registration.MNI_epiSpace')



    reg_wf.write_graph(dotfilename=reg_wf.name, graph2use='flat', format='pdf')

    return reg_wf
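
A hedged instantiation sketch; directories and input files are placeholders. Note the factory writes a graph PDF on construction and expects FSLDIR to be set so the BBR schedule can be located:

reg_wf = create_registration_pipeline(working_dir='/tmp/work',
                                      freesurfer_dir='/tmp/freesurfer',
                                      ds_dir='/tmp/ds')
reg_wf.inputs.inputnode.initial_mean_epi_moco = '/tmp/mean_epi_moco.nii.gz'
reg_wf.inputs.inputnode.t1w = '/tmp/T1.nii.gz'
reg_wf.inputs.inputnode.t1w_brain = '/tmp/T1_brain.nii.gz'
reg_wf.inputs.inputnode.wm_mask_4_bbr = '/tmp/wm_mask.nii.gz'
reg_wf.run()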
Example #52
def learning_predict_data_2samp_wf(working_dir,
                                   ds_dir,
                                   in_data_name_list,
                                   subjects_selection_crit_dict,
                                   subjects_selection_crit_names_list,
                                   aggregated_subjects_dir,
                                   target_list,
                                   use_n_procs,
                                   plugin_name,
                                   confound_regression=[False, True],
                                   run_cv=False,
                                   n_jobs_cv=1,
                                   run_tuning=False,
                                   run_2sample_training=False,
                                   aggregated_subjects_dir_nki=None,
                                   subjects_selection_crit_dict_nki=None,
                                   subjects_selection_crit_name_nki=None,
                                   reverse_split=False,
                                   random_state_nki=666,
                                   run_learning_curve=False,
                                   life_test_size=0.5):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_split_fct, \
        backproject_and_split_weights_fct, select_subjects_fct, select_multimodal_X_fct, learning_curve_plot
    import pandas as pd



    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_2samp_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={'stop_on_first_crash': False,
                                 'remove_unnecessary_outputs': False,
                                 'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False



    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))



    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)

    subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
                                        name='subject_selection_infosource')
    subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']), name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)



    ###############################################################################################################
    # COMPILE LIFE DATA
    ###############################################################################################################

    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
    unimodal_lookup_dict = {}
    for k in in_data_name_list_unique:
        unimodal_lookup_dict[k] = {'X_file': os.path.join(aggregated_subjects_dir, X_file_template.format(
            in_data_name=k)),
                                   'unimodal_backprojection_info_file': os.path.join(aggregated_subjects_dir,
                                                                                     info_file_template.format(
                                                                                         in_data_name=k))
                                   }



    ###############################################################################################################
    # AGGREGATE MULTIMODAL METRICS
    # stack single modality arrays horizontally
    aggregate_multimodal_metrics = Node(util.Function(input_names=['multimodal_list', 'unimodal_lookup_dict'],
                                                      output_names=['X_multimodal_file',
                                                                    'multimodal_backprojection_info',
                                                                    'multimodal_name'],
                                                      function=aggregate_multimodal_metrics_fct),
                                        name='aggregate_multimodal_metrics')
    wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name', aggregate_multimodal_metrics,
               'multimodal_list')
    aggregate_multimodal_metrics.inputs.unimodal_lookup_dict = unimodal_lookup_dict



    ###############################################################################################################
    # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
    select_subjects = Node(util.Function(input_names=['df_all_subjects_pickle_file',
                                                      'subjects_selection_crit_dict',
                                                      'selection_criterium'],
                                         output_names=['df_use_file',
                                                       'df_use_pickle_file',
                                                       'subjects_selection_index'],
                                         function=select_subjects_fct),
                           name='select_subjects')

    select_subjects.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file
    select_subjects.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict
    wf.connect(subject_selection_infosource, 'selection_criterium', select_subjects, 'selection_criterium')



    ###############################################################################################################
    # SELECT MULTIMODAL X
    # select subjects (rows) from multimodal X according to the indexer
    select_multimodal_X = Node(util.Function(input_names=['X_multimodal_file', 'subjects_selection_index',
                                                          'selection_criterium'],
                                             output_names=['X_multimodal_selected_file'],
                                             function=select_multimodal_X_fct),
                               name='select_multimodal_X')
    wf.connect(aggregate_multimodal_metrics, 'X_multimodal_file', select_multimodal_X, 'X_multimodal_file')
    wf.connect(select_subjects, 'subjects_selection_index', select_multimodal_X, 'subjects_selection_index')






    ###############################################################################################################
    # COMPILE NKI DATA
    ###############################################################################################################
    if run_2sample_training:

        ###############################################################################################################
        # GET INFO AND SELECT FILES
        df_all_subjects_pickle_file_nki = os.path.join(aggregated_subjects_dir_nki,
                                                       'df_all_subjects_pickle_file/df_all.pkl')
        df_nki = pd.read_pickle(df_all_subjects_pickle_file_nki)

        # build lookup dict for unimodal data
        X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
        info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
        unimodal_lookup_dict_nki = {}
        for k in in_data_name_list_unique:
            unimodal_lookup_dict_nki[k] = {'X_file': os.path.join(aggregated_subjects_dir_nki, X_file_template.format(
                in_data_name=k)),
                                           'unimodal_backprojection_info_file': os.path.join(
                                               aggregated_subjects_dir_nki,
                                               info_file_template.format(
                                                   in_data_name=k))
                                           }



        ###############################################################################################################
        # AGGREGATE MULTIMODAL METRICS
        # stack single modality arrays horizontally
        aggregate_multimodal_metrics_nki = Node(util.Function(input_names=['multimodal_list', 'unimodal_lookup_dict'],
                                                              output_names=['X_multimodal_file',
                                                                            'multimodal_backprojection_info',
                                                                            'multimodal_name'],
                                                              function=aggregate_multimodal_metrics_fct),
                                                name='aggregate_multimodal_metrics_nki')
        wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name', aggregate_multimodal_metrics_nki,
                   'multimodal_list')
        aggregate_multimodal_metrics_nki.inputs.unimodal_lookup_dict = unimodal_lookup_dict_nki



        ###############################################################################################################
        # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
        select_subjects_nki = Node(util.Function(input_names=['df_all_subjects_pickle_file',
                                                              'subjects_selection_crit_dict',
                                                              'selection_criterium'],
                                                 output_names=['df_use_file',
                                                               'df_use_pickle_file',
                                                               'subjects_selection_index'],
                                                 function=select_subjects_fct),
                                   name='select_subjects_nki')

        select_subjects_nki.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file_nki
        select_subjects_nki.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict_nki
        select_subjects_nki.inputs.selection_criterium = subjects_selection_crit_name_nki



        ###############################################################################################################
        # SELECT MULTIMODAL X
        # select subjects (rows) from multimodal X according to the indexer
        select_multimodal_X_nki = Node(util.Function(input_names=['X_multimodal_file', 'subjects_selection_index',
                                                                  'selection_criterium'],
                                                     output_names=['X_multimodal_selected_file'],
                                                     function=select_multimodal_X_fct),
                                       name='select_multimodal_X_nki')
        wf.connect(aggregate_multimodal_metrics_nki, 'X_multimodal_file', select_multimodal_X_nki, 'X_multimodal_file')
        wf.connect(select_subjects_nki, 'subjects_selection_index', select_multimodal_X_nki, 'subjects_selection_index')





    ###############################################################################################################
    # RUN PREDICTION
    #
    prediction_node_dict = {}
    backprojection_node_dict = {}

    prediction_split = Node(util.Function(input_names=['X_file',
                                                       'target_name',
                                                       'selection_criterium',
                                                       'df_file',
                                                       'data_str',
                                                       'regress_confounds',
                                                       'run_cv',
                                                       'n_jobs_cv',
                                                       'run_tuning',
                                                       'X_file_nki',
                                                       'df_file_nki',
                                                       'reverse_split',
                                                       'random_state_nki',
                                                       'run_learning_curve',
                                                       'life_test_size'],
                                          output_names=['scatter_file',
                                                        'brain_age_scatter_file',
                                                        'df_life_out_file',
                                                        'df_nki_out_file',
                                                        'df_big_out_file',
                                                        'model_out_file',
                                                        'df_res_out_file',
                                                        'tuning_curve_file',
                                                        'scatter_file_cv',
                                                        'learning_curve_plot_file',
                                                        'learning_curve_df_file'],
                                          function=run_prediction_split_fct),
                            name='prediction_split')

    backproject_and_split_weights = Node(util.Function(input_names=['trained_model_file',
                                                                    'multimodal_backprojection_info',
                                                                    'data_str',
                                                                    'target_name'],
                                                       output_names=['out_file_list',
                                                                     'out_file_render_list'],
                                                       function=backproject_and_split_weights_fct),
                                         name='backproject_and_split_weights')

    i = 0

    for reg in confound_regression:
        the_out_node_str = 'single_source_model_reg_%s_' % (reg)
        prediction_node_dict[i] = prediction_split.clone(the_out_node_str)
        the_in_node = prediction_node_dict[i]
        the_in_node.inputs.regress_confounds = reg
        the_in_node.inputs.run_cv = run_cv
        the_in_node.inputs.n_jobs_cv = n_jobs_cv
        the_in_node.inputs.run_tuning = run_tuning
        the_in_node.inputs.reverse_split = reverse_split
        the_in_node.inputs.random_state_nki = random_state_nki
        the_in_node.inputs.run_learning_curve = run_learning_curve
        the_in_node.inputs.life_test_size = life_test_size

        wf.connect(select_multimodal_X, 'X_multimodal_selected_file', the_in_node, 'X_file')
        wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')
        wf.connect(subject_selection_infosource, 'selection_criterium', the_in_node, 'selection_criterium')
        wf.connect(select_subjects, 'df_use_pickle_file', the_in_node, 'df_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')

        wf.connect(the_in_node, 'model_out_file', ds, the_out_node_str + 'trained_model')
        wf.connect(the_in_node, 'scatter_file', ds_pdf, the_out_node_str + 'scatter')
        wf.connect(the_in_node, 'brain_age_scatter_file', ds_pdf, the_out_node_str + 'brain_age_scatter')
        wf.connect(the_in_node, 'df_life_out_file', ds_pdf, the_out_node_str + 'predicted_life')
        wf.connect(the_in_node, 'df_nki_out_file', ds_pdf, the_out_node_str + 'predicted_nki')
        wf.connect(the_in_node, 'df_big_out_file', ds_pdf, the_out_node_str + 'predicted')

        wf.connect(the_in_node, 'df_res_out_file', ds_pdf, the_out_node_str + 'results_error')
        wf.connect(the_in_node, 'tuning_curve_file', ds_pdf, the_out_node_str + 'tuning_curve')
        wf.connect(the_in_node, 'scatter_file_cv', ds_pdf, the_out_node_str + 'scatter_cv')
        wf.connect(the_in_node, 'learning_curve_plot_file', ds_pdf, the_out_node_str + 'learning_curve_plot_file.@plot')
        wf.connect(the_in_node, 'learning_curve_df_file', ds_pdf, the_out_node_str + 'learning_curve_df_file.@df')

        # NKI
        if run_2sample_training:
            wf.connect(select_multimodal_X_nki, 'X_multimodal_selected_file', the_in_node, 'X_file_nki')
            wf.connect(select_subjects_nki, 'df_use_pickle_file', the_in_node, 'df_file_nki')

        else:
            the_in_node.inputs.df_file_nki = None
            the_in_node.inputs.X_file_nki = None

        # BACKPROJECT PREDICTION WEIGHTS
        # map weights back to single modality original format (e.g., nifti or matrix)
        the_out_node_str = 'backprojection_single_source_model_reg_%s_' % (reg)
        backprojection_node_dict[i] = backproject_and_split_weights.clone(the_out_node_str)
        the_from_node = prediction_node_dict[i]
        the_in_node = backprojection_node_dict[i]
        wf.connect(the_from_node, 'model_out_file', the_in_node, 'trained_model_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_backprojection_info', the_in_node,
                   'multimodal_backprojection_info')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')
        wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')

        wf.connect(the_in_node, 'out_file_list', ds_pdf, the_out_node_str + '.@weights')
        wf.connect(the_in_node, 'out_file_render_list', ds_pdf, the_out_node_str + 'renders.@renders')




    ###############################################################################################################
    # RUN WF
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
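The loop above clones one template prediction node per confound-regression setting. A minimal, self-contained sketch of that clone-per-variant pattern, with a hypothetical demo workflow and echo function standing in for the project's nodes:

from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.utility as util

def echo(x):
    return x

# template node that the loop clones once per setting
template = Node(util.Function(input_names=['x'], output_names=['x_out'], function=echo),
                name='template')

wf_demo = Workflow(name='clone_demo')
for setting in ['none', 'compcor', 'motion24']:
    node = template.clone('echo_%s' % setting)  # clone() gives each variant its own name and working dir
    node.inputs.x = setting
    wf_demo.add_nodes([node])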
Example #53
def learning_prepare_data_wf(working_dir,
                             ds_dir,
                             template_lookup_dict,
                             behav_file,
                             qc_file,
                             in_data_name_list,
                             data_lookup_dict,
                             use_n_procs,
                             plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from prepare_data_utils import vectorize_and_aggregate
    from itertools import chain

    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if isinstance(i, list) else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))


    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='learning_prepare_data_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
                                                                       'remove_unnecessary_outputs': False,
                                                                       'job_finished_timeout': 120,
                                                                       'hash_method': 'timestamp'})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]

    ds_X = Node(nio.DataSink(), name='ds_X')
    ds_X.inputs.base_directory = os.path.join(ds_dir, 'vectorized_aggregated_data')

    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False


    #####################################
    # SET ITERATORS
    #####################################
    # SUBJECTS ITERATOR
    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)



    ###############################################################################################################
    # GET SUBJECTS INFO
    # create subjects list based on selection criteria

    def create_df_fct(behav_file, qc_file):
        import pandas as pd
        import os
        df = pd.read_pickle(behav_file)
        qc = pd.read_pickle(qc_file)
        df_all = qc.join(df, how='inner')

        assert df_all.index.is_unique, 'duplicates in df index; fix before continuing.'

        df_all_subjects_pickle_file = os.path.abspath('df_all.pkl')
        df_all.to_pickle(df_all_subjects_pickle_file)

        full_subjects_list = df_all.index.values

        return df_all_subjects_pickle_file, full_subjects_list

    create_df = Node(util.Function(input_names=['behav_file', 'qc_file'],
                                   output_names=['df_all_subjects_pickle_file', 'full_subjects_list'],
                                   function=create_df_fct),
                     name='create_df')
    create_df.inputs.behav_file = behav_file
    create_df.inputs.qc_file = qc_file


    ###############################################################################################################
    # CREATE FILE LIST
    # of files that will be aggregated

    def create_file_list_fct(subjects_list, in_data_name, data_lookup_dict, template_lookup_dict):
        lookup = data_lookup_dict[in_data_name]

        file_list = [lookup['path_str'].format(subject_id=s) for s in subjects_list]

        matrix_name = lookup.get('matrix_name')
        parcellation_path = lookup.get('parcellation_path')

        fwhm = lookup.get('fwhm')
        if fwhm == 0:  # an fwhm of 0 means no smoothing
            fwhm = None

        if 'mask_name' in lookup:
            mask_path = template_lookup_dict[lookup['mask_name']]
        else:
            mask_path = None

        use_diagonal = lookup.get('use_diagonal', False)
        use_fishers_z = lookup.get('use_fishers_z', False)
        df_col_names = lookup.get('df_col_names')

        return file_list, matrix_name, parcellation_path, fwhm, mask_path, use_diagonal, use_fishers_z, df_col_names

    create_file_list = Node(util.Function(input_names=['subjects_list',
                                                       'in_data_name',
                                                       'data_lookup_dict',
                                                       'template_lookup_dict',
                                                       ],
                                          output_names=['file_list',
                                                        'matrix_name',
                                                        'parcellation_path',
                                                        'fwhm',
                                                        'mask_path',
                                                        'use_diagonal',
                                                        'use_fishers_z',
                                                        'df_col_names'],
                                          function=create_file_list_fct),
                            name='create_file_list')
    wf.connect(create_df, 'full_subjects_list', create_file_list, 'subjects_list')
    wf.connect(in_data_name_infosource, 'in_data_name', create_file_list, 'in_data_name')
    create_file_list.inputs.data_lookup_dict = data_lookup_dict
    create_file_list.inputs.template_lookup_dict = template_lookup_dict




    ###############################################################################################################
    # VECTORIZE AND AGGREGATE SUBJECTS
    # stack single subject np arrays vertically
    vectorize_aggregate_subjects = Node(util.Function(input_names=['in_data_file_list',
                                                                   'mask_file',
                                                                   'matrix_name',
                                                                   'parcellation_path',
                                                                   'fwhm',
                                                                   'use_diagonal',
                                                                   'use_fishers_z',
                                                                   'df_file',
                                                                   'df_col_names'],
                                                      output_names=['vectorized_aggregated_file',
                                                                    'unimodal_backprojection_info_file'],
                                                      function=vectorize_and_aggregate),
                                        name='vectorize_aggregate_subjects')
    wf.connect(create_file_list, 'file_list', vectorize_aggregate_subjects, 'in_data_file_list')
    wf.connect(create_file_list, 'mask_path', vectorize_aggregate_subjects, 'mask_file')
    wf.connect(create_file_list, 'matrix_name', vectorize_aggregate_subjects, 'matrix_name')
    wf.connect(create_file_list, 'parcellation_path', vectorize_aggregate_subjects, 'parcellation_path')
    wf.connect(create_file_list, 'fwhm', vectorize_aggregate_subjects, 'fwhm')
    wf.connect(create_file_list, 'use_diagonal', vectorize_aggregate_subjects, 'use_diagonal')
    wf.connect(create_file_list, 'use_fishers_z', vectorize_aggregate_subjects, 'use_fishers_z')
    wf.connect(create_df, 'df_all_subjects_pickle_file', vectorize_aggregate_subjects, 'df_file')
    wf.connect(create_file_list, 'df_col_names', vectorize_aggregate_subjects, 'df_col_names')

    wf.connect(create_df, 'df_all_subjects_pickle_file', ds_X, 'df_all_subjects_pickle_file')
    wf.connect(vectorize_aggregate_subjects, 'vectorized_aggregated_file', ds_X, 'X_file')
    wf.connect(vectorize_aggregate_subjects, 'unimodal_backprojection_info_file', ds_X, 'unimodal_backprojection_info_file')



    #####################################
    # RUN WF
    #####################################
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
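A hypothetical invocation of the function above; every path and lookup entry is a placeholder, not a value from the original project:

template_lookup_dict = {'brain_mask_3mm': '/templates/MNI152_T1_3mm_brain_mask.nii.gz'}  # placeholder
data_lookup_dict = {
    'alff': {'path_str': '/data/metrics/{subject_id}/alff.nii.gz',  # placeholder
             'mask_name': 'brain_mask_3mm',
             'fwhm': 5},
}

learning_prepare_data_wf(working_dir='/scratch/learning_wd',
                         ds_dir='/results',
                         template_lookup_dict=template_lookup_dict,
                         behav_file='/data/behav_df.pkl',
                         qc_file='/data/qc_df.pkl',
                         in_data_name_list=[['alff']],
                         data_lookup_dict=data_lookup_dict,
                         use_n_procs=4,
                         plugin_name='MultiProc')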
Example #54
def downsample_surfs(subject_id,
                     working_dir,
                     freesurfer_dir,
                     ds_dir,
                     plugin_name,
                     use_n_procs):
    '''
    Resample native surface measures (e.g., thickness) to lower-resolution
    fsaverage targets (fsaverage3/4/5).
    '''
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as fs

    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    fs.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='freesurfer_downsample')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 15})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.parameterization = False



    #####################
    # ITERATORS
    #####################
    # HEMI / SURFACE MEASURE / FWHM / TARGET ITERATOR
    infosource = Node(util.IdentityInterface(fields=['hemi', 'surf_measure', 'fwhm', 'target']), name='infosource')
    infosource.iterables = [('hemi', ['lh', 'rh']),
                            ('surf_measure', ['thickness', 'area']),
                            ('fwhm', [0, 5, 10, 20]),
                            ('target', ['fsaverage3', 'fsaverage4', 'fsaverage5']),
                            ]

    downsample = Node(fs.model.MRISPreproc(), name='downsample')
    downsample.inputs.subjects = [subject_id]
    wf.connect(infosource, 'target', downsample, 'target')
    wf.connect(infosource, 'hemi', downsample, 'hemi')
    wf.connect(infosource, 'surf_measure', downsample, 'surf_measure')
    wf.connect(infosource, 'fwhm', downsample, 'fwhm_source')

    rename = Node(util.Rename(format_string='%(hemi)s.%(surf_measure)s.%(target)s.%(fwhm)smm'), name='rename')
    rename.inputs.keep_ext = True
    wf.connect(infosource, 'target', rename, 'target')
    wf.connect(infosource, 'hemi', rename, 'hemi')
    wf.connect(infosource, 'surf_measure', rename, 'surf_measure')
    wf.connect(infosource, 'fwhm', rename, 'fwhm')
    wf.connect(downsample, 'out_file', rename, 'in_file')

    wf.connect(rename, 'out_file', ds, 'surfs.@surfs')





    #####################################
    # RUN WF
    #####################################
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
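A hypothetical call of the downsampling workflow above; all directories are placeholders:

downsample_surfs(subject_id='sub001',
                 working_dir='/scratch/surf_wd',
                 freesurfer_dir='/data/freesurfer',
                 ds_dir='/results/surfs',
                 plugin_name='MultiProc',
                 use_n_procs=4)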
Example #55
"""
Created on Tue Aug 25 11:39:05 2015

@author: craigmoodie
"""

from nipype.pipeline.engine import Workflow, Node, MapNode
from variables import data_dir, work_dir, subject_list, plugin, plugin_args


surface_workflow = Workflow(name="qc_workflow")

surface_workflow.base_dir = work_dir


from nipype import SelectFiles
templates = dict(T1="*_{subject_id}_*/T1w_MPR_BIC_v1/*00001.nii*")
file_list = Node(SelectFiles(templates), name="EPI_and_T1_File_Selection")
file_list.inputs.base_directory = data_dir
file_list.iterables = [("subject_id", subject_list)]


from nipype.interfaces.freesurfer import ReconAll
reconall = Node(ReconAll(), name="Recon_All")
reconall.inputs.subject_id = "subject_id"  # literal placeholder; see the infosource sketch after this example
reconall.inputs.directive = 'all'
reconall.inputs.subjects_dir = data_dir
#reconall.inputs.T1_files = "T1"
#reconall.run()

surface_workflow.connect(file_list, "T1", reconall, "T1_files")
surface_workflow.run(plugin=plugin, plugin_args=plugin_args)
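The literal subject_id above is only a placeholder. A common nipype pattern is to drive both SelectFiles and ReconAll from a single IdentityInterface iterator instead of setting iterables on file_list; a sketch under that assumption:

from nipype.pipeline.engine import Node
from nipype.interfaces.utility import IdentityInterface

infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = [('subject_id', subject_list)]

# would replace file_list.iterables and the hard-coded reconall subject_id
surface_workflow.connect(infosource, 'subject_id', file_list, 'subject_id')
surface_workflow.connect(infosource, 'subject_id', reconall, 'subject_id')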
Example #56
def calc_local_metrics(cfg):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as freesurfer

    import CPAC.alff.alff as cpac_alff
    import CPAC.reho.reho as cpac_reho
    import CPAC.utils.utils as cpac_utils
    import CPAC.vmhc.vmhc as cpac_vmhc
    import CPAC.registration.registration as cpac_registration
    import CPAC.network_centrality.z_score as cpac_centrality_z_score

    import utils as calc_metrics_utils


    # INPUT PARAMETERS
    dicom_dir = cfg['dicom_dir']
    preprocessed_data_dir = cfg['preprocessed_data_dir']

    working_dir = cfg['working_dir']
    freesurfer_dir = cfg['freesurfer_dir']
    template_dir = cfg['template_dir']
    script_dir = cfg['script_dir']
    ds_dir = cfg['ds_dir']

    subject_id = cfg['subject_id']
    TR_list = cfg['TR_list']

    vols_to_drop = cfg['vols_to_drop']
    rois_list = cfg['rois_list']
    lp_cutoff_freq = cfg['lp_cutoff_freq']
    hp_cutoff_freq = cfg['hp_cutoff_freq']
    use_fs_brainmask = cfg['use_fs_brainmask']

    use_n_procs = cfg['use_n_procs']
    plugin_name = cfg['plugin_name']



    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='LeiCA_metrics')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.substitutions = [('_TR_id_', 'TR_')]
    ds.inputs.regexp_substitutions = [('_variabilty_MNIspace_3mm[0-9]*/', ''), ('_z_score[0-9]*/', '')]


    #####################################
    # SET ITERATORS
    #####################################
    # GET SCAN TR_ID ITERATOR
    scan_infosource = Node(util.IdentityInterface(fields=['TR_id']), name='scan_infosource')
    scan_infosource.iterables = ('TR_id', TR_list)



    # get atlas data
    templates_atlases = {  # 'GM_mask_MNI_2mm': 'SPM_GM/SPM_GM_mask_2mm.nii.gz',
                           # 'GM_mask_MNI_3mm': 'SPM_GM/SPM_GM_mask_3mm.nii.gz',
                           'FSL_MNI_3mm_template': 'MNI152_T1_3mm_brain.nii.gz',
                           'vmhc_symm_brain': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_symmetric.nii.gz',
                           'vmhc_symm_brain_3mm': 'cpac_image_resources/symmetric/MNI152_T1_3mm_brain_symmetric.nii.gz',
                           'vmhc_symm_skull': 'cpac_image_resources/symmetric/MNI152_T1_2mm_symmetric.nii.gz',
                           'vmhc_symm_brain_mask_dil': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz',
                           'vmhc_config_file_2mm': 'cpac_image_resources/symmetric/T1_2_MNI152_2mm_symmetric.cnf'
                           }

    selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
                                                      base_directory=template_dir),
                                      name="selectfiles_anat_templates")


    # GET SUBJECT SPECIFIC FUNCTIONAL AND STRUCTURAL DATA
    selectfiles_templates = {
        'epi_2_MNI_warp': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_MNI_warp/TR_{TR_id}/*.nii.gz',
        'epi_mask': '{subject_id}/rsfMRI_preprocessing/masks/brain_mask_epiSpace/TR_{TR_id}/*.nii.gz',
        'preproc_epi_full_spectrum': '{subject_id}/rsfMRI_preprocessing/epis/01_denoised/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp': '{subject_id}/rsfMRI_preprocessing/epis/02_denoised_BP/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp_tNorm': '{subject_id}/rsfMRI_preprocessing/epis/03_denoised_BP_tNorm/TR_{TR_id}/*.nii.gz',
        'epi_2_struct_mat': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_struct_mat/TR_{TR_id}/*.mat',
        't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
        't1w_brain': '{subject_id}/rsfMRI_preprocessing/struct_prep/t1w_brain/t1w_reoriented_maths.nii.gz',
    }

    selectfiles = Node(nio.SelectFiles(selectfiles_templates,
                                       base_directory=preprocessed_data_dir),
                       name="selectfiles")
    wf.connect(scan_infosource, 'TR_id', selectfiles, 'TR_id')
    selectfiles.inputs.subject_id = subject_id



    # CREATE TRANSFORMATIONS
    # create MNI-to-EPI warp
    MNI_2_epi_warp = Node(fsl.InvWarp(), name='MNI_2_epi_warp')
    # the reference must be an EPI-space image; it is wired in from selectfiles below
    # (also hard-coding inputs.reference here would clash with that connection)
    wf.connect(selectfiles, 'epi_mask', MNI_2_epi_warp, 'reference')
    wf.connect(selectfiles, 'epi_2_MNI_warp', MNI_2_epi_warp, 'warp')


    # # CREATE GM MASK IN EPI SPACE
    # GM_mask_epiSpace = Node(fsl.ApplyWarp(), name='GM_mask_epiSpace')
    # GM_mask_epiSpace.inputs.out_file = 'GM_mask_epiSpace.nii.gz'
    #
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_2mm', GM_mask_epiSpace, 'in_file')
    # wf.connect(selectfiles, 'epi_mask', GM_mask_epiSpace, 'ref_file')
    # wf.connect(MNI_2_epi_warp, 'inverse_warp', GM_mask_epiSpace, 'field_file')
    # wf.connect(GM_mask_epiSpace, 'out_file', ds, 'GM_mask_epiSpace')



    # fixme
    # # CREATE TS IN MNI SPACE
    # # is it ok to apply the 2mm warpfield to the 3mm template?
    # # seems ok: https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind0904&L=FSL&P=R14011&1=FSL&9=A&J=on&d=No+Match%3BMatch%3BMatches&z=4
    # epi_bp_MNIspace_3mm = Node(fsl.ApplyWarp(), name='epi_bp_MNIspace_3mm')
    # epi_bp_MNIspace_3mm.inputs.interp = 'spline'
    # epi_bp_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    # wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', epi_bp_MNIspace_3mm, 'ref_file')
    # wf.connect(selectfiles, 'preproc_epi_bp', epi_bp_MNIspace_3mm, 'in_file')
    # wf.connect(selectfiles, 'epi_2_MNI_warp', epi_bp_MNIspace_3mm, 'field_file')


    # CREATE EPI MASK IN MNI SPACE
    epi_mask_MNIspace_3mm = Node(fsl.ApplyWarp(), name='epi_mask_MNIspace_3mm')
    epi_mask_MNIspace_3mm.inputs.interp = 'nn'
    epi_mask_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', epi_mask_MNIspace_3mm, 'ref_file')
    wf.connect(selectfiles, 'epi_mask', epi_mask_MNIspace_3mm, 'in_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', epi_mask_MNIspace_3mm, 'field_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', ds, 'epi_mask_MNIspace_3mm')


    #####################
    # CALCULATE METRICS
    #####################

    # f/ALFF
    alff = cpac_alff.create_alff('alff')
    alff.inputs.hp_input.hp = 0.01
    alff.inputs.lp_input.lp = 0.1
    wf.connect(selectfiles, 'preproc_epi_full_spectrum', alff, 'inputspec.rest_res')
    # wf.connect(GM_mask_epiSpace, 'out_file', alff, 'inputspec.rest_mask')
    wf.connect(selectfiles, 'epi_mask', alff, 'inputspec.rest_mask')
    wf.connect(alff, 'outputspec.alff_img', ds, 'alff.alff')
    wf.connect(alff, 'outputspec.falff_img', ds, 'alff.falff')



    # f/ALFF 2 MNI
    # fixme spline or default?
    alff_MNIspace_3mm = Node(fsl.ApplyWarp(), name='alff_MNIspace_3mm')
    alff_MNIspace_3mm.inputs.interp = 'spline'
    alff_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', alff_MNIspace_3mm, 'ref_file')
    wf.connect(alff, 'outputspec.alff_img', alff_MNIspace_3mm, 'in_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', alff_MNIspace_3mm, 'field_file')
    wf.connect(alff_MNIspace_3mm, 'out_file', ds, 'alff.alff_MNI_3mm')

    falff_MNIspace_3mm = Node(fsl.ApplyWarp(), name='falff_MNIspace_3mm')
    falff_MNIspace_3mm.inputs.interp = 'spline'
    falff_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', falff_MNIspace_3mm, 'ref_file')
    wf.connect(alff, 'outputspec.falff_img', falff_MNIspace_3mm, 'in_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', falff_MNIspace_3mm, 'field_file')
    wf.connect(falff_MNIspace_3mm, 'out_file', ds, 'alff.falff_MNI_3mm')



    # f/ALFF_MNI Z-SCORE
    alff_MNIspace_3mm_Z = cpac_utils.get_zscore(input_name='alff_MNIspace_3mm', wf_name='alff_MNIspace_3mm_Z')
    wf.connect(alff_MNIspace_3mm, 'out_file', alff_MNIspace_3mm_Z, 'inputspec.input_file')
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', alff_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', alff_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(alff_MNIspace_3mm_Z, 'outputspec.z_score_img', ds, 'alff.alff_MNI_3mm_Z')

    falff_MNIspace_3mm_Z = cpac_utils.get_zscore(input_name='falff_MNIspace_3mm', wf_name='falff_MNIspace_3mm_Z')
    wf.connect(falff_MNIspace_3mm, 'out_file', falff_MNIspace_3mm_Z, 'inputspec.input_file')
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', falff_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', falff_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(falff_MNIspace_3mm_Z, 'outputspec.z_score_img', ds, 'alff.falff_MNI_3mm_Z')


    # f/ALFF_MNI STANDARDIZE BY MEAN
    alff_MNIspace_3mm_standardized_mean = calc_metrics_utils.standardize_divide_by_mean(
        wf_name='alff_MNIspace_3mm_standardized_mean')
    wf.connect(alff_MNIspace_3mm, 'out_file', alff_MNIspace_3mm_standardized_mean, 'inputnode.in_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', alff_MNIspace_3mm_standardized_mean, 'inputnode.mask_file')
    wf.connect(alff_MNIspace_3mm_standardized_mean, 'outputnode.out_file', ds, 'alff.alff_MNI_3mm_standardized_mean')

    falff_MNIspace_3mm_standardized_mean = calc_metrics_utils.standardize_divide_by_mean(
        wf_name='falff_MNIspace_3mm_standardized_mean')
    wf.connect(falff_MNIspace_3mm, 'out_file', falff_MNIspace_3mm_standardized_mean, 'inputnode.in_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', falff_MNIspace_3mm_standardized_mean, 'inputnode.mask_file')
    wf.connect(falff_MNIspace_3mm_standardized_mean, 'outputnode.out_file', ds, 'alff.falff_MNI_3mm_standardized_mean')





    # REHO
    reho = cpac_reho.create_reho()
    reho.inputs.inputspec.cluster_size = 27
    wf.connect(selectfiles, 'preproc_epi_bp', reho, 'inputspec.rest_res_filt')
    # wf.connect(GM_mask_epiSpace, 'out_file', reho, 'inputspec.rest_mask')
    wf.connect(selectfiles, 'epi_mask', reho, 'inputspec.rest_mask')
    wf.connect(reho, 'outputspec.raw_reho_map', ds, 'reho.reho')



    # REHO 2 MNI
    # fixme spline or default?
    reho_MNIspace_3mm = Node(fsl.ApplyWarp(), name='reho_MNIspace_3mm')
    reho_MNIspace_3mm.inputs.interp = 'spline'
    reho_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', reho_MNIspace_3mm, 'ref_file')
    wf.connect(reho, 'outputspec.raw_reho_map', reho_MNIspace_3mm, 'in_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', reho_MNIspace_3mm, 'field_file')
    wf.connect(reho_MNIspace_3mm, 'out_file', ds, 'reho.reho_MNI_3mm')



    # REHO_MNI Z-SCORE
    reho_MNIspace_3mm_Z = cpac_utils.get_zscore(input_name='reho_MNIspace_3mm', wf_name='reho_MNIspace_3mm_Z')
    wf.connect(reho_MNIspace_3mm, 'out_file', reho_MNIspace_3mm_Z, 'inputspec.input_file')
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', reho_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', reho_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(reho_MNIspace_3mm_Z, 'outputspec.z_score_img', ds, 'reho.reho_MNI_3mm_Z')



    # REHO_MNI STANDARDIZE BY MEAN
    reho_MNIspace_3mm_standardized_mean = calc_metrics_utils.standardize_divide_by_mean(
        wf_name='reho_MNIspace_3mm_standardized_mean')
    wf.connect(reho_MNIspace_3mm, 'out_file', reho_MNIspace_3mm_standardized_mean, 'inputnode.in_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', reho_MNIspace_3mm_standardized_mean, 'inputnode.mask_file')
    wf.connect(reho_MNIspace_3mm_standardized_mean, 'outputnode.out_file', ds, 'reho.reho_MNI_3mm_standardized_mean')



    # VMHC
    # create registration to symmetrical MNI template
    struct_2_MNI_symm = cpac_registration.create_nonlinear_register(name='struct_2_MNI_symm')
    wf.connect(selectfiles_anat_templates, 'vmhc_config_file_2mm', struct_2_MNI_symm, 'inputspec.fnirt_config')
    wf.connect(selectfiles_anat_templates, 'vmhc_symm_brain', struct_2_MNI_symm, 'inputspec.reference_brain')
    wf.connect(selectfiles_anat_templates, 'vmhc_symm_skull', struct_2_MNI_symm, 'inputspec.reference_skull')
    wf.connect(selectfiles_anat_templates, 'vmhc_symm_brain_mask_dil', struct_2_MNI_symm, 'inputspec.ref_mask')
    wf.connect(selectfiles, 't1w', struct_2_MNI_symm, 'inputspec.input_skull')
    wf.connect(selectfiles, 't1w_brain', struct_2_MNI_symm, 'inputspec.input_brain')

    wf.connect(struct_2_MNI_symm, 'outputspec.output_brain', ds, 'vmhc.symm_reg.@output_brain')
    wf.connect(struct_2_MNI_symm, 'outputspec.linear_xfm', ds, 'vmhc.symm_reg.@linear_xfm')
    wf.connect(struct_2_MNI_symm, 'outputspec.invlinear_xfm', ds, 'vmhc.symm_reg.@invlinear_xfm')
    wf.connect(struct_2_MNI_symm, 'outputspec.nonlinear_xfm', ds, 'vmhc.symm_reg.@nonlinear_xfm')



    # fixme
    vmhc = cpac_vmhc.create_vmhc(use_ants=False, name='vmhc')
    vmhc.inputs.fwhm_input.fwhm = 4
    wf.connect(selectfiles_anat_templates, 'vmhc_symm_brain_3mm', vmhc, 'inputspec.standard_for_func')
    wf.connect(selectfiles, 'preproc_epi_bp_tNorm', vmhc, 'inputspec.rest_res')
    wf.connect(selectfiles, 'epi_2_struct_mat', vmhc, 'inputspec.example_func2highres_mat')
    wf.connect(struct_2_MNI_symm, 'outputspec.nonlinear_xfm', vmhc, 'inputspec.fnirt_nonlinear_warp')
    # wf.connect(GM_mask_epiSpace, 'out_file', vmhc, 'inputspec.rest_mask')
    wf.connect(selectfiles, 'epi_mask', vmhc, 'inputspec.rest_mask')

    wf.connect(vmhc, 'outputspec.rest_res_2symmstandard', ds, 'vmhc.rest_res_2symmstandard')
    wf.connect(vmhc, 'outputspec.VMHC_FWHM_img', ds, 'vmhc.VMHC_FWHM_img')
    wf.connect(vmhc, 'outputspec.VMHC_Z_FWHM_img', ds, 'vmhc.VMHC_Z_FWHM_img')
    wf.connect(vmhc, 'outputspec.VMHC_Z_stat_FWHM_img', ds, 'vmhc.VMHC_Z_stat_FWHM_img')



    # VARIABILITY SCORES
    variability = Node(util.Function(input_names=['in_file'],
                                     output_names=['out_file_list'],
                                     function=calc_metrics_utils.calc_variability),
                       name='variability')
    wf.connect(selectfiles, 'preproc_epi_bp', variability, 'in_file')
    wf.connect(variability, 'out_file_list', ds, 'variability.subjectSpace.@out_files')


    # #fixme spline?
    variabilty_MNIspace_3mm = MapNode(fsl.ApplyWarp(), iterfield=['in_file'], name='variabilty_MNIspace_3mm')
    variabilty_MNIspace_3mm.inputs.interp = 'spline'
    variabilty_MNIspace_3mm.plugin_args = {'submit_specs': 'request_memory = 4000'}
    wf.connect(selectfiles_anat_templates, 'FSL_MNI_3mm_template', variabilty_MNIspace_3mm, 'ref_file')
    wf.connect(selectfiles, 'epi_2_MNI_warp', variabilty_MNIspace_3mm, 'field_file')
    wf.connect(variability, 'out_file_list', variabilty_MNIspace_3mm, 'in_file')
    wf.connect(variabilty_MNIspace_3mm, 'out_file', ds, 'variability.MNI_3mm.@out_file')


    # CALC Z SCORE
    variabilty_MNIspace_3mm_Z = cpac_centrality_z_score.get_cent_zscore(wf_name='variabilty_MNIspace_3mm_Z')
    wf.connect(variabilty_MNIspace_3mm, 'out_file', variabilty_MNIspace_3mm_Z, 'inputspec.input_file')
    # wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', variabilty_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', variabilty_MNIspace_3mm_Z, 'inputspec.mask_file')
    wf.connect(variabilty_MNIspace_3mm_Z, 'outputspec.z_score_img', ds, 'variability.MNI_3mm_Z.@out_file')



    # STANDARDIZE BY MEAN
    variabilty_MNIspace_3mm_standardized_mean = calc_metrics_utils.standardize_divide_by_mean(
        wf_name='variabilty_MNIspace_3mm_standardized_mean')
    wf.connect(variabilty_MNIspace_3mm, 'out_file', variabilty_MNIspace_3mm_standardized_mean, 'inputnode.in_file')
    wf.connect(epi_mask_MNIspace_3mm, 'out_file', variabilty_MNIspace_3mm_standardized_mean, 'inputnode.mask_file')
    wf.connect(variabilty_MNIspace_3mm_standardized_mean, 'outputnode.out_file', ds,
               'variability.MNI_3mm_standardized_mean.@out_file')

    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
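The ApplyWarp-to-3mm-MNI block recurs above for alff, falff, reho, and the variability maps. A small factory that could factor out the shared settings (hypothetical, not part of the original source):

import nipype.interfaces.fsl as fsl
from nipype.pipeline.engine import Node

def make_mni3mm_applywarp(name, interp='spline'):
    """Build an ApplyWarp node preconfigured like the MNI-3mm warp nodes above."""
    node = Node(fsl.ApplyWarp(), name=name)
    node.inputs.interp = interp
    node.plugin_args = {'submit_specs': 'request_memory = 4000'}
    return node

# usage would mirror the inline blocks, e.g.:
# alff_MNIspace_3mm = make_mni3mm_applywarp('alff_MNIspace_3mm')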
Example #57
def calc_centrality_metrics(cfg):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as freesurfer

    import CPAC.network_centrality.resting_state_centrality as cpac_centrality
    import CPAC.network_centrality.z_score as cpac_centrality_z_score


    # INPUT PARAMETERS
    dicom_dir = cfg['dicom_dir']
    preprocessed_data_dir = cfg['preprocessed_data_dir']

    working_dir = cfg['working_dir']
    freesurfer_dir = cfg['freesurfer_dir']
    template_dir = cfg['template_dir']
    script_dir = cfg['script_dir']
    ds_dir = cfg['ds_dir']

    subjects_list = cfg['subjects_list']
    TR_list = cfg['TR_list']

    use_n_procs = cfg['use_n_procs']
    plugin_name = cfg['plugin_name']



    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='LeiCA_metrics')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.substitutions = [('_TR_id_', 'TR_')]
    ds.inputs.regexp_substitutions = [('_subject_id_[A0-9]*/', ''),
                                      ('_z_score[0-9]*/', '')]

    #####################################
    # SET ITERATORS
    #####################################
    # GET SCAN TR_ID ITERATOR
    scan_infosource = Node(util.IdentityInterface(fields=['TR_id']), name='scan_infosource')
    scan_infosource.iterables = ('TR_id', TR_list)

    subjects_infosource = Node(util.IdentityInterface(fields=['subject_id']), name='subjects_infosource')
    subjects_infosource.iterables = ('subject_id', subjects_list)

    def add_subject_id_to_ds_dir_fct(subject_id, ds_path):
        import os
        out_path = os.path.join(ds_path, subject_id)
        return out_path

    add_subject_id_to_ds_dir = Node(util.Function(input_names=['subject_id', 'ds_path'],
                                                  output_names=['out_path'],
                                                  function=add_subject_id_to_ds_dir_fct),
                                    name='add_subject_id_to_ds_dir')
    wf.connect(subjects_infosource, 'subject_id', add_subject_id_to_ds_dir, 'subject_id')
    add_subject_id_to_ds_dir.inputs.ds_path = ds_dir

    wf.connect(add_subject_id_to_ds_dir, 'out_path', ds, 'base_directory')


    # get atlas data
    templates_atlases = {'GM_mask_MNI_2mm': 'SPM_GM/SPM_GM_mask_2mm.nii.gz',
                         'GM_mask_MNI_3mm': 'SPM_GM/SPM_GM_mask_3mm.nii.gz',
                         'FSL_MNI_3mm_template': 'MNI152_T1_3mm_brain.nii.gz',
                         'vmhc_symm_brain': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_symmetric.nii.gz',
                         'vmhc_symm_brain_3mm': 'cpac_image_resources/symmetric/MNI152_T1_3mm_brain_symmetric.nii.gz',
                         'vmhc_symm_skull': 'cpac_image_resources/symmetric/MNI152_T1_2mm_symmetric.nii.gz',
                         'vmhc_symm_brain_mask_dil': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz',
                         'vmhc_config_file_2mm': 'cpac_image_resources/symmetric/T1_2_MNI152_2mm_symmetric.cnf'
                         }

    selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
                                                      base_directory=template_dir),
                                      name="selectfiles_anat_templates")


    # GET SUBJECT SPECIFIC FUNCTIONAL AND STRUCTURAL DATA
    selectfiles_templates = {
        'epi_2_MNI_warp': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_MNI_warp/TR_{TR_id}/*.nii.gz',
        'epi_mask': '{subject_id}/rsfMRI_preprocessing/masks/brain_mask_epiSpace/TR_{TR_id}/*.nii.gz',
        'preproc_epi_full_spectrum': '{subject_id}/rsfMRI_preprocessing/epis/01_denoised/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp': '{subject_id}/rsfMRI_preprocessing/epis/02_denoised_BP/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp_tNorm': '{subject_id}/rsfMRI_preprocessing/epis/03_denoised_BP_tNorm/TR_{TR_id}/*.nii.gz',
        'epi_2_struct_mat': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_struct_mat/TR_{TR_id}/*.mat',
        't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
        't1w_brain': '{subject_id}/rsfMRI_preprocessing/struct_prep/t1w_brain/t1w_reoriented_maths.nii.gz',
        'epi_bp_tNorm_MNIspace_3mm': '{subject_id}/rsfMRI_preprocessing/epis_MNI_3mm/03_denoised_BP_tNorm/TR_645/residual_filt_norm_warp.nii.gz'
    }

    selectfiles = Node(nio.SelectFiles(selectfiles_templates,
                                       base_directory=preprocessed_data_dir),
                       name="selectfiles")
    wf.connect(scan_infosource, 'TR_id', selectfiles, 'TR_id')
    wf.connect(subjects_infosource, 'subject_id', selectfiles, 'subject_id')
    # selectfiles.inputs.subject_id = subject_id

    # CREATE TRANSFORMATIONS
    # create MNI-to-EPI warp
    MNI_2_epi_warp = Node(fsl.InvWarp(), name='MNI_2_epi_warp')
    # the reference must be an EPI-space image; it is wired in from selectfiles below
    wf.connect(selectfiles, 'epi_mask', MNI_2_epi_warp, 'reference')
    wf.connect(selectfiles, 'epi_2_MNI_warp', MNI_2_epi_warp, 'warp')

    #####################
    # CALCULATE METRICS
    #####################

    # DEGREE CENTRALITY
    # fixme: memory request tuned up empirically (earlier values: a_mem = 5, request_memory = 6000)
    a_mem = 20
    dc = cpac_centrality.create_resting_state_graphs(allocated_memory=a_mem, wf_name='dc')
    dc.plugin_args = {'submit_specs': 'request_memory = 20000'}

    dc.inputs.inputspec.method_option = 0  # 0 for degree centrality, 1 for eigenvector centrality, 2 for lFCD
    dc.inputs.inputspec.threshold_option = 0  # 0 for probability p_value, 1 for sparsity threshold, any other for threshold value
    dc.inputs.inputspec.threshold = 0.0001
    dc.inputs.inputspec.weight_options = [True, True]  # [binarize, weighted]
    wf.connect(selectfiles, 'epi_bp_tNorm_MNIspace_3mm', dc, 'inputspec.subject')
    wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', dc, 'inputspec.template')
    wf.connect(dc, 'outputspec.centrality_outputs', ds, 'metrics.centrality.dc.@centrality_outputs')
    wf.connect(dc, 'outputspec.correlation_matrix', ds, 'metrics.centrality.dc.@correlation_matrix')
    wf.connect(dc, 'outputspec.graph_outputs', ds, 'metrics.centrality.dc.@graph_outputs')

    # DC Z-SCORE
    dc_Z = cpac_centrality_z_score.get_cent_zscore(wf_name='dc_Z')
    wf.connect(dc, 'outputspec.centrality_outputs', dc_Z, 'inputspec.input_file')
    wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', dc_Z, 'inputspec.mask_file')
    wf.connect(dc_Z, 'outputspec.z_score_img', ds, 'metrics.centrality.dc_z.@output')

    a_mem = 20
    evc = cpac_centrality.create_resting_state_graphs(allocated_memory=a_mem, wf_name='evc')
    evc.plugin_args = {'submit_specs': 'request_memory = 20000'}

    evc.inputs.inputspec.method_option = 1  # 0 for degree centrality, 1 for eigenvector centrality, 2 for lFCD
    evc.inputs.inputspec.threshold_option = 0  # 0 for probability p_value, 1 for sparsity threshold, any other for threshold value
    evc.inputs.inputspec.threshold = 0.0001
    evc.inputs.inputspec.weight_options = [True, True]  # [binarize, weighted]
    wf.connect(selectfiles, 'epi_bp_tNorm_MNIspace_3mm', evc, 'inputspec.subject')
    wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', evc, 'inputspec.template')
    wf.connect(evc, 'outputspec.centrality_outputs', ds, 'metrics.centrality.evc.@centrality_outputs')
    wf.connect(evc, 'outputspec.correlation_matrix', ds, 'metrics.centrality.evc.@correlation_matrix')
    wf.connect(evc, 'outputspec.graph_outputs', ds, 'metrics.centrality.evc.@graph_outputs')

    # EVC Z-SCORE
    evc_Z = cpac_centrality_z_score.get_cent_zscore(wf_name='evc_Z')
    wf.connect(evc, 'outputspec.centrality_outputs', evc_Z, 'inputspec.input_file')
    wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', evc_Z, 'inputspec.mask_file')
    wf.connect(evc_Z, 'outputspec.z_score_img', ds, 'metrics.centrality.evc_z.@output')

    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
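A hypothetical cfg dict for the function above; the keys mirror the reads at the top of calc_centrality_metrics, the values are placeholders:

cfg = {'dicom_dir': '/data/dicom',
       'preprocessed_data_dir': '/data/preprocessed',
       'working_dir': '/scratch/centrality_wd',
       'freesurfer_dir': '/data/freesurfer',
       'template_dir': '/data/templates',
       'script_dir': '/code',
       'ds_dir': '/results',
       'subjects_list': ['sub001', 'sub002'],
       'TR_list': ['645'],
       'use_n_procs': 8,
       'plugin_name': 'MultiProc'}
calc_centrality_metrics(cfg)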
Example #58
    def create(self):  # , **kwargs):
        """ Create the nodes and connections for the workflow """

        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print("=" * 80)
        print(csvOut.outputs.__dict__)
        print("=" * 80)

        iters = OrderedDict()
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)
        iters["tests"], iters["trains"] = sample_crossvalidation_set(
            result, self.sample_size.default_value
        )
        # Main event
        out_fields = ["T1", "T2", "Label", "trainindex", "testindex"]
        inputsND = Node(
            interface=IdentityInterface(fields=out_fields),
            run_without_submitting=True,
            name="inputs",
        )
        inputsND.iterables = [
            ("trainindex", iters["trains"]),
            ("testindex", iters["tests"]),
        ]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__["t1"]
            inputsND.inputs.Label = csvOut.outputs.__dict__["label"]
            inputsND.inputs.T2 = csvOut.outputs.__dict__["t2"]
            pass  # TODO
        metaflow = Workflow(name="metaflow")
        metaflow.config["execution"] = {
            "plugin": "Linear",
            "stop_on_first_crash": "false",
            "stop_on_first_rerun": "false",
            # This stops at first attempt to rerun, before running, and before deleting previous results.
            "hash_method": "timestamp",
            "single_thread_matlab": "true",  # Multi-core 2011a  multi-core for matrix multiplication.
            "remove_unnecessary_outputs": "true",
            "use_relative_paths": "false",  # relative paths should be on, require hash update when changed.
            "remove_node_directories": "false",  # Experimental
            "local_hash_check": "false",
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect(
            [
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.trainindex", "trainT1s.index"),
                        ("inputs.T1", "trainT1s.inlist"),
                    ],
                ),
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.trainindex", "trainLabels.index"),
                        ("inputs.Label", "trainLabels.inlist"),
                    ],
                ),
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.testindex", "testT1s.index"),
                        ("inputs.T1", "testT1s.inlist"),
                    ],
                ),
            ]
        )
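Note that a node with two entries in .iterables expands to their Cartesian product by default. If each train split is meant to pair with its matching test split, the iterables need to be synchronized; a one-line sketch against the inputsND node above:

inputsND.synchronize = True  # pair trains[i] with tests[i] instead of crossing every train with every test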
Example #59
                                               'skullstrip_mask'
                                               ]),
            name='outputnode')

# remove background noise
background = Node(JistIntensityMp2rageMasking(outMasked=True,
                                        outMasked2=True,
                                        outSignal2=True), 
                  name='background')

# skullstrip
strip = Node(MedicAlgorithmSPECTRE2010(outStripped=True,
                                       outMask=True,
                                       outOriginal=True,
                                       inOutput='true',
                                       inFind='true',
                                       #maxMemoryUsage = 6000,
                                       #inMMC=4
                                       ), 
             name='strip')
strip.iterables = ('inMMC', [2, 4])

# connections
mp2rage.connect([(inputnode, background, [('inv2', 'inSecond'),
                                          ('t1map', 'inQuantitative'),
                                          ('uni', 'inT1weighted')]),
                 (background, strip, [('outMasked2','inInput')]),
                 (background, outputnode, [('outMasked2','uni_masked'),
                                           ('outMasked','t1map_masked'),
                                           ('outSignal2','background_mask')]),
                 (strip, outputnode, [('outStripped','uni_stripped'),
Example #60
File: syn_test.py  Project: fliem/LeiCA
    ####
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.io as nio
    from nipype.interfaces.ants.registration import RegistrationSynQuick

    import os

    os.chdir('/Users/franzliem/Desktop/antstest')

    wf = Workflow(name='normalize')
    wf.base_dir = os.path.join('/Users/franzliem/Desktop/antstest/wf')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = '/Users/franzliem/Desktop/antstest/ds'

    s = Node(RegistrationSynQuick(), name='s_rigid')
    s.base_dir = '/Users/franzliem/Desktop/antstest/node_test'
    s.inputs.fixed_image = '/Applications/FSL/data/standard/FMRIB58_FA_1mm.nii.gz'
    s.inputs.moving_image = '/Users/franzliem/Desktop/antstest/dtifit__FA_ero.nii.gz'
    s.inputs.output_prefix = 'ants_mni'
    s.inputs.num_threads = 4
    # s.inputs.use_histogram_matching = True
    # s.inputs.transform_type = 'r'
    # s.inputs.histogram_bins = 44
    # s.inputs.spline_distance = 15
    # s.inputs.precision_type = 'double'
    for f in ['warped_image', 'out_matrix', 'forward_warp_field', 'inverse_warp_field']:
        wf.connect(s, f, ds, f)

    wf.run()
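To confirm which output names RegistrationSynQuick exposes before wiring them into the DataSink loop above, nipype can print the interface's inputs and outputs:

from nipype.interfaces.ants.registration import RegistrationSynQuick
RegistrationSynQuick.help()  # lists warped_image, out_matrix, and the warp fields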