Example #1
0
def create_CT_MRI_coregistration_wf(name="coreg_wf"):
    """Build a workflow that conforms a T1 MRI and a CT onto a shared grid.

    The T1 is conformed with FreeSurfer's ``mri_convert --conform``; the CT
    is conformed without intensity rescaling (``no_change``) and resliced
    like the conformed T1, so both volumes end up on the same voxel grid.

    Parameters
    ----------
    name : str
        Name given to the returned workflow (default ``"coreg_wf"``).

    Returns
    -------
    nipype.pipeline.engine.Workflow
        Workflow with ``inputnode`` fields ``T1``/``CT`` and ``outputnode``
        fields ``conformed_T1``/``conformed_CT``.
    """
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(interface=util.IdentityInterface(fields=["T1", "CT"]),
                        name="inputnode")
    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=["conformed_T1", "conformed_CT"]),
                         name="outputnode")

    # Conform the T1 to FreeSurfer's standard conformed space.
    conform_T1 = pe.Node(interface=fs.MRIConvert(), name="conform_T1")
    conform_T1.inputs.conform = True

    # Conform the CT keeping its original intensity values (no_change) and
    # reslice it like the conformed T1 (reslice_like connection below).
    fix_CT_centroid = pe.Node(interface=fs.MRIConvert(),
                              name="fix_CT_centroid")
    fix_CT_centroid.inputs.no_change = True
    fix_CT_centroid.inputs.conform = True

    workflow.connect([
        (inputnode, conform_T1, [("T1", "in_file")]),
        (conform_T1, outputnode, [("out_file", "conformed_T1")]),
        (inputnode, fix_CT_centroid, [("CT", "in_file")]),
        (conform_T1, fix_CT_centroid, [("out_file", "reslice_like")]),
        (fix_CT_centroid, outputnode, [("out_file", "conformed_CT")]),
    ])

    # BUG FIX: the workflow was constructed but never returned, so every
    # caller received None instead of the workflow.
    return workflow
Example #2
0
def freesurfer_nifti():
    """Build a workflow skeleton for converting FreeSurfer mgz files to NIfTI.

    NOTE(review): the two converter nodes are created but never wired to the
    input/output nodes; this rewrite deliberately preserves that behavior.
    """
    # Workflow container.
    flow = Workflow(name='freesurfer_nifti')

    inputnode = Node(
        util.IdentityInterface(fields=['mgz_image', 'anatomical']),
        name='inputnode')
    outputnode = Node(util.IdentityInterface(fields=['aparc_aseg_nifti']),
                      name='outputnode')

    # mgz -> nii converter for the aparc+aseg segmentation.
    aparc_converter = Node(interface=freesurfer.MRIConvert(),
                           name='aparc_aseg_nifti')
    aparc_converter.inputs.out_type = 'nii'

    # mgz -> nii converter for the anatomical image.
    anat_converter = Node(interface=freesurfer.MRIConvert(),
                          name='anatomical_ready')
    anat_converter.inputs.out_type = 'nii'

    return flow
Example #3
0
def create_mgzconvert_pipeline(name='mgzconvert'):
    """Convert FreeSurfer outputs (mgz) to NIfTI and derive masks/edges.

    Produces the whole head (T1), the FreeSurfer-masked brain, a binary
    brain mask, a white-matter segmentation, and a WM edge image useful for
    visually checking coregistration quality.

    Parameters
    ----------
    name : str
        Workflow name (default ``'mgzconvert'``).

    Returns
    -------
    nipype Workflow with inputnode fields ``fs_subjects_dir``/
    ``fs_subject_id`` and outputnode fields ``anat_head``, ``anat_brain``,
    ``anat_brain_mask``, ``wmseg`` and ``wmedge``.
    """
    # BUG FIX: honor the ``name`` parameter -- the original ignored it and
    # hard-coded the workflow name to 'mgzconvert'.
    mgzconvert = Workflow(name=name)
    # inputnode
    inputnode = Node(
        util.IdentityInterface(fields=['fs_subjects_dir', 'fs_subject_id']),
        name='inputnode')
    # outputnode
    outputnode = Node(util.IdentityInterface(fields=[
        'anat_head', 'anat_brain', 'anat_brain_mask', 'wmseg', 'wmedge'
    ]),
                      name='outputnode')
    # import files from freesurfer
    fs_import = Node(interface=nio.FreeSurferSource(), name='fs_import')
    # convert FreeSurfer T1 file to nifti
    head_convert = Node(fs.MRIConvert(out_type='niigz', out_file='T1.nii.gz'),
                        name='head_convert')

    def get_aparc_aseg(files):
        # FreeSurferSource returns several aparc volumes; select aparc+aseg.
        # (Loop variable renamed so it no longer shadows the ``name`` param.)
        for fname in files:
            if 'aparc+aseg' in fname:
                return fname

    # create brain by converting only freesurfer output
    brain_convert = Node(fs.MRIConvert(out_type='niigz',
                                       out_file='brain.nii.gz'),
                         name='brain_convert')
    brain_binarize = Node(fsl.ImageMaths(op_string='-bin -fillh',
                                         out_file='T1_brain_mask.nii.gz'),
                          name='brain_binarize')

    # cortical and cerebellar white matter volumes to construct wm edge
    # [lh cerebral wm, lh cerebellar wm, rh cerebral wm, rh cerebellar wm,
    #  brain stem]
    wmseg = Node(fs.Binarize(out_type='nii.gz',
                             match=[2, 7, 41, 46, 16],
                             binary_file='T1_brain_wmseg.nii.gz'),
                 name='wmseg')
    # make edge from wmseg to visualize coregistration quality
    edge = Node(fsl.ApplyMask(args='-edge -bin',
                              out_file='T1_brain_wmedge.nii.gz'),
                name='edge')
    # connections
    mgzconvert.connect([
        (inputnode, fs_import, [('fs_subjects_dir', 'subjects_dir'),
                                ('fs_subject_id', 'subject_id')]),
        (fs_import, head_convert, [('T1', 'in_file')]),
        (fs_import, wmseg, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),
        (fs_import, brain_convert, [('brainmask', 'in_file')]),
        (wmseg, edge, [('binary_file', 'in_file'),
                       ('binary_file', 'mask_file')]),
        (head_convert, outputnode, [('out_file', 'anat_head')]),
        (brain_convert, outputnode, [('out_file', 'anat_brain')]),
        (brain_convert, brain_binarize, [('out_file', 'in_file')]),
        (brain_binarize, outputnode, [('out_file', 'anat_brain_mask')]),
        (wmseg, outputnode, [('binary_file', 'wmseg')]),
        (edge, outputnode, [('out_file', 'wmedge')])
    ])

    return mgzconvert
def genrDcm2Mgz(dcm, oDir, series_desc, series_id):
    """Convert a DICOM series to a FreeSurfer orig volume (mri/orig/001.mgz).

    Parameters
    ----------
    dcm : str
        Path to a DICOM file of the series to convert.
    oDir : str
        FreeSurfer subject directory; created via ``mksubjdirs`` if missing.
    series_desc, series_id :
        Used only to name the conversion log file.

    NOTE(review): relies on module-level globals ``overwriteExisting``,
    ``convertIdc`` and ``quality`` and on the helper ``write_log`` -- confirm
    they are defined at module scope.
    """
    if not os.path.exists(oDir):
        # Create the standard FreeSurfer subject directory tree.
        sp.call(['mksubjdirs', oDir])
    # Output name expected by recon-all.
    oFile = os.path.join(oDir, 'mri', 'orig', '001.mgz')
    if os.path.exists(oFile) and not overwriteExisting:
        return
    # BUG FIX: the flag was misspelled 'convetIdc' here but spelled
    # 'convertIdc' below -- one of the two was always a NameError.
    if convertIdc:
        # Two-stage conversion via a temp file; the second stage below (after
        # the if/else) writes the final volume.
        if not os.path.exists('tmp'):
            os.makedirs('tmp')
        tmpFile = os.path.join('tmp', os.path.basename(oDir))
        dcm2Mgz = freesurfer.MRIConvert(in_file=dcm, out_file=tmpFile,
                                        out_type='mgz',
                                        terminal_output='stream')
        dcm2Mgz.run()
        dcm2Mgz = freesurfer.MRIConvert(in_file=tmpFile, out_file=oFile,
                                        out_type='mgz',
                                        terminal_output='stream')
    else:
        # Direct dcm -> mgz conversion.
        dcm2Mgz = freesurfer.MRIConvert(in_file=dcm, out_file=oFile,
                                        out_type='mgz',
                                        terminal_output='stream')
    dcm2Mgz.run()
    addcmd = True
    if convertIdc:
        # Do not log the command line for IDC conversions (it may contain an
        # identifying R-number).
        addcmd = False
    logFile = os.path.join(oDir, 'scripts',
                           str(series_id) + '_' + str(series_desc) +
                           '_mriConvert' + '.log')
    # BUG FIX: the original logged 'dcmCvt.cmdline', a name that does not
    # exist in this function; the converter is ``dcm2Mgz``.
    write_log(dcm2Mgz.cmdline, logFile, addcmd,
              [series_id, series_desc, quality])
Example #5
0
def preprocess_CT_wf(name="preproc_CT"):
    """Build a workflow that conforms a T1 and a CT and registers the CT.

    The CT is conformed without intensity rescaling and registered to the
    T1 with a two-stage (Translation then Rigid) ANTs registration.
    """
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(interface=util.IdentityInterface(fields=["CT", "T1"]),
                        name="inputnode")
    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=["conformed_T1", "conformed_CT"]),
                         name="outputnode")

    # NOTE(review): hard-coded absolute file names used as static inputs;
    # the connections at the bottom also feed fixed_image/moving_image from
    # upstream nodes, so these look like debugging leftovers -- confirm
    # nipype tolerates an input that is both set and connected.
    fixed_image = op.abspath("conform_MNI_MoutBija_20090622.nii")
    moving_image = op.abspath("conf_OrigCT.nii")

    # Conform the T1 to FreeSurfer's standard conformed space.
    conform_T1 = pe.Node(interface=fs.MRIConvert(), name="conform_T1")
    conform_T1.inputs.conform = True

    # Conform the CT without rescaling intensities (no_change).
    conform_CT = pe.Node(interface=fs.MRIConvert(), name="conform_CT")
    conform_CT.inputs.no_change = True
    conform_CT.inputs.conform = True

    # Two-stage ANTs registration: translation followed by rigid, Mattes
    # mutual information metric at both stages.
    register_CT = pe.Node(interface=ants.Registration(), name="register_CT")
    register_CT.inputs.fixed_image = [fixed_image]
    register_CT.inputs.moving_image = [moving_image]
    register_CT.inputs.output_transform_prefix = "Test2_output"
    register_CT.inputs.transforms = ['Translation', 'Rigid']
    register_CT.inputs.transform_parameters = [(0.1, ), (0.1, )]
    register_CT.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 2)
    register_CT.inputs.dimension = 3
    register_CT.inputs.write_composite_transform = True
    register_CT.inputs.collapse_output_transforms = False
    register_CT.inputs.metric = ['Mattes'] * 2
    register_CT.inputs.metric_weight = [1] * 2
    register_CT.inputs.radius_or_number_of_bins = [32] * 2
    register_CT.inputs.sampling_strategy = ['Regular'] * 2
    register_CT.inputs.sampling_percentage = [0.3] * 2
    register_CT.inputs.convergence_threshold = [1.e-8] * 2
    register_CT.inputs.convergence_window_size = [20] * 2
    register_CT.inputs.smoothing_sigmas = [[4, 2, 1]] * 2
    register_CT.inputs.sigma_units = ['vox'] * 2
    # Coarser shrink pyramid for the translation stage than the rigid stage.
    register_CT.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]]
    register_CT.inputs.use_estimate_learning_rate_once = [True] * 2
    register_CT.inputs.use_histogram_matching = [False] * 2
    register_CT.inputs.initial_moving_transform_com = True
    register_CT.inputs.output_warped_image = 'conf_CT_fixed.nii'

    workflow.connect([(inputnode, conform_CT, [("CT", "in_file")])])
    workflow.connect([(conform_CT, register_CT, [("out_file", "moving_image")])
                      ])

    workflow.connect([(inputnode, conform_T1, [("T1", "in_file")])])
    workflow.connect([(conform_T1, outputnode, [("out_file", "conformed_T1")])
                      ])
    workflow.connect([(inputnode, register_CT, [("T1", "fixed_image")])])
    # NOTE(review): ants.Registration's output is named 'warped_image', not
    # 'out_file', and the outputnode declares no 'skin_mask' field -- this
    # connection looks broken; verify before relying on this workflow.
    workflow.connect([(register_CT, outputnode, [("out_file", "skin_mask")])])
    return workflow
Example #6
0
    def skull_stripping_wf(self):
        """Build the skull-stripping workflow: nii -> mgz -> watershed ->
        nii -> BET.

        Mirrors the shell pipeline:
        mri_convert(nii->mgz) | mri_watershed | mri_convert(mgz->nii) | bet.
        """
        # nii -> mgz so mri_watershed can consume the volume.
        to_mgz = Node(interface=fs.MRIConvert(in_type='nii', out_type='mgz'),
                      name='conv_mgz')

        # Atlas-based watershed skull strip.
        strip = Node(interface=fs.WatershedSkullStrip(), name='watershed')

        # Back to NIfTI for FSL tools.
        to_nii = Node(interface=fs.MRIConvert(in_type='mgz', out_type='nii'),
                      name='conv_nii')

        # Final brain extraction with BET (robust centre estimation, mask
        # output, fractional intensity threshold 0.35).
        brain_extract = Node(interface=fsl.BET(robust=True, mask=True,
                                               frac=0.35),
                             name='bet')

        wf = Workflow(name='skull_stripping', base_dir=self.out_dir)
        wf.connect([
            (to_mgz, strip, [('out_file', 'in_file')]),
            (strip, to_nii, [('out_file', 'in_file')]),
            (to_nii, brain_extract, [('out_file', 'in_file')]),
        ])
        return wf
def create_get_T1_brainmask(name='get_T1_brainmask'):
    """Fetch T1 and brainmask from FreeSurfer and convert them to NIfTI.

    Parameters
    ----------
    name : str
        Workflow name (default ``'get_T1_brainmask'``).

    Returns
    -------
    nipype Workflow with inputnode fields ``fs_subjects_dir``/
    ``fs_subject_id`` and outputnode fields ``T1`` and ``brain_mask``.
    """
    # BUG FIX: honor the ``name`` parameter -- the original ignored it and
    # hard-coded the workflow name.
    get_T1_brainmask = Workflow(name=name)
    # Define nodes
    inputnode = Node(util.IdentityInterface(fields=[
        'fs_subjects_dir',
        'fs_subject_id',
    ]),
                     name='inputnode')

    outputnode = Node(
        interface=util.IdentityInterface(fields=['T1', 'brain_mask']),
        name='outputnode')

    # import files from freesurfer
    fs_import = Node(interface=nio.FreeSurferSource(), name='fs_import')

    # transform to nii
    convert_mask = Node(interface=fs.MRIConvert(), name="convert_mask")
    convert_mask.inputs.out_type = "niigz"
    convert_T1 = Node(interface=fs.MRIConvert(), name="convert_T1")
    convert_T1.inputs.out_type = "niigz"

    # binarize brain mask (like done in
    # Lemon_Scripts_mod/struct_preproc/mgzconvert.py)
    brain_binarize = Node(fsl.ImageMaths(op_string='-bin',
                                         out_file='T1_brain_mask.nii.gz'),
                          name='brain_binarize')

    get_T1_brainmask.connect([
        (inputnode, fs_import, [('fs_subjects_dir', 'subjects_dir'),
                                ('fs_subject_id', 'subject_id')]),
        (fs_import, convert_mask, [('brainmask', 'in_file')]),
        (fs_import, convert_T1, [('T1', 'in_file')]),
        (convert_mask, brain_binarize, [('out_file', 'in_file')]),
        (brain_binarize, outputnode, [('out_file', 'brain_mask')]),
        (convert_T1, outputnode, [('out_file', 'T1')])
    ])

    return get_T1_brainmask
Example #8
0
def conformAll(base_name):
    """Conform every image matching ``base_name`` and archive the originals.

    Searches the current directory for ``<base_name>*<type>.*`` files for a
    fixed list of image types, conforms the convertible ones to
    ``conform_<name>.nii.gz`` with FreeSurfer's mri_convert (nearest-
    neighbour resampling for label images), and moves each original into an
    ``Original/`` subdirectory.

    Parameters
    ----------
    base_name : str
        Common file-name prefix of the images to process.

    Returns
    -------
    list of str
        Absolute paths of the conformed output files.
    """
    # BUG FIX: 'os' was used below (os.mkdir, file moves) but never imported
    # in this function, while all other imports are function-local.
    import os
    import os.path as op
    import shutil
    import glob
    import nipype.interfaces.freesurfer as fs
    from nipype.utils.filemanip import split_filename

    list_of_exts = ["T1", "T1brain", "PreCoTh_rois",
                    "fdgpet_reorient", "wmmask_reorient", "cortex_reorient",
                    "fdgpet", "wmmask", "cortex"]
    # Label images must use nearest-neighbour resampling so label values are
    # not interpolated.
    label_types = ("PreCoTh_rois", "wmmask_reorient", "cortex_reorient")
    # These native-space images are archived without conversion.
    skip_types = ("fdgpet", "wmmask", "cortex")
    out_files = []

    print("Base name: '%s'" % base_name)

    try:
        os.mkdir('Original')
    except OSError:
        print("Directory exists")

    for filetype in list_of_exts:
        print("Searching for '%s'" % filetype)
        search_str = op.abspath(base_name + "*" + filetype + ".*")
        in_file = glob.glob(search_str)
        if in_file and len(in_file) == 1:
            in_file = in_file[0]
            print("Found %s" % in_file)
            _, fname, ext = split_filename(in_file)
            out_file = op.abspath("conform_" + fname + ".nii.gz")
            if filetype not in skip_types:
                conv = fs.MRIConvert()
                conv.inputs.conform = True
                conv.inputs.no_change = True
                if filetype in label_types:
                    conv.inputs.resample_type = 'nearest'
                conv.inputs.in_file = in_file
                conv.inputs.out_file = out_file
                conv.run()
            # Archive the original. shutil.move is safe with spaces and
            # shell metacharacters in paths, unlike os.system('mv ...').
            dest = op.join(op.abspath("Original"), fname + ext)
            print('mv %s %s' % (in_file, dest))
            shutil.move(in_file, dest)
            out_files.append(out_file)
        elif len(in_file) > 1:
            print("Multiple files found using %s" % search_str)
        else:
            print("Couldn't find anything using %s" % search_str)

    print("Successfully conformed %d files" % len(out_files))
    print(out_files)
    return out_files
    def make_freesurfer(self):
        """Assemble the FreeSurfer recon workflow (autorecon1-3 + subfields).

        autorecon1 runs first; its brainmask is then replaced by one derived
        from an externally supplied mask (mgz-convert -> vol2vol -> apply
        mask -> copy into the subject dir) before autorecon2/3 run, and the
        hippocampal/amygdala subfields are segmented last.
        """
        # Ref: http://nipype.readthedocs.io/en/1.0.4/interfaces/generated/interfaces.freesurfer/preprocess.html#reconall
        recon1 = Node(interface=fs.ReconAll(directive='autorecon1',
                                            mris_inflate='-n 15',
                                            hires=True,
                                            mprage=True,
                                            openmp=self.omp_nthreads),
                      name='fs_recon1',
                      n_procs=self.omp_nthreads)
        to_mgz = Node(interface=fs.MRIConvert(out_type='mgz'),
                      name='fs_mriconv')
        vol2vol = Node(interface=fs.ApplyVolTransform(mni_152_reg=True),
                       name='fs_vol2vol')
        apply_mask = Node(interface=fs.ApplyMask(), name='fs_mrimask')
        recon2 = Node(interface=fs.ReconAll(directive='autorecon2',
                                            hires=True,
                                            mprage=True,
                                            hippocampal_subfields_T1=False,
                                            openmp=self.omp_nthreads),
                      name='fs_recon2',
                      n_procs=self.omp_nthreads)
        recon3 = Node(interface=fs.ReconAll(directive='autorecon3',
                                            hires=True,
                                            mprage=True,
                                            hippocampal_subfields_T1=False,
                                            openmp=self.omp_nthreads),
                      name='fs_recon3',
                      n_procs=self.omp_nthreads)

        # Copies the masked brain into the FreeSurfer subject directory.
        brainmask_copy = Node(Function(['in_file', 'fs_dir'], ['fs_dir'],
                                       self.copy_mask),
                              name='copy_brainmask')
        subfields = Node(interface=SegmentHA_T1(), name='segment_hp')

        wf = Workflow(name='freesurfer', base_dir=self.temp_dir)
        wf.connect([
            (recon1, vol2vol, [('T1', 'target_file')]),
            (to_mgz, vol2vol, [('out_file', 'source_file')]),
            (recon1, apply_mask, [('T1', 'in_file')]),
            (vol2vol, apply_mask, [('transformed_file', 'mask_file')]),
            (apply_mask, brainmask_copy, [('out_file', 'in_file')]),
            (recon1, brainmask_copy, [('subjects_dir', 'fs_dir')]),
            (brainmask_copy, recon2, [('fs_dir', 'subjects_dir')]),
            (recon2, recon3, [('subjects_dir', 'subjects_dir')]),
            (recon3, subfields, [('subjects_dir', 'subjects_dir')]),
        ])

        return wf
def results(cenc_participant_id,  cenc_participant_dir, cenc_freesurfer_dir,cenc_results_dir, verbose):

     util.mkcd_dir( [ cenc_results_dir ], True)

     files_to_convert = [ os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'nu.mgz'),
                          os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'aseg.mgz'),
                          os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'brainmask.mgz'),
                          os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'aparc.a2009s+aseg.mgz'),
                          os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'wmparc.mgz')
                          ]

     # Check if files exist

     print files_to_convert

     if util.check_files(files_to_convert, True) == False:
          sys.exit()

     # Create link to directory

     freesurfer_results_dir =  os.path.abspath(os.path.join( cenc_participant_dir, 'freesurfer','results'))

     if not os.path.exists(freesurfer_results_dir):
          util.force_symbolic_link( os.path.join( cenc_freesurfer_dir,  cenc_participant_id ), freesurfer_results_dir)

     # TODO use input node to run this instead of a loop. The trick part is to have the files named correctly. 

     for ii in files_to_convert:
          mc = fs.MRIConvert( in_file  = ii,
                              out_file = os.path.join( cenc_results_dir, str.replace( os.path.basename(ii),'.mgz','.nii.gz')),
                              out_type = 'niigz'
                              )
          mc.run()

          
          reorient = fsl.Reorient2Std( in_file = mc.inputs.out_file, out_file = mc.inputs.out_file)
          reorient.run()
     
     # Create final brain mask. 

     cenc.create_mask( os.path.join( cenc_results_dir, 'brainmask.nii.gz'),         
                       os.path.join( cenc_results_dir, 'aparc.a2009s+aseg.nii.gz'), 
                       os.path.join( cenc_results_dir, 'mask.nii.gz')
                       )

     # Extract labels for Further processing

     cenc_results_labels_dir = os.path.join(cenc_results_dir, 'labels')
     util.mkcd_dir( cenc_results_labels_dir, 'labels') )
Example #11
0
def results(cenc_participant_id,  cenc_participant_dir, cenc_freesurfer_dir,cenc_results_dir, verbose):
     """Convert key FreeSurfer volumes to NIfTI via a small nipype workflow,
     reorient them, and build the participant's brain mask.

     NOTE: Python 2 code (print statement below).
     """
     util.mkcd_dir( [ cenc_results_dir ], True)

     # Volumes to convert from the participant's FreeSurfer mri directory.
     files_to_convert = [ os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'nu.mgz'),
                          os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'aseg.mgz'),
                          os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'brainmask.mgz'),
                          os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'aparc.a2009s+aseg.mgz'),
                          os.path.join( cenc_freesurfer_dir, cenc_participant_id, 'mri', 'wmparc.mgz')
                          ]

     # Check if files exist

     print files_to_convert

     if util.check_files(files_to_convert, True) == False:
          sys.exit()

     # Create link to directory

     freesurfer_results_dir =  os.path.abspath(os.path.join( cenc_participant_dir, 'freesurfer','results'))

     if not os.path.exists(freesurfer_results_dir):
          util.force_symbolic_link( os.path.join( cenc_freesurfer_dir,  cenc_participant_id ), freesurfer_results_dir)

     # TODO use input node to run this instead of a loop

     # mri_convert node iterating over each input file (mgz -> nii.gz).
     mc = Node( fs.MRIConvert( out_type = 'niigz'
                               ),
                name="mri_convert"
                )

     mc.iterables = ( "in_file", files_to_convert )

     # Reorient each converted volume to FSL standard orientation.
     reorient = Node( fsl.Reorient2Std(), name="reorient" )

     workflow_convert = Workflow(name='cenc_freesurfer_nipype_workflow')
     workflow_convert.base_dir = cenc_results_dir

     workflow_convert.connect( [ (mc,       reorient, [('out_file', 'in_file')] )]
                               )
     workflow_convert.run()

     # Create final brain mask. This takes forever. Speeding it up would be helpful.

     cenc.create_mask( os.path.join( cenc_results_dir, 'brainmask.nii.gz'),
                       os.path.join( cenc_results_dir, 'aparc.a2009s+aseg.nii.gz'),
                       os.path.join( cenc_results_dir, 'mask.nii.gz')
                       )
Example #12
0
def create_dtb_tracking_flow(config):
    """Build the DTB deterministic tracking workflow.

    Parameters
    ----------
    config :
        Pipeline configuration; must provide ``imaging_model`` ('DSI',
        'DTI' or 'HARDI'), ``flip_input`` (iterable over {'x','y','z'}),
        ``angle``, ``step_size`` and ``seeds``.

    Returns
    -------
    nipype workflow with inputnode fields ``DWI``/``wm_mask_registered``
    and outputnode field ``track_file``.

    Raises
    ------
    ValueError
        If ``config.imaging_model`` is not a supported model.
    """
    flow = pe.Workflow(name="tracking")

    # inputnode
    inputnode = pe.Node(interface=util.IdentityInterface(
        fields=["DWI", "wm_mask_registered"]), name="inputnode")

    # outputnode
    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=["track_file"]), name="outputnode")

    # Prepare data for the tractography algorithm.
    dtb_dtk2dir = pe.Node(interface=DTB_dtk2dir(), name="dtb_dtk2dir")
    # BUG FIX: the original independent if-chain left ``prefix`` unbound
    # for an unknown imaging model, producing a NameError far from the
    # cause; fail fast with a clear message instead.
    if config.imaging_model == 'DSI':
        dtb_dtk2dir.inputs.diffusion_type = 'dsi'
        dtb_dtk2dir.inputs.dirlist = pkg_resources.resource_filename(
            'cmtklib', os.path.join('data', 'diffusion', 'odf_directions',
                                    '181_vecs.dat'))
        prefix = 'dsi'
    elif config.imaging_model == 'DTI':
        dtb_dtk2dir.inputs.diffusion_type = 'dti'
        prefix = 'dti'
    elif config.imaging_model == 'HARDI':
        # HARDI reuses the DSI direction list.
        dtb_dtk2dir.inputs.diffusion_type = 'dsi'
        dtb_dtk2dir.inputs.dirlist = pkg_resources.resource_filename(
            'cmtklib', os.path.join('data', 'diffusion', 'odf_directions',
                                    '181_vecs.dat'))
        prefix = 'hardi'
    else:
        raise ValueError("Unsupported imaging_model: %r"
                         % config.imaging_model)

    if 'x' in config.flip_input:
        dtb_dtk2dir.inputs.invert_x = True
    if 'y' in config.flip_input:
        dtb_dtk2dir.inputs.invert_y = True
    if 'z' in config.flip_input:
        dtb_dtk2dir.inputs.invert_z = True

    # Resample the registered WM mask to 1mm isotropic uchar for DTB.
    fs_mriconvert = pe.Node(interface=fs.MRIConvert(
        out_type='nii', vox_size=(1, 1, 1), out_datatype='uchar',
        out_file='fsmask_1mm.nii'), name="fs_mriconvert")

    # Streamline AND filtering (to avoid temp files)
    streamline_filter = pe.Node(interface=StreamlineAndFilter(
        out_file='streamline.trk'), name="dtb_streamline")
    streamline_filter.inputs.angle = config.angle
    streamline_filter.inputs.step_size = config.step_size
    streamline_filter.inputs.seeds = config.seeds

    # Workflow connections
    flow.connect([
        (inputnode, dtb_dtk2dir, [(('DWI', strip_suffix, prefix), 'prefix')]),
        (inputnode, fs_mriconvert, [('wm_mask_registered', 'in_file')]),
        (dtb_dtk2dir, streamline_filter, [('out_file', 'dir_file')]),
        (fs_mriconvert, streamline_filter, [('out_file', 'wm_mask')]),
        (streamline_filter, outputnode, [('out_file', 'track_file')]),
    ])

    return flow
Example #13
0
    def create_workflow(self, flow, inputnode, outputnode):
        """Wire the segmentation stage into *flow*.

        For the FreeSurfer path: converts the incoming T1 to mgz, copies it
        into the subject's mri/orig directory as 001.mgz, then runs a full
        recon-all and exposes subjects_dir/subject_id on the outputnode.

        NOTE: Python 2 code (print statements below).
        """
        if self.config.seg_tool == "Freesurfer":
            # Converting to .mgz format
            fs_mriconvert = pe.Node(interface=fs.MRIConvert(out_type="mgz",
                                                            out_file="T1.mgz"),
                                    name="mgz_convert")

            if self.config.make_isotropic:
                # Optionally resample to isotropic voxels using the
                # configured size and interpolation.
                fs_mriconvert.inputs.vox_size = (
                    self.config.isotropic_vox_size,
                    self.config.isotropic_vox_size,
                    self.config.isotropic_vox_size)
                fs_mriconvert.inputs.resample_type = self.config.isotropic_interpolation

            # Copies the converted T1 into <subject>/mri/orig/001.mgz, the
            # location recon-all expects its input.
            rename = pe.Node(util.Rename(), name="copy_orig")
            orig_dir = os.path.join(self.config.freesurfer_subject_id, "mri",
                                    "orig")
            if not os.path.exists(orig_dir):
                os.makedirs(orig_dir)
                print "Folder not existing; %s created!" % orig_dir
            rename.inputs.format_string = os.path.join(orig_dir, "001.mgz")

            # ReconAll => named outputnode as we don't want to select a specific output....
            fs_reconall = pe.Node(interface=fs.ReconAll(
                flags='-no-isrunning -parallel -openmp {}'.format(
                    self.config.fs_number_of_cores)),
                                  name="reconall")
            fs_reconall.inputs.directive = 'all'
            #fs_reconall.inputs.args = self.config.freesurfer_args

            #fs_reconall.inputs.subjects_dir and fs_reconall.inputs.subject_id set in cmp/pipelines/diffusion/diffusion.py
            fs_reconall.inputs.subjects_dir = self.config.freesurfer_subjects_dir

            # fs_reconall.inputs.hippocampal_subfields_T1 = self.config.segment_hippocampal_subfields
            # fs_reconall.inputs.brainstem = self.config.segment_brainstem

            def isavailable(file):
                # Pass-through used as a connection function; the print is
                # a progress message only.
                print "T1 is available"
                return file

            flow.connect([
                (inputnode, fs_mriconvert, [(('T1', isavailable), 'in_file')]),
                (fs_mriconvert, rename, [('out_file', 'in_file')]),
                (rename, fs_reconall, [(("out_file", extract_base_directory),
                                        "subject_id")]),
                (fs_reconall, outputnode, [('subjects_dir', 'subjects_dir'),
                                           ('subject_id', 'subject_id')]),
            ])
Example #14
0
    def create_workflow(self, flow, inputnode, outputnode):
        """Wire the segmentation stage into *flow*.

        FreeSurfer path: either runs recon-all on the incoming T1 (after
        converting it to mgz and copying it to mri/orig/001.mgz) or, when
        existing FreeSurfer data are reused, just forwards the configured
        subjects_dir/subject_id. The custom-segmentation path forwards the
        configured white-matter mask.

        NOTE: Python 2 code (print statements below).
        """
        if self.config.seg_tool == "Freesurfer":
            if self.config.use_existing_freesurfer_data == False:
                # Converting to .mgz format
                fs_mriconvert = pe.Node(interface=fs.MRIConvert(
                    out_type="mgz", out_file="T1.mgz"),
                                        name="mgz_convert")

                # Copies the converted T1 into <subject>/mri/orig/001.mgz,
                # the location recon-all expects its input.
                rename = pe.Node(util.Rename(), name="copy_orig")
                orig_dir = os.path.join(self.config.freesurfer_subject_id,
                                        "mri", "orig")
                if not os.path.exists(orig_dir):
                    os.makedirs(orig_dir)
                    print "Folder not existing; %s created!" % orig_dir
                rename.inputs.format_string = os.path.join(orig_dir, "001.mgz")

                # ReconAll => named outputnode as we don't want to select a specific output....
                fs_reconall = pe.Node(
                    interface=fs.ReconAll(flags='-no-isrunning'),
                    name="reconall")
                fs_reconall.inputs.args = self.config.freesurfer_args

                #fs_reconall.inputs.subjects_dir and fs_reconall.inputs.subject_id set in cmp/pipelines/diffusion/diffusion.py
                fs_reconall.inputs.subjects_dir = self.config.freesurfer_subjects_dir

                def isavailable(file):
                    # Pass-through used as a connection function; the print
                    # is a progress message only.
                    print "T1 is available"
                    return file

                flow.connect([
                    (inputnode, fs_mriconvert, [(('T1', isavailable),
                                                 'in_file')]),
                    (fs_mriconvert, rename, [('out_file', 'in_file')]),
                    (rename, fs_reconall,
                     [(("out_file", extract_base_directory), "subject_id")]),
                    (fs_reconall, outputnode, [('subjects_dir',
                                                'subjects_dir'),
                                               ('subject_id', 'subject_id')]),
                ])

            else:
                # Reuse existing FreeSurfer results: just publish the
                # configured locations on the outputnode.
                outputnode.inputs.subjects_dir = self.config.freesurfer_subjects_dir
                outputnode.inputs.subject_id = self.config.freesurfer_subject_id

        elif self.config.seg_tool == "Custom segmentation":

            outputnode.inputs.custom_wm_mask = self.config.white_matter_mask
    def freesurfer_to_anat(cls):
        """Remove FreeSurfer padding and map the segmentation to native space.

        Label2Vol resamples the automatic segmentation onto the rawavg grid
        (native anatomical space); the result is then converted from mgz to
        NIfTI.
        """
        if config.verbose:
            print('Aligning freesurfer file to anatomical native space.')
        # rawavg.mgz lives in native anatomical space, so it is the template;
        # vol_label_file fixes the output file name.
        label2vol = freesurfer.Label2Vol(
            seg_file='freesurfer/mri/aseg.auto_noCCseg.mgz',
            template_file='freesurfer/mri/rawavg.mgz',
            vol_label_file='freesurfer/mri/native_segmented_brain.mgz',
            reg_header='freesurfer/mri/aseg.auto_noCCseg.mgz',
            terminal_output='none')
        label2vol.run()

        # mgz -> nii for downstream tools.
        to_nifti = freesurfer.MRIConvert(
            in_file='freesurfer/mri/native_segmented_brain.mgz',
            out_file='freesurfer/mri/native_segmented_brain.nii',
            out_type='nii',
            terminal_output='none')
        to_nifti.run()
def create_images_workflow():
    """Minimal preprocessing: undo the sphinx position, reorient to standard."""
    workflow = Workflow(name='minimal_proc')

    in_node = Node(IdentityInterface(fields=['images']), name="in")
    out_node = Node(IdentityInterface(fields=['images']), name="out")

    # Sphinx-position correction, applied per input image.
    deorient = MapNode(fs.MRIConvert(sphinx=True),
                       iterfield=['in_file'],
                       name='sphinx')

    # Reorientation to FSL standard orientation, per image.
    reorient = MapNode(fsl.Reorient2Std(), iterfield=['in_file'], name='ro')

    workflow.connect([
        (in_node, deorient, [('images', 'in_file')]),
        (deorient, reorient, [('out_file', 'in_file')]),
        (reorient, out_node, [('out_file', 'images')]),
    ])

    return workflow
Example #17
0
def create_normalization_wf(transformations=["mni2func"]):
    """Build anat->MNI normalization plus optional inverse/forward warps.

    NOTE: the mutable default argument is kept for interface compatibility;
    it is only read, never mutated.
    """
    wf = pe.Workflow(name="normalization")
    inputspec = pe.Node(util.IdentityInterface(fields=[
        'T1', 'skullstripped_T1', 'preprocessed_epi', 'func2anat_transform'
    ]),
                        name="inputspec")

    anat2mni = create_nonlinear_register("anat2mni")
    flirt_node = anat2mni.get_node("linear_reg_0")
    # Widen the rotational search to the full sphere on every axis.
    for trait_name in ('searchr_x', 'searchr_y', 'searchr_z'):
        setattr(flirt_node.inputs, trait_name, [-180, 180])

    # mgz -> nii converters for whole-head and skull-stripped T1s.
    # (Node names keep the original 'mgs' spelling for compatibility.)
    skull_mgz2nii = pe.Node(fs.MRIConvert(out_type="nii"),
                            name="skull_mgs2nii")
    brain_mgz2nii = skull_mgz2nii.clone(name="brain_mgs2nii")
    wf.connect(inputspec, "T1", skull_mgz2nii, "in_file")
    wf.connect(inputspec, "skullstripped_T1", brain_mgz2nii, "in_file")

    anat2mni.inputs.inputspec.reference_skull = fsl.Info.standard_image(
        "MNI152_T1_2mm.nii.gz")
    anat2mni.inputs.inputspec.reference_brain = fsl.Info.standard_image(
        "MNI152_T1_2mm_brain.nii.gz")
    anat2mni.inputs.inputspec.fnirt_config = "T1_2_MNI152_2mm"
    wf.connect(skull_mgz2nii, "out_file", anat2mni, "inputspec.input_skull")
    wf.connect(brain_mgz2nii, "out_file", anat2mni, "inputspec.input_brain")

    if 'mni2func' in transformations:
        # Invert the anat->MNI warp to obtain the MNI->anat field.
        invert_warp = pe.Node(fsl.InvWarp(), name="invert_warp")
        wf.connect(anat2mni, "outputspec.nonlinear_xfm", invert_warp,
                   "warp_file")
        wf.connect(skull_mgz2nii, "out_file", invert_warp, "ref_file")

    if 'func2mni' in transformations:
        # Warp the preprocessed EPI to MNI: premat (func->anat) followed by
        # the nonlinear anat->MNI field.
        mni_warp = pe.Node(interface=fsl.ApplyWarp(), name='mni_warp')
        mni_warp.inputs.ref_file = fsl.Info.standard_image(
            "MNI152_T1_2mm.nii.gz")
        wf.connect(inputspec, 'preprocessed_epi', mni_warp, 'in_file')
        wf.connect(anat2mni, 'outputspec.nonlinear_xfm', mni_warp,
                   'field_file')
        wf.connect(inputspec, 'func2anat_transform', mni_warp, 'premat')

    return wf
def extract_label(path, label_id, output_name):
    """Binarize one label from every segmentation volume matching *path*.

    For each file matching the glob pattern *path*: convert it to NIfTI
    with FreeSurfer's mri_convert, then write a binary mask named
    '<output_name>.nii.gz' (in the same directory as the input) that is 1
    wherever the voxel value equals *label_id* and 0 elsewhere.

    Parameters
    ----------
    path : str
        Glob pattern selecting per-subject segmentation files.
    label_id : int
        Label value to extract.
    output_name : str
        Basename (without extension) for the mask written next to the input.
    """
    files2extract = glob.glob(path)
    for subj, subj_aparc in enumerate(files2extract):
        print('extracting ', output_name, ' for subject ', subj)
        # Historical naming scheme preserved: the NIfTI name is built from
        # the first two '.'-separated pieces of the full path, so
        # 'sub/aparc.mgz' becomes 'sub/aparcmgz.nii.gz'.  Paths with extra
        # dots (e.g. in a directory name) will be truncated.
        nii_file = (subj_aparc.split('.')[0] + subj_aparc.split('.')[1] +
                    '.nii.gz')

        mc = freesurfer.MRIConvert()
        mc.inputs.in_file = subj_aparc
        mc.inputs.out_file = nii_file
        mc.run()

        calc = afni.Calc()
        calc.inputs.in_file_a = nii_file
        # amongst(a, id) evaluates to 1 where a == id, else 0.
        calc.inputs.expr = 'amongst(a,' + str(label_id) + ')'
        # Proper two-argument join (the old single-argument join produced
        # '/<output_name>.nii.gz' when the input had no directory part).
        calc.inputs.out_file = os.path.join(
            os.path.dirname(subj_aparc), output_name + '.nii.gz')
        calc.run()
Exemple #19
0
def create_custom_template(c):
    """Build a per-subject FreeSurfer-masked-brain workflow.

    For every subject in ``c.subjects``: binarize the aparc+aseg
    segmentation (with one dilation), apply it as a mask to the FreeSurfer
    ``orig`` volume, convert the masked brain to .nii.gz, and sink the
    result under 'masked_images' in ``c.sink_dir``.

    Parameters
    ----------
    c : config object
        Must expose ``surf_dir`` (FreeSurfer SUBJECTS_DIR), ``sink_dir``
        (DataSink base directory) and ``subjects`` (iterable of subject IDs).

    Returns
    -------
    wf : nipype Workflow named 'create_fs_masked_brains'
    """
    import nipype.pipeline.engine as pe
    #from nipype.interfaces.ants import BuildTemplate
    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as niu
    import nipype.interfaces.freesurfer as fs

    wf = pe.Workflow(name='create_fs_masked_brains')
    #temp = pe.Node(BuildTemplate(parallelization=1), name='create_template')
    fssource = pe.Node(nio.FreeSurferSource(subjects_dir=c.surf_dir),
                       name='fssource')
    # Iterate the whole graph once per subject id.
    infosource = pe.Node(niu.IdentityInterface(fields=["subject_id"]),
                         name="subject_names")
    infosource.iterables = ("subject_id", c.subjects)
    wf.connect(infosource, "subject_id", fssource, "subject_id")
    sink = pe.Node(nio.DataSink(base_directory=c.sink_dir), name='sinker')
    applymask = pe.Node(fs.ApplyMask(mask_thresh=0.5), name='applymask')
    binarize = pe.Node(fs.Binarize(dilate=1, min=0.5, subjects_dir=c.surf_dir),
                       name='binarize')
    convert = pe.Node(fs.MRIConvert(out_type='niigz'), name='convert')
    wf.connect(fssource, 'orig', applymask, 'in_file')
    # NOTE(review): pickaparc is defined elsewhere in this module;
    # presumably it selects the aparc+aseg file from the FreeSurferSource
    # output list -- confirm against its definition.
    wf.connect(fssource, ('aparc_aseg', pickaparc), binarize, 'in_file')
    wf.connect(binarize, 'binary_file', applymask, 'mask_file')
    wf.connect(applymask, 'out_file', convert, 'in_file')
    wf.connect(convert, "out_file", sink, "masked_images")

    def getsubs(subject_id):
        # Replace the iterable-generated '_subject_id_<id>/' directory
        # prefix with '<id>_' so sunk filenames are flat and readable.
        subs = []
        subs.append(('_subject_id_%s/' % subject_id, '%s_' % subject_id))
        return subs

    wf.connect(infosource, ("subject_id", getsubs), sink, "substitutions")
    #wf.connect(convert,'out_file',temp,'in_files')
    #wf.connect(temp,'final_template_file',sink,'custom_template.final_template_file')
    #wf.connect(temp,'subject_outfiles',sink,'custom_template.subject_outfiles')
    #wf.connect(temp,'template_files',sink,'template_files')
    return wf
Exemple #20
0
def init_segs_to_native_wf(name='segs_to_native', segmentation='aseg'):
    """
    Get a segmentation from FreeSurfer conformed space into native T1w space.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from smriprep.workflows.surfaces import init_segs_to_native_wf
        wf = init_segs_to_native_wf()


    **Parameters**
        segmentation
            The name of a segmentation ('aseg' or 'aparc_aseg' or 'wmparc')

    **Inputs**

        in_file
            Anatomical, merged T1w image after INU correction
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID


    **Outputs**

        out_file
            The selected segmentation, after resampling in native space
    """
    workflow = Workflow(name='%s_%s' % (name, segmentation))
    inputnode = pe.Node(niu.IdentityInterface(
        ['in_file', 'subjects_dir', 'subject_id']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(['out_file']),
                         name='outputnode')
    # Extract the aseg and aparc+aseg outputs
    fssource = pe.Node(nio.FreeSurferSource(), name='fs_datasource')
    # Label2Vol maps the segmentation from conformed space to rawavg
    # (native) space; MRIConvert then reslices it (nearest-neighbour, to
    # preserve label values) onto the grid of the provided T1w image.
    tonative = pe.Node(fs.Label2Vol(), name='tonative')
    tonii = pe.Node(fs.MRIConvert(out_type='niigz', resample_type='nearest'),
                    name='tonii')

    if segmentation.startswith('aparc'):
        # FreeSurferSource returns a *list* of parcellation files for
        # 'aparc_aseg'; pick the variant matching the requested atlas.
        # NOTE(review): a value of exactly 'aparc' (or any other
        # 'aparc*' string not handled below) would leave _sel undefined
        # and raise NameError on the next line -- the docstring only
        # advertises 'aseg', 'aparc_aseg' and 'wmparc'.
        if segmentation == 'aparc_aseg':

            def _sel(x):
                return [parc for parc in x if 'aparc+' in parc][0]
        elif segmentation == 'aparc_a2009s':

            def _sel(x):
                return [parc for parc in x if 'a2009s+' in parc][0]
        elif segmentation == 'aparc_dkt':

            def _sel(x):
                return [parc for parc in x if 'DKTatlas+' in parc][0]

        # A (field, function) tuple in a connect() applies the selector
        # to the source output before passing it downstream.
        segmentation = (segmentation, _sel)

    workflow.connect([
        (inputnode, fssource, [('subjects_dir', 'subjects_dir'),
                               ('subject_id', 'subject_id')]),
        (inputnode, tonii, [('in_file', 'reslice_like')]),
        (fssource, tonative, [(segmentation, 'seg_file'),
                              ('rawavg', 'template_file'),
                              ('aseg', 'reg_header')]),
        (tonative, tonii, [('vol_label_file', 'in_file')]),
        (tonii, outputnode, [('out_file', 'out_file')]),
    ])
    return workflow
Exemple #21
0
def create_struct_preproc_pipeline(working_dir,
                                   freesurfer_dir,
                                   ds_dir,
                                   use_fs_brainmask,
                                   name='struct_preproc'):
    """Create a structural preprocessing workflow: brain mask + FAST
    tissue segmentation.

    Parameters
    ----------
    working_dir : str
        Base directory for the workflow's working files.
    freesurfer_dir : str
        FreeSurfer SUBJECTS_DIR (used when ``use_fs_brainmask`` is True).
    ds_dir : str
        DataSink base directory for exported results.
    use_fs_brainmask : bool
        If True, derive the brain mask from FreeSurfer's aparc+aseg
        (binarize, fill holes, resample into t1w space); otherwise use
        FSL BET on the t1w.
    name : str
        Workflow name (default 'struct_preproc').

    Returns
    -------
    struct_preproc_wf : nipype Workflow
        Outputs: t1w_brain, struct_brain_mask, fast_partial_volume_files,
        wm_mask, csf_mask, wm_mask_4_bbr, gm_mask.
    """

    # initiate workflow
    struct_preproc_wf = Workflow(name=name)
    struct_preproc_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting',
                                              'rsfMRI_preprocessing')
    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['t1w', 'subject_id']),
                     name='inputnode')

    # outputnode
    outputnode = Node(util.IdentityInterface(fields=[
        't1w_brain', 'struct_brain_mask', 'fast_partial_volume_files',
        'wm_mask', 'csf_mask', 'wm_mask_4_bbr', 'gm_mask'
    ]),
                      name='outputnode')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.substitutions = [('_TR_id_', 'TR_')]

    # CREATE BRAIN MASK
    if use_fs_brainmask:
        # brainmask with fs
        fs_source = Node(interface=nio.FreeSurferSource(), name='fs_source')
        fs_source.inputs.subjects_dir = freesurfer_dir
        struct_preproc_wf.connect(inputnode, 'subject_id', fs_source,
                                  'subject_id')

        # get aparc+aseg from list
        # (returns None implicitly when no filename matches)
        def get_aparc_aseg(files):
            for name in files:
                if 'aparc+aseg' in name:
                    return name

        aseg = Node(fs.MRIConvert(out_type='niigz', out_file='aseg.nii.gz'),
                    name='aseg')
        struct_preproc_wf.connect(fs_source, ('aparc_aseg', get_aparc_aseg),
                                  aseg, 'in_file')

        # Any nonzero aparc+aseg label becomes brain (threshold 0.5).
        fs_brainmask = Node(
            fs.Binarize(
                min=0.5,  #dilate=1,
                out_type='nii.gz'),
            name='fs_brainmask')
        struct_preproc_wf.connect(aseg, 'out_file', fs_brainmask, 'in_file')

        # fill holes in mask, smooth, rebinarize
        fillholes = Node(fsl.maths.MathsCommand(
            args='-fillh -s 3 -thr 0.1 -bin', out_file='T1_brain_mask.nii.gz'),
                         name='fillholes')

        struct_preproc_wf.connect(fs_brainmask, 'binary_file', fillholes,
                                  'in_file')

        # NOTE(review): tkregister2_fct is defined elsewhere in this
        # module; presumably it computes the FreeSurfer-conformed-to-rawavg
        # FSL matrix via tkregister2 -- confirm against its definition.
        fs_2_struct_mat = Node(util.Function(
            input_names=['moving_image', 'target_image'],
            output_names=['fsl_file'],
            function=tkregister2_fct),
                               name='fs_2_struct_mat')

        struct_preproc_wf.connect([(fs_source, fs_2_struct_mat,
                                    [('T1', 'moving_image'),
                                     ('rawavg', 'target_image')])])

        # Resample the mask from FreeSurfer conformed space onto the t1w
        # grid; nearest-neighbour keeps the mask binary.
        struct_brain_mask = Node(fsl.ApplyXfm(interp='nearestneighbour'),
                                 name='struct_brain_mask_fs')
        struct_preproc_wf.connect(fillholes, 'out_file', struct_brain_mask,
                                  'in_file')
        struct_preproc_wf.connect(inputnode, 't1w', struct_brain_mask,
                                  'reference')
        struct_preproc_wf.connect(fs_2_struct_mat, 'fsl_file',
                                  struct_brain_mask, 'in_matrix_file')
        struct_preproc_wf.connect(struct_brain_mask, 'out_file', outputnode,
                                  'struct_brain_mask')
        struct_preproc_wf.connect(struct_brain_mask, 'out_file', ds,
                                  'struct_prep.struct_brain_mask')

        # multiply t1w with fs brain mask
        t1w_brain = Node(fsl.maths.BinaryMaths(operation='mul'),
                         name='t1w_brain')
        struct_preproc_wf.connect(inputnode, 't1w', t1w_brain, 'in_file')
        struct_preproc_wf.connect(struct_brain_mask, 'out_file', t1w_brain,
                                  'operand_file')
        struct_preproc_wf.connect(t1w_brain, 'out_file', outputnode,
                                  't1w_brain')
        struct_preproc_wf.connect(t1w_brain, 'out_file', ds,
                                  'struct_prep.t1w_brain')

    else:  # use bet
        t1w_brain = Node(fsl.BET(mask=True, outline=True, surfaces=True),
                         name='t1w_brain')
        struct_preproc_wf.connect(inputnode, 't1w', t1w_brain, 'in_file')
        struct_preproc_wf.connect(t1w_brain, 'out_file', outputnode,
                                  't1w_brain')

        # Identity passthrough so both branches expose the mask through a
        # node of the same name.
        def struct_brain_mask_bet_fct(in_file):
            return in_file

        struct_brain_mask = Node(util.Function(
            input_names=['in_file'],
            output_names=['out_file'],
            function=struct_brain_mask_bet_fct),
                                 name='struct_brain_mask')
        struct_preproc_wf.connect(t1w_brain, 'mask_file', struct_brain_mask,
                                  'in_file')
        struct_preproc_wf.connect(struct_brain_mask, 'out_file', outputnode,
                                  'struct_brain_mask')
        struct_preproc_wf.connect(struct_brain_mask, 'out_file', ds,
                                  'struct_prep.struct_brain_mask')

    # SEGMENTATION WITH FAST
    fast = Node(fsl.FAST(), name='fast')
    struct_preproc_wf.connect(t1w_brain, 'out_file', fast, 'in_files')
    struct_preproc_wf.connect(fast, 'partial_volume_files', outputnode,
                              'fast_partial_volume_files')
    struct_preproc_wf.connect(fast, 'partial_volume_files', ds,
                              'struct_prep.fast')

    # functions to select tissue classes
    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())

    def selectsingle(files, idx):
        return files[idx]

    # pve0: CSF
    # pve1: GM
    # pve2: WM
    # binarize tissue classes
    # Conservative threshold (0.99) plus erosion to avoid partial-volume
    # voxels in the nuisance masks.
    binarize_tissue = MapNode(
        fsl.ImageMaths(op_string='-nan -thr 0.99 -ero -bin'),
        iterfield=['in_file'],
        name='binarize_tissue')

    struct_preproc_wf.connect(fast,
                              ('partial_volume_files', selectindex, [0, 2]),
                              binarize_tissue, 'in_file')

    # OUTPUT  WM AND CSF MASKS FOR CPAC DENOISING
    struct_preproc_wf.connect([(binarize_tissue, outputnode,
                                [(('out_file', selectsingle, 0), 'csf_mask'),
                                 (('out_file', selectsingle, 1), 'wm_mask')])])

    # WRITE WM MASK WITH P > .5 FOR FSL BBR
    # use threshold of .5 like FSL's epi_reg script
    wm_mask_4_bbr = Node(fsl.ImageMaths(op_string='-thr 0.5 -bin'),
                         name='wm_mask_4_bbr')
    struct_preproc_wf.connect(fast, ('partial_volume_files', selectindex, [2]),
                              wm_mask_4_bbr, 'in_file')
    struct_preproc_wf.connect(wm_mask_4_bbr, 'out_file', outputnode,
                              'wm_mask_4_bbr')

    struct_preproc_wf.write_graph(dotfilename=struct_preproc_wf.name,
                                  graph2use='flat',
                                  format='pdf')

    return struct_preproc_wf
def create_reg_workflow(name='registration'):
    """Create a registration workflow using FreeSurfer BBRegister and ANTs

    Parameters
    ----------

        name : name of workflow (default: 'registration')

    Inputs::

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.subject_id : FreeSurfer subject id
        inputspec.subjects_dir : FreeSurfer SUBJECTS_DIR
        inputspec.target_image : registration target

    Outputs::

        outputspec.func2anat_transform : BBRegister FSL-format transform
        outputspec.out_reg_file : BBRegister native register.dat
        outputspec.anat2target_transform : ANTs composite transform
        outputspec.transforms : [anat2target, func2anat] transform list
        outputspec.transformed_mean : mean image in target space
        outputspec.segmentation_files : tissue masks in functional space
        outputspec.anat2target : anatomical warped to target space
        outputspec.aparc : aparc+aseg resampled into functional space
    """

    register = Workflow(name=name)

    inputnode = Node(interface=IdentityInterface(fields=[
        'source_files', 'mean_image', 'subject_id', 'subjects_dir',
        'target_image'
    ]),
                     name='inputspec')

    outputnode = Node(interface=IdentityInterface(fields=[
        'func2anat_transform', 'out_reg_file', 'anat2target_transform',
        'transforms', 'transformed_mean', 'segmentation_files', 'anat2target',
        'aparc'
    ]),
                      name='outputspec')

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(), name='fssource')
    fssource.run_without_submitting = True
    register.connect(inputnode, 'subject_id', fssource, 'subject_id')
    register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')

    convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert")
    register.connect(fssource, 'T1', convert, 'in_file')

    # Coregister the median to the surface
    # (FSL initialization, t2-like contrast for EPI, export an FSL .mat
    # alongside the native register.dat)
    bbregister = Node(freesurfer.BBRegister(), name='bbregister')
    bbregister.inputs.init = 'fsl'
    bbregister.inputs.contrast_type = 't2'
    bbregister.inputs.out_fsl_file = True
    bbregister.inputs.epi_mask = True
    register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
    register.connect(inputnode, 'mean_image', bbregister, 'source_file')
    register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')
    """
    Estimate the tissue classes from the anatomical image. But use spm's segment
    as FSL appears to be breaking.
    """

    stripper = Node(fsl.BET(), name='stripper')
    register.connect(convert, 'out_file', stripper, 'in_file')
    fast = Node(fsl.FAST(), name='fast')
    register.connect(stripper, 'out_file', fast, 'in_files')
    """
    Binarize the segmentation
    """

    binarize = MapNode(fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'),
                       iterfield=['in_file'],
                       name='binarize')
    register.connect(fast, 'partial_volume_files', binarize, 'in_file')
    """
    Apply inverse transform to take segmentations to functional space
    """

    applyxfm = MapNode(freesurfer.ApplyVolTransform(inverse=True,
                                                    interp='nearest'),
                       iterfield=['target_file'],
                       name='inverse_transform')
    register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file')
    register.connect(binarize, 'out_file', applyxfm, 'target_file')
    register.connect(inputnode, 'mean_image', applyxfm, 'source_file')
    """
    Apply inverse transform to aparc file
    """

    aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
                                                 interp='nearest'),
                    name='aparc_inverse_transform')
    register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
    # NOTE(review): get_aparc_aseg is defined elsewhere in this module;
    # presumably it picks the aparc+aseg file from the FreeSurferSource
    # output list -- confirm against its definition.
    register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm,
                     'target_file')
    register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')
    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """

    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    # Three stages: Rigid and Affine with Mattes MI, then SyN with a
    # Mattes+CC combination.
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[
        100, 30, 20
    ]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.float = True
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'}
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image', reg, 'fixed_image')
    """
    Concatenate the affine and ants transforms into a list
    """

    # Order matters for ants.ApplyTransforms: anat->target composite first
    # (in1), func->anat affine second (in2).
    merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, 'composite_transform', merge, 'in1')
    """
    Transform the mean image. First to anatomical and then to target
    """

    warpmean = Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 3
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.inputs.terminal_output = 'file'
    warpmean.inputs.args = '--float'
    warpmean.inputs.num_threads = 4

    register.connect(inputnode, 'target_image', warpmean, 'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')
    """
    Assign all the output files
    """

    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(applyxfm, 'transformed_file', outputnode,
                     'segmentation_files')
    register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc')
    register.connect(bbregister, 'out_fsl_file', outputnode,
                     'func2anat_transform')
    register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file')
    register.connect(reg, 'composite_transform', outputnode,
                     'anat2target_transform')
    register.connect(merge, 'out', outputnode, 'transforms')

    return register
Exemple #23
0
def create_first_SPM(name='modelfit'):
    """First level task-fMRI modelling workflow (SPM).

    Parameters
    ----------
    name : str
        Name of workflow. Default = 'modelfit'

    Inputs
    ------
    inputspec.session_info :
    inputspec.interscan_interval :
    inputspec.contrasts :
    inputspec.estimation_method :
    inputspec.bases :
    inputspec.mask :
    inputspec.model_serial_correlations :

    Outputs
    -------
    outputspec.RPVimage :
    outputspec.beta_images :
    outputspec.mask_image :
    outputspec.residual_image :
    outputspec.con_images : contrast images, converted to NIfTI
    outputspec.ess_images :
    outputspec.spmF_images :
    outputspec.spmT_images :
    outputspec.spm_mat_file :

    Returns
    -------
    workflow : first-level workflow
    """
    import nipype.interfaces.spm as spm  # fsl
    import nipype.interfaces.freesurfer as fs
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    modelfit = pe.Workflow(name=name)

    inputspec = pe.Node(util.IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'estimation_method',
        'bases', 'mask', 'model_serial_correlations'
    ]),
                        name='inputspec')

    # Design -> estimate -> contrasts, the standard SPM first-level chain.
    level1design = pe.Node(interface=spm.Level1Design(timing_units='secs'),
                           name="create_level1_design")

    modelestimate = pe.Node(interface=spm.EstimateModel(),
                            name='estimate_model')

    conestimate = pe.Node(interface=spm.EstimateContrast(),
                          name='estimate_contrast')

    # SPM writes contrast images as Analyze/NIfTI pairs; convert each to
    # a single .nii for downstream tools.
    convert = pe.MapNode(interface=fs.MRIConvert(out_type='nii'),
                         name='convert',
                         iterfield=['in_file'])

    outputspec = pe.Node(util.IdentityInterface(fields=[
        'RPVimage', 'beta_images', 'mask_image', 'residual_image',
        'con_images', 'ess_images', 'spmF_images', 'spmT_images',
        'spm_mat_file'
    ]),
                         name='outputspec')

    # Setup the connections

    modelfit.connect([
        (inputspec, level1design,
         [('interscan_interval', 'interscan_interval'),
          ('session_info', 'session_info'), ('bases', 'bases'),
          ('mask', 'mask_image'),
          ('model_serial_correlations', 'model_serial_correlations')]),
        (inputspec, conestimate, [('contrasts', 'contrasts')]),
        (inputspec, modelestimate, [('estimation_method', 'estimation_method')
                                    ]),
        (level1design, modelestimate, [('spm_mat_file', 'spm_mat_file')]),
        (modelestimate, conestimate, [('beta_images', 'beta_images'),
                                      ('residual_image', 'residual_image'),
                                      ('spm_mat_file', 'spm_mat_file')]),
        (modelestimate, outputspec, [('RPVimage', 'RPVimage'),
                                     ('beta_images', 'beta_images'),
                                     ('mask_image', 'mask_image'),
                                     ('residual_image', 'residual_image')]),
        (conestimate, convert, [('con_images', 'in_file')]),
        (convert, outputspec, [('out_file', 'con_images')]),
        (conestimate, outputspec, [('ess_images', 'ess_images'),
                                   ('spmF_images', 'spmF_images'),
                                   ('spmT_images', 'spmT_images'),
                                   ('spm_mat_file', 'spm_mat_file')])
    ])

    return modelfit
def coreg_without_resample(name="highres_coreg"):
    """Coregister a moving image to a fixed image without resampling the
    moving image onto the fixed image's (lower-resolution) grid.

    A 12-dof FLIRT gives an initial matrix; the FOV of that matrix is
    rewritten so the transform can be applied in the moving image's own
    space, the fixed image is resliced onto the moving grid, and a final
    6-dof rigid registration refines the alignment.  The low-res and
    concatenated high-res matrices are both exposed.

    Inputs (inputnode): fixed_image, moving_image, interp.
    Outputs (outputnode): out_file, lowres_matrix_file,
    highres_matrix_file, resampled_fixed_image.
    """
    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(interface=util.IdentityInterface(
        fields=["fixed_image", "moving_image", "interp"]),
                        name="inputnode")
    outputnode = pe.Node(interface=util.IdentityInterface(fields=[
        "out_file", "lowres_matrix_file", "highres_matrix_file",
        "resampled_fixed_image"
    ]),
                         name="outputnode")

    # Initial full-affine registration of moving to fixed.
    coregister_moving_to_fixed = pe.Node(interface=fsl.FLIRT(dof=12),
                                         name='coregister_moving_to_fixed')

    # Reslice the fixed image onto the moving image's voxel grid.
    resample_fixed_to_moving = pe.Node(interface=fs.MRIConvert(),
                                       name='resample_fixed_to_moving')

    # Rewrite the FLIRT matrix for the moving image's field of view
    # (rewrite_mat_for_applyxfm is a module-level helper).
    rewrite_mat_interface = util.Function(
        input_names=[
            "in_matrix", "orig_img", "target_img", "shape", "vox_size"
        ],
        output_names=["out_image", "out_matrix_file"],
        function=rewrite_mat_for_applyxfm)
    fix_FOV_in_matrix = pe.Node(interface=rewrite_mat_interface,
                                name='fix_FOV_in_matrix')

    apply_fixed_matrix = pe.Node(interface=fsl.ApplyXfm(),
                                 name='apply_fixed_matrix')

    # Final rigid-body refinement against the resliced fixed image.
    final_rigid_reg_to_fixed = pe.Node(interface=fsl.FLIRT(dof=6),
                                       name='final_rigid_reg_to_fixed')

    # Concatenate the FOV-corrected affine with the rigid refinement.
    create_highres_xfm = pe.Node(interface=fsl.ConvertXFM(),
                                 name='create_highres_xfm')
    create_highres_xfm.inputs.concat_xfm = True

    workflow.connect([
        (inputnode, coregister_moving_to_fixed,
         [("moving_image", "in_file"), ("fixed_image", "reference")]),
        (coregister_moving_to_fixed, fix_FOV_in_matrix,
         [("out_matrix_file", "in_matrix")]),
        (inputnode, fix_FOV_in_matrix,
         [("moving_image", "orig_img"), ("fixed_image", "target_img")]),
        (inputnode, apply_fixed_matrix,
         [("moving_image", "in_file"), ("interp", "interp")]),
        (fix_FOV_in_matrix, apply_fixed_matrix,
         [("out_matrix_file", "in_matrix_file"), ("out_image", "reference")]),
        (inputnode, resample_fixed_to_moving,
         [("fixed_image", "in_file"), ("moving_image", "reslice_like")]),
        (resample_fixed_to_moving, final_rigid_reg_to_fixed,
         [("out_file", "reference")]),
        (apply_fixed_matrix, final_rigid_reg_to_fixed,
         [("out_file", "in_file")]),
        (inputnode, final_rigid_reg_to_fixed, [("interp", "interp")]),
        (final_rigid_reg_to_fixed, create_highres_xfm,
         [("out_matrix_file", "in_file2")]),
        (fix_FOV_in_matrix, create_highres_xfm,
         [("out_matrix_file", "in_file")]),
        (coregister_moving_to_fixed, outputnode,
         [("out_matrix_file", "lowres_matrix_file")]),
        (create_highres_xfm, outputnode,
         [("out_file", "highres_matrix_file")]),
        (resample_fixed_to_moving, outputnode,
         [("out_file", "resampled_fixed_image")]),
        (final_rigid_reg_to_fixed, outputnode, [("out_file", "out_file")]),
    ])
    return workflow
    def create_workflow(self, flow, inputnode, outputnode):
        # print inputnode
        processing_input = pe.Node(interface=util.IdentityInterface(
            fields=['diffusion', 'aparc_aseg', 'aseg', 'bvecs', 'bvals', 'grad', 'acqp', 'index', 'T1', 'brain',
                    'brain_mask', 'wm_mask_file', 'roi_volumes']), name='processing_input')

        # For DSI acquisition: extract the hemisphere that contains the data
        # if self.config.start_vol > 0 or self.config.end_vol < self.config.max_vol:
        #
        #     split_vol = pe.Node(interface=splitDiffusion(),name='split_vol')
        #     split_vol.inputs.start = self.config.start_vol
        #     split_vol.inputs.end = self.config.end_vol
        #
        #     split_bvecbval = pe.Node(interface=splitBvecBval(),name='split_bvecsbvals')
        #     split_bvecbval.inputs.start = self.config.start_vol
        #     split_bvecbval.inputs.end = self.config.end_vol
        #     split_bvecbval.inputs.orientation = 'h'
        #     split_bvecbval.inputs.delimiter = ' '
        #
        #     flow.connect([
        #                 (inputnode,split_vol,[('diffusion','in_file')]),
        #                 (split_vol,processing_input,[('data','diffusion')]),
        #                 (inputnode,split_bvecbval,[('bvecs','bvecs'),('bvals','bvals')]),
        #                 (split_bvecbval,processing_input,[('bvecs_split','bvecs'),('bvals_split','bvals')])
        #                 ])
        #
        # else:

        flow.connect([
            (inputnode, processing_input, [
             ('diffusion', 'diffusion'), ('bvecs', 'bvecs'), ('bvals', 'bvals')]),
        ])

        flow.connect([
            (inputnode, processing_input,
             [('T1', 'T1'), ('aparc_aseg', 'aparc_aseg'), ('aseg', 'aseg'), ('brain', 'brain'),
              ('brain_mask', 'brain_mask'), ('wm_mask_file', 'wm_mask_file'), ('roi_volumes', 'roi_volumes')]),
            (processing_input, outputnode, [('bvals', 'bvals')])
        ])

        # Conversion to MRTrix image format ".mif", grad_fsl=(inputnode.inputs.bvecs,inputnode.inputs.bvals)
        mr_convert = pe.Node(interface=MRConvert(
            stride=[1, 2, +3, +4]), name='mr_convert')
        mr_convert.inputs.quiet = True
        mr_convert.inputs.force_writing = True

        concatnode = pe.Node(interface=util.Merge(2), name='concatnode')

        def convertList2Tuple(lists):
            # print "******************************************",tuple(lists)
            return tuple(lists)

        flow.connect([
            # (processing_input,concatnode,[('bvecs','in1'),('bvals','in2')]),
            (processing_input, concatnode, [('bvecs', 'in1')]),
            (processing_input, concatnode, [('bvals', 'in2')]),
            (concatnode, mr_convert, [
             (('out', convertList2Tuple), 'grad_fsl')])
        ])

        # Convert Freesurfer data
        mr_convert_brainmask = pe.Node(
            interface=MRConvert(out_filename='brainmaskfull.nii.gz', stride=[
                                1, 2, 3], output_datatype='float32'),
            name='mr_convert_brain_mask')
        mr_convert_brain = pe.Node(
            interface=MRConvert(out_filename='anat_masked.nii.gz', stride=[
                                1, 2, 3], output_datatype='float32'),
            name='mr_convert_brain')
        mr_convert_T1 = pe.Node(
            interface=MRConvert(out_filename='anat.nii.gz', stride=[
                                1, 2, 3], output_datatype='float32'),
            name='mr_convert_T1')
        mr_convert_roi_volumes = pe.Node(
            interface=ApplymultipleMRConvert(
                stride=[1, 2, 3], output_datatype='float32', extension='nii'),
            name='mr_convert_roi_volumes')
        mr_convert_wm_mask_file = pe.Node(
            interface=MRConvert(out_filename='wm_mask_file.nii.gz', stride=[
                                1, 2, 3], output_datatype='float32'),
            name='mr_convert_wm_mask_file')

        flow.connect([
            (processing_input, mr_convert_brainmask,
             [('brain_mask', 'in_file')]),
            (processing_input, mr_convert_brain, [('brain', 'in_file')]),
            (processing_input, mr_convert_T1, [('T1', 'in_file')]),
            (processing_input, mr_convert_roi_volumes,
             [('roi_volumes', 'in_files')]),
            (processing_input, mr_convert_wm_mask_file,
             [('wm_mask_file', 'in_file')])
        ])

        # if self.config.partial_volume_estimation:
        #     pve_extractor_from_5tt = pe.Node(interface=ExtractPVEsFrom5TT(),name='pve_extractor_from_5tt')
        #     pve_extractor.inputs.pve_csf_file = 'pve_0.nii.gz'
        #     pve_extractor.inputs.pve_csf_file = 'pve_1.nii.gz'
        #     pve_extractor.inputs.pve_csf_file = 'pve_2.nii.gz'
        #
        #     flow.connect([
        #                 (mrtrix_5tt,pve_extractor_from_5tt,[('out_file','in_5tt')]),
        #                 (processing_input,pve_extractor_from_5tt,[('T1','ref_image')]),
        #                 ])

        # from nipype.interfaces import fsl
        # # Run FAST for partial volume estimation (WM;GM;CSF)
        # fastr = pe.Node(interface=fsl.FAST(),name='fastr')
        # fastr.inputs.out_basename = 'fast_'
        # fastr.inputs.number_classes = 3
        #
        # if self.config.fast_use_priors:
        #     fsl_flirt = pe.Node(interface=fsl.FLIRT(out_file='Template2Input.nii.gz',out_matrix_file='template2input.mat'),name="linear_registration")
        #     #fsl_flirt.inputs.in_file = os.environ['FSLDIR']+'/data/standard/MNI152_T1_1mm.nii.gz'
        #     template_path = os.path.join('data', 'segmentation', 'ants_template_IXI')
        #     fsl_flirt.inputs.in_file = pkg_resources.resource_filename('cmtklib', os.path.join(template_path, 'T_template2.nii.gz'))
        #     #fsl_flirt.inputs.dof = self.config.dof
        #     #fsl_flirt.inputs.cost = self.config.fsl_cost
        #     #fsl_flirt.inputs.no_search = self.config.no_search
        #     fsl_flirt.inputs.verbose = True
        #
        #     flow.connect([
        #                 (mr_convert_T1, fsl_flirt, [('converted','reference')]),
        #                 ])
        #
        #     fastr.inputs.use_priors = True
        #     fastr.inputs.other_priors = [pkg_resources.resource_filename('cmtklib', os.path.join(template_path,'3Class-Priors','priors1.nii.gz')),
        #                                  pkg_resources.resource_filename('cmtklib', os.path.join(template_path,'3Class-Priors','priors2.nii.gz')),
        #                                  pkg_resources.resource_filename('cmtklib', os.path.join(template_path,'3Class-Priors','priors3.nii.gz'))
        #                                 ]
        #     flow.connect([
        #                 (fsl_flirt, fastr, [('out_matrix_file','init_transform')]),
        #                 ])
        #
        # flow.connect([
        #             (mr_convert_brain,fastr,[('converted','in_files')]),
        #             # (fastr,outputnode,[('partial_volume_files','partial_volume_files')])
        #             ])

        # Threshold converted Freesurfer brainmask into a binary mask
        mr_threshold_brainmask = pe.Node(interface=MRThreshold(abs_value=1, out_file='brain_mask.nii.gz'),
                                         name='mr_threshold_brainmask')

        flow.connect([
            (mr_convert_brainmask, mr_threshold_brainmask,
             [('converted', 'in_file')])
        ])

        # Extract b0 and create DWI mask
        flirt_dwimask_pre = pe.Node(interface=fsl.FLIRT(out_file='brain2b0.nii.gz', out_matrix_file='brain2b0aff'),
                                    name='flirt_dwimask_pre')
        costs = ['mutualinfo', 'corratio', 'normcorr',
                 'normmi', 'leastsq', 'labeldiff', 'bbr']
        flirt_dwimask_pre.inputs.cost = costs[3]
        flirt_dwimask_pre.inputs.cost_func = costs[3]
        flirt_dwimask_pre.inputs.dof = 6
        flirt_dwimask_pre.inputs.no_search = False

        flirt_dwimask = pe.Node(
            interface=fsl.FLIRT(out_file='dwi_brain_mask.nii.gz',
                                apply_xfm=True, interp='nearestneighbour'),
            name='flirt_dwimask')

        mr_convert_b0 = pe.Node(interface=MRConvert(out_filename='b0.nii.gz', stride=[+1, +2, +3]),
                                name='mr_convert_b0')
        mr_convert_b0.inputs.extract_at_axis = 3
        mr_convert_b0.inputs.extract_at_coordinate = [0]

        flow.connect([
            (processing_input, mr_convert_b0, [('diffusion', 'in_file')])
        ])

        flow.connect([
            (mr_convert_T1, flirt_dwimask_pre, [('converted', 'in_file')]),
            (mr_convert_b0, flirt_dwimask_pre, [('converted', 'reference')]),
            (mr_convert_b0, flirt_dwimask, [('converted', 'reference')]),
            (flirt_dwimask_pre, flirt_dwimask, [
             ('out_matrix_file', 'in_matrix_file')]),
            (mr_threshold_brainmask, flirt_dwimask,
             [('thresholded', 'in_file')])
        ])

        # Diffusion data denoising
        if self.config.denoising:

            mr_convert_noise = pe.Node(interface=MRConvert(out_filename='diffusion_noisemap.nii.gz', stride=[+1, +2, +3, +4]),
                                       name='mr_convert_noise')

            if self.config.denoising_algo == "MRtrix (MP-PCA)":
                mr_convert.inputs.out_filename = 'diffusion.mif'
                dwi_denoise = pe.Node(
                    interface=DWIDenoise(
                        out_file='diffusion_denoised.mif', out_noisemap='diffusion_noisemap.mif'),
                    name='dwi_denoise')
                dwi_denoise.inputs.force_writing = True
                dwi_denoise.inputs.debug = True
                dwi_denoise.ignore_exception = True

                flow.connect([
                    # (processing_input,mr_convert,[('diffusion','in_file')]),
                    (processing_input, mr_convert, [('diffusion', 'in_file')]),
                    (mr_convert, dwi_denoise, [('converted', 'in_file')]),
                    (flirt_dwimask, dwi_denoise, [('out_file', 'mask')]),
                ])

            elif self.config.denoising_algo == "Dipy (NLM)":
                mr_convert.inputs.out_filename = 'diffusion_denoised.mif'
                dwi_denoise = pe.Node(
                    interface=dipy.Denoise(), name='dwi_denoise')
                if self.config.dipy_noise_model == "Gaussian":
                    dwi_denoise.inputs.noise_model = "gaussian"
                elif self.config.dipy_noise_model == "Rician":
                    dwi_denoise.inputs.noise_model = "rician"

                flow.connect([
                    (processing_input, dwi_denoise,
                     [('diffusion', 'in_file')]),
                    (flirt_dwimask, dwi_denoise, [('out_file', 'in_mask')]),
                    (dwi_denoise, mr_convert, [('out_file', 'in_file')])
                ])

            flow.connect([
                (dwi_denoise, mr_convert_noise, [('out_file', 'in_file')]),
                (mr_convert_noise, outputnode, [('converted', 'diffusion_noisemap')])
            ])
        else:
            mr_convert.inputs.out_filename = 'diffusion.mif'
            flow.connect([
                (processing_input, mr_convert, [('diffusion', 'in_file')])
            ])

        mr_convert_b = pe.Node(interface=MRConvert(out_filename='diffusion_corrected.nii.gz', stride=[+1, +2, +3, +4]),
                               name='mr_convert_b')

        if self.config.bias_field_correction:

            mr_convert_bias = pe.Node(interface=MRConvert(out_filename='diffusion_biasfield.nii.gz', stride=[+1, +2, +3, +4]),
                                      name='mr_convert_bias')

            if self.config.bias_field_algo == "ANTS N4":
                dwi_biascorrect = pe.Node(
                    interface=DWIBiasCorrect(
                        use_ants=True, out_bias='diffusion_denoised_biasfield.mif'),
                    name='dwi_biascorrect')
            elif self.config.bias_field_algo == "FSL FAST":
                dwi_biascorrect = pe.Node(
                    interface=DWIBiasCorrect(
                        use_fsl=True, out_bias='diffusion_denoised_biasfield.mif'),
                    name='dwi_biascorrect')

            dwi_biascorrect.inputs.debug = True

            if self.config.denoising:
                if self.config.denoising_algo == "MRtrix (MP-PCA)":
                    flow.connect([
                        (dwi_denoise, dwi_biascorrect,
                         [('out_file', 'in_file')]),
                        (flirt_dwimask, dwi_biascorrect,
                         [('out_file', 'mask')]),
                        (dwi_biascorrect, mr_convert_b,
                         [('out_file', 'in_file')])
                    ])
                elif self.config.denoising_algo == "Dipy (NLM)":
                    flow.connect([
                        (mr_convert, dwi_biascorrect,
                         [('converted', 'in_file')]),
                        (flirt_dwimask, dwi_biascorrect,
                         [('out_file', 'mask')]),
                        (dwi_biascorrect, mr_convert_b,
                         [('out_file', 'in_file')])
                    ])
            else:
                flow.connect([
                    (mr_convert, dwi_biascorrect, [('converted', 'in_file')]),
                    (flirt_dwimask, dwi_biascorrect, [('out_file', 'mask')])
                ])

            flow.connect([
                (dwi_biascorrect, mr_convert_bias, [('out_file', 'in_file')]),
                (mr_convert_bias, outputnode, [('converted', 'diffusion_biasfield')])
            ])
        else:
            if self.config.denoising:
                if self.config.denoising_algo == "MRtrix (MP-PCA)":
                    flow.connect([
                        (dwi_denoise, mr_convert_b, [('out_file', 'in_file')])
                    ])
                elif self.config.denoising_algo == "Dipy (NLM)":
                    flow.connect([
                        (mr_convert, mr_convert_b, [('converted', 'in_file')])
                    ])
            else:
                flow.connect([
                    (mr_convert, mr_convert_b, [('converted', 'in_file')])
                ])

        extract_grad_mrtrix = pe.Node(interface=ExtractMRTrixGrad(out_grad_mrtrix='grad.txt'),
                                      name='extract_grad_mrtrix')
        flow.connect([
            (mr_convert, extract_grad_mrtrix, [("converted", "in_file")])
        ])
        # extract_grad_fsl = pe.Node(interface=mrt.MRTrixInfo(out_grad_mrtrix=('diffusion_denoised.bvec','diffusion_denoised.bval')),name='extract_grad_fsl')

        # TODO extract the total readout directly from the BIDS json file
        acqpnode = pe.Node(interface=CreateAcqpFile(
            total_readout=self.config.total_readout), name='acqpnode')

        indexnode = pe.Node(interface=CreateIndexFile(), name='indexnode')
        flow.connect([
            (extract_grad_mrtrix, indexnode, [
             ("out_grad_mrtrix", "in_grad_mrtrix")])
        ])

        fs_mriconvert = pe.Node(
            interface=fs.MRIConvert(
                out_type='niigz', out_file='diffusion_preproc_resampled.nii.gz'),
            name="diffusion_resample")
        fs_mriconvert.inputs.vox_size = self.config.resampling
        fs_mriconvert.inputs.resample_type = self.config.interpolation

        mr_convert_b0_resample = pe.Node(interface=MRConvert(out_filename='b0_resampled.nii.gz', stride=[+1, +2, +3]),
                                         name='mr_convert_b0_resample')
        mr_convert_b0_resample.inputs.extract_at_axis = 3
        mr_convert_b0_resample.inputs.extract_at_coordinate = [0]

        # fs_mriconvert_b0 = pe.Node(interface=fs.MRIConvert(out_type='niigz',out_file='b0_resampled.nii.gz'),name="b0_resample")
        # fs_mriconvert_b0.inputs.vox_size = self.config.resampling
        # fs_mriconvert_b0.inputs.resample_type = self.config.interpolation

        flow.connect([
            (fs_mriconvert, mr_convert_b0_resample, [('out_file', 'in_file')]),
        ])

        # resampling Freesurfer data and setting output type to short
        fs_mriconvert_T1 = pe.Node(interface=fs.MRIConvert(out_type='niigz', out_file='anat_resampled.nii.gz'),
                                   name="anat_resample")
        fs_mriconvert_T1.inputs.vox_size = self.config.resampling
        fs_mriconvert_T1.inputs.resample_type = self.config.interpolation

        flow.connect([
            (mr_convert_T1, fs_mriconvert_T1, [('converted', 'in_file')]),
            # (mr_convert_b0_resample,fs_mriconvert_T1,[('converted','reslice_like')]),
            (fs_mriconvert_T1, outputnode, [('out_file', 'T1')])
        ])

        fs_mriconvert_brain = pe.Node(
            interface=fs.MRIConvert(
                out_type='niigz', out_file='anat_masked_resampled.nii.gz'),
            name="anat_masked_resample")
        fs_mriconvert_brain.inputs.vox_size = self.config.resampling
        fs_mriconvert_brain.inputs.resample_type = self.config.interpolation

        flow.connect([
            (mr_convert_brain, fs_mriconvert_brain,
             [('converted', 'in_file')]),
            # (mr_convert_b0_resample,fs_mriconvert_brain,[('converted','reslice_like')]),
            (fs_mriconvert_brain, outputnode, [('out_file', 'brain')])
        ])

        fs_mriconvert_brainmask = pe.Node(
            interface=fs.MRIConvert(
                out_type='niigz', resample_type='nearest', out_file='brain_mask_resampled.nii.gz'),
            name="brain_mask_resample")
        fs_mriconvert_brainmask.inputs.vox_size = self.config.resampling
        flow.connect([
            (mr_threshold_brainmask, fs_mriconvert_brainmask,
             [('thresholded', 'in_file')]),
            # (mr_convert_b0_resample,fs_mriconvert_brainmask,[('converted','reslice_like')]),
            (fs_mriconvert_brainmask, outputnode, [('out_file', 'brain_mask')])
        ])

        fs_mriconvert_brainmaskfull = pe.Node(
            interface=fs.MRIConvert(
                out_type='niigz', out_file='brain_mask_full_resampled.nii.gz'),
            name="brain_mask_full_resample")
        fs_mriconvert_brainmaskfull.inputs.vox_size = self.config.resampling
        fs_mriconvert_brainmaskfull.inputs.resample_type = self.config.interpolation
        flow.connect([
            (mr_convert_brainmask, fs_mriconvert_brainmaskfull,
             [('converted', 'in_file')]),
            # (mr_convert_b0_resample,fs_mriconvert_brainmaskfull,[('converted','reslice_like')]),
            (fs_mriconvert_brainmaskfull, outputnode,
             [('out_file', 'brain_mask_full')])
        ])

        fs_mriconvert_wm_mask = pe.Node(
            interface=fs.MRIConvert(
                out_type='niigz', resample_type='nearest', out_file='wm_mask_resampled.nii.gz'),
            name="wm_mask_resample")
        fs_mriconvert_wm_mask.inputs.vox_size = self.config.resampling
        flow.connect([
            (mr_convert_wm_mask_file, fs_mriconvert_wm_mask,
             [('converted', 'in_file')]),
            # (mr_convert_b0_resample,fs_mriconvert_wm_mask,[('converted','reslice_like')]),
            (fs_mriconvert_wm_mask, outputnode, [('out_file', 'wm_mask_file')])
        ])

        fs_mriconvert_ROIs = pe.MapNode(interface=fs.MRIConvert(out_type='niigz', resample_type='nearest'),
                                        iterfield=['in_file'], name="ROIs_resample")
        fs_mriconvert_ROIs.inputs.vox_size = self.config.resampling
        flow.connect([
            (mr_convert_roi_volumes, fs_mriconvert_ROIs,
             [('converted_files', 'in_file')]),
            # (mr_convert_b0_resample,fs_mriconvert_ROIs,[('converted','reslice_like')]),
            (fs_mriconvert_ROIs, outputnode, [("out_file", "roi_volumes")])
        ])

        # fs_mriconvert_PVEs = pe.MapNode(interface=fs.MRIConvert(out_type='niigz'),name="PVEs_resample",iterfield=['in_file'])
        # fs_mriconvert_PVEs.inputs.vox_size = self.config.resampling
        # fs_mriconvert_PVEs.inputs.resample_type = self.config.interpolation
        # flow.connect([
        #             (fastr,fs_mriconvert_PVEs,[('partial_volume_files','in_file')]),
        #             #(mr_convert_b0_resample,fs_mriconvert_ROIs,[('converted','reslice_like')]),
        #             (fs_mriconvert_PVEs,outputnode,[("out_file","partial_volume_files")])
        #             ])

        fs_mriconvert_dwimask = pe.Node(interface=fs.MRIConvert(out_type='niigz', resample_type='nearest',
                                                                out_file='dwi_brain_mask_resampled.nii.gz'),
                                        name="dwi_brainmask_resample")
        # fs_mriconvert_dwimask.inputs.vox_size = self.config.resampling
        flow.connect([
            (flirt_dwimask, fs_mriconvert_dwimask, [('out_file', 'in_file')]),
            (mr_convert_b0_resample, fs_mriconvert_dwimask,
             [('converted', 'reslice_like')]),
            (fs_mriconvert_dwimask, outputnode,
             [('out_file', 'dwi_brain_mask')])
        ])

        # TODO Implementation of FSL Topup

        if self.config.eddy_current_and_motion_correction:

            if self.config.eddy_correction_algo == 'FSL eddy_correct':

                eddy_correct = pe.Node(interface=fsl.EddyCorrect(ref_num=0, out_file='eddy_corrected.nii.gz'),
                                       name='eddy_correct')

                flow.connect([
                    (processing_input, outputnode, [("bvecs", "bvecs_rot")])
                ])

                if self.config.eddy_correct_motion_correction:

                    mc_flirt = pe.Node(
                        interface=fsl.MCFLIRT(
                            out_file='motion_corrected.nii.gz', ref_vol=0, save_mats=True),
                        name='motion_correction')
                    flow.connect([
                        (mr_convert_b, mc_flirt, [("converted", "in_file")])
                    ])

                    # FIXME rotate b vectors after motion correction (mcflirt)

                    flow.connect([
                        (mc_flirt, eddy_correct, [("out_file", "in_file")])
                    ])
                else:

                    flow.connect([
                        (mr_convert_b, eddy_correct,
                         [("converted", "in_file")])
                    ])

                # # DTK needs fixed number of directions (512)
                # if self.config.start_vol > 0 and self.config.end_vol == self.config.max_vol:
                #     merge_filenames = pe.Node(interface=util.Merge(2),name='merge_files')
                #     flow.connect([
                #                 (split_vol,merge_filenames,[("padding1","in1")]),
                #                 (eddy_correct,merge_filenames,[("eddy_corrected","in2")]),
                #                 ])
                #     merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
                #     flow.connect([
                #                 (merge_filenames,merge,[("out","in_files")]),
                #                 ])
                #     flow.connect([
                #                 (merge,fs_mriconvert,[('merged_file','in_file')]),
                #                 (fs_mriconvert,outputnode,[("out_file","diffusion_preproc")])
                #                 ])
                # elif self.config.start_vol > 0 and self.config.end_vol < self.config.max_vol:
                #     merge_filenames = pe.Node(interface=util.Merge(3),name='merge_files')
                #     flow.connect([
                #                 (split_vol,merge_filenames,[("padding1","in1")]),
                #                 (eddy_correct,merge_filenames,[("eddy_corrected","in2")]),
                #                 (split_vol,merge_filenames,[("padding2","in3")]),
                #                 ])
                #     merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
                #     flow.connect([
                #                 (merge_filenames,merge,[("out","in_files")])
                #                 ])
                #     flow.connect([
                #                 (merge,fs_mriconvert,[('merged_file','in_file')]),
                #                 (fs_mriconvert,outputnode,[("out_file","diffusion_preproc")])
                #                 ])
                # elif self.config.start_vol == 0 and self.config.end_vol < self.config.max_vol:
                #     merge_filenames = pe.Node(interface=util.Merge(2),name='merge_files')
                #     flow.connect([
                #                 (eddy_correct,merge_filenames,[("eddy_corrected","in1")]),
                #                 (split_vol,merge_filenames,[("padding2","in2")]),
                #                 ])
                #     merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
                #     flow.connect([
                #                 (merge_filenames,merge,[("out","in_files")])
                #                 ])
                #     flow.connect([
                #                 (merge,fs_mriconvert,[('merged_file','in_file')]),
                #                 (fs_mriconvert,outputnode,[("out_file","diffusion_preproc")])
                #                 ])
                # else:
                flow.connect([
                    (eddy_correct, fs_mriconvert, [
                     ('eddy_corrected', 'in_file')]),
                    (fs_mriconvert, outputnode, [
                     ("out_file", "diffusion_preproc")])
                ])

            else:
                eddy_correct = pe.Node(interface=cmp_fsl.EddyOpenMP(out_file="eddy_corrected.nii.gz", verbose=True),
                                       name='eddy')
                flow.connect([
                    (mr_convert_b, eddy_correct, [("converted", "in_file")]),
                    (processing_input, eddy_correct, [("bvecs", "bvecs")]),
                    (processing_input, eddy_correct, [("bvals", "bvals")]),
                    (flirt_dwimask, eddy_correct, [("out_file", "mask")]),
                    (indexnode, eddy_correct, [("index", "index")]),
                    (acqpnode, eddy_correct, [("acqp", "acqp")])
                ])

                flow.connect([
                    (eddy_correct, outputnode, [
                     ("bvecs_rotated", "bvecs_rot")])
                ])

                # # DTK needs fixed number of directions (512)
                # if self.config.start_vol > 0 and self.config.end_vol == self.config.max_vol:
                #     merge_filenames = pe.Node(interface=util.Merge(2),name='merge_files')
                #     flow.connect([
                #                 (split_vol,merge_filenames,[("padding1","in1")]),
                #                 (eddy_correct,merge_filenames,[("eddy_corrected","in1")])
                #                 ])
                #     merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
                #     flow.connect([
                #                 (merge_filenames,merge,[("out","in_files")]),
                #                 ])
                #     # resampling diffusion image and setting output type to short
                #     flow.connect([
                #                 (merge,fs_mriconvert,[('merged_file','in_file')]),
                #                 (fs_mriconvert,outputnode,[("out_file","diffusion_preproc")])
                #                 ])
                #
                # elif self.config.start_vol > 0 and self.config.end_vol < self.config.max_vol:
                #     merge_filenames = pe.Node(interface=util.Merge(3),name='merge_files')
                #     flow.connect([
                #                 (split_vol,merge_filenames,[("padding1","in1")]),
                #                 (eddy_correct,merge_filenames,[("eddy_corrected","in1")]),
                #                 (split_vol,merge_filenames,[("padding2","in3")])
                #                 ])
                #     merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
                #     flow.connect([
                #                 (merge_filenames,merge,[("out","in_files")]),
                #                 ])
                #     # resampling diffusion image and setting output type to short
                #     flow.connect([
                #                 (merge,fs_mriconvert,[('merged_file','in_file')]),
                #                 (fs_mriconvert,outputnode,[("out_file","diffusion_preproc")])
                #                 ])
                # elif self.config.start_vol == 0 and self.config.end_vol < self.config.max_vol:
                #     merge_filenames = pe.Node(interface=util.Merge(2),name='merge_files')
                #     flow.connect([
                #                 (eddy_correct,merge_filenames,[("eddy_corrected","in1")]),
                #                 (split_vol,merge_filenames,[("padding2","in2")])
                #                 ])
                #     merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
                #     flow.connect([
                #                 (merge_filenames,merge,[("out","in_files")]),
                #                 ])
                #     # resampling diffusion image and setting output type to short
                #     flow.connect([
                #                 (merge,fs_mriconvert,[('merged_file','in_file')]),
                #                 (fs_mriconvert,outputnode,[("out_file","diffusion_preproc")])
                #                 ])
                # else:
                # resampling diffusion image and setting output type to short
                flow.connect([
                    (eddy_correct, fs_mriconvert, [
                     ('eddy_corrected', 'in_file')]),
                    (fs_mriconvert, outputnode, [
                     ("out_file", "diffusion_preproc")])
                ])
        else:
            # resampling diffusion image and setting output type to short
            flow.connect([
                (mr_convert_b, fs_mriconvert, [("converted", "in_file")]),
                (fs_mriconvert, outputnode, [
                 ("out_file", "diffusion_preproc")]),
                (inputnode, outputnode, [("bvecs", "bvecs_rot")])
            ])

        # #mr_convertB.inputs.grad_fsl = ('bvecs', 'bvals')
        # flow.connect([
        #             (mr_convertF,mr_convertB,[("converted","in_file")])
        #             ])

        # else:
        #     if self.config.start_vol > 0 and self.config.end_vol == self.config.max_vol:
        #         merge_filenames = pe.Node(interface=util.Merge(2),name='merge_files')
        #         flow.connect([
        #                     (split_vol,merge_filenames,[("padding1","in1")]),
        #                     (mc_flirt,merge_filenames,[("out_file","in2")]),
        #                     ])
        #         merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
        #         flow.connect([
        #                     (merge_filenames,merge,[("out","in_files")]),
        #                     (merge,outputnode,[("merged_file","diffusion_preproc")])
        #                     ])
        #     elif self.config.start_vol > 0 and self.config.end_vol < self.config.max_vol:
        #         merge_filenames = pe.Node(interface=util.Merge(3),name='merge_files')
        #         flow.connect([
        #                     (split_vol,merge_filenames,[("padding1","in1")]),
        #                     (mc_flirt,merge_filenames,[("out_file","in2")]),
        #                     (split_vol,merge_filenames,[("padding2","in3")]),
        #                     ])
        #         merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
        #         flow.connect([
        #                     (merge_filenames,merge,[("out","in_files")]),
        #                     (merge,outputnode,[("merged_file","diffusion_preproc")])
        #                     ])
        #     elif self.config.start_vol == 0 and self.config.end_vol < self.config.max_vol:
        #         merge_filenames = pe.Node(interface=util.Merge(2),name='merge_files')
        #         flow.connect([
        #                     (mc_flirt,merge_filenames,[("out_file","in1")]),
        #                     (split_vol,merge_filenames,[("padding2","in2")]),
        #                     ])
        #         merge = pe.Node(interface=fsl.Merge(dimension='t'),name="merge")
        #         flow.connect([
        #                     (merge_filenames,merge,[("out","in_files")]),
        #                     (merge,outputnode,[("merged_file","diffusion_preproc")])
        #                     ])
        #     else:
        #         flow.connect([
        #                     (mc_flirt,outputnode,[("out_file","diffusion_preproc")])
        #                     ])

        fs_mriconvert_5tt = pe.Node(interface=fs.MRIConvert(out_type='niigz', out_file='act_5tt_resampled.nii.gz'),
                                    name="5tt_resample")
        fs_mriconvert_5tt.inputs.vox_size = self.config.resampling
        fs_mriconvert_5tt.inputs.resample_type = self.config.interpolation

        mrtrix_5tt = pe.Node(interface=Generate5tt(
            out_file='mrtrix_5tt.nii.gz'), name='mrtrix_5tt')
        mrtrix_5tt.inputs.algorithm = 'freesurfer'
        # mrtrix_5tt.inputs.algorithm = 'hsvs'

        flow.connect([
            (processing_input, mrtrix_5tt, [('aparc_aseg', 'in_file')]),
            (mrtrix_5tt, fs_mriconvert_5tt, [('out_file', 'in_file')]),
            (fs_mriconvert_5tt, outputnode, [('out_file', 'act_5TT')]),
        ])

        # if self.config.partial_volume_estimation:
        pve_extractor_from_5tt = pe.Node(
            interface=ExtractPVEsFrom5TT(), name='pve_extractor_from_5tt')
        pve_extractor_from_5tt.inputs.pve_csf_file = 'pve_0.nii.gz'
        pve_extractor_from_5tt.inputs.pve_gm_file = 'pve_1.nii.gz'
        pve_extractor_from_5tt.inputs.pve_wm_file = 'pve_2.nii.gz'

        flow.connect([
            (mrtrix_5tt, pve_extractor_from_5tt, [('out_file', 'in_5tt')]),
            (processing_input, pve_extractor_from_5tt, [('T1', 'ref_image')]),
        ])

        fs_mriconvert_PVEs = pe.MapNode(interface=fs.MRIConvert(out_type='niigz'), iterfield=['in_file'],
                                        name="PVEs_resample")
        fs_mriconvert_PVEs.inputs.vox_size = self.config.resampling
        fs_mriconvert_PVEs.inputs.resample_type = self.config.interpolation
        flow.connect([
            (pve_extractor_from_5tt, fs_mriconvert_PVEs,
             [('partial_volume_files', 'in_file')]),
            # (mr_convert_b0_resample,fs_mriconvert_ROIs,[('converted','reslice_like')]),
            (fs_mriconvert_PVEs, outputnode, [
             ("out_file", "partial_volume_files")])
        ])

        fs_mriconvert_gmwmi = pe.Node(interface=fs.MRIConvert(out_type='niigz', out_file='gmwmi_resampled.nii.gz'),
                                      name="gmwmi_resample")
        fs_mriconvert_gmwmi.inputs.vox_size = self.config.resampling
        fs_mriconvert_gmwmi.inputs.resample_type = self.config.interpolation

        mrtrix_gmwmi = pe.Node(interface=GenerateGMWMInterface(
            out_file='gmwmi.nii.gz'), name='mrtrix_gmwmi')

        update_gmwmi = pe.Node(
            interface=UpdateGMWMInterfaceSeeding(), name='update_gmwmi')
        update_gmwmi.inputs.out_gmwmi_file = 'gmwmi_proc.nii.gz'

        flow.connect([
            (mrtrix_5tt, mrtrix_gmwmi, [('out_file', 'in_file')]),
            (mrtrix_gmwmi, update_gmwmi, [('out_file', 'in_gmwmi_file')]),
            (processing_input, update_gmwmi, [
             ('roi_volumes', 'in_roi_volumes')]),
            (update_gmwmi, fs_mriconvert_gmwmi,
             [('out_gmwmi_file', 'in_file')]),
            (fs_mriconvert_gmwmi, outputnode, [('out_file', 'gmwmi')]),
        ])
# ---- Exemple #26 ----
def QA_workflow(QAc, c=foo, name='QA'):
    """ Workflow that generates a Quality Assurance Report

    Parameters
    ----------
    QAc : QA configuration object (subjects list, sink_dir, json_sink,
        test_mode, use_custom_ROI_list_file, custom_ROI_list_file)
    c : preprocessing configuration object (TR, surf_dir, sink_dir,
        motion_correct_node, norm_thresh, use_metadata).
        NOTE(review): the default ``foo`` must be a module-level object —
        verify it is defined where this function is imported from.
    name : name of workflow

    Inputs
    ------
    inputspec.subject_id : Subject id
    inputspec.config_params : configuration parameters to print in PDF (in the form of a 2D List)
    inputspec.in_file : original functional run
    inputspec.art_file : art outlier file
    inputspec.reg_file : bbregister file
    inputspec.tsnr_detrended : detrended image
    inputspec.tsnr : signal-to-noise ratio image
    inputspec.tsnr_mean : mean image
    inputspec.tsnr_stddev : standard deviation image
    inputspec.ADnorm : norm components file from art
    inputspec.TR : repetition time of acquisition
    inputspec.sd : freesurfer subjects directory

    Returns
    -------
    workflow : the constructed nipype Workflow (its graph is also written
        to disk via ``write_graph()`` before returning)
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util

    from nipype.interfaces.freesurfer import ApplyVolTransform
    from nipype.interfaces import freesurfer as fs
    from nipype.interfaces.io import FreeSurferSource

    from ...scripts.QA_utils import (plot_ADnorm, tsdiffana, tsnr_roi,
                                     combine_table, reduce_table, art_output,
                                     plot_motion, plot_ribbon, plot_anat,
                                     overlay_new, overlay_dB,
                                     spectrum_ts_table)

    from ......utils.reportsink.io import ReportSink
    # Define Workflow

    workflow = pe.Workflow(name=name)

    # Identity node collecting every external input the report needs.
    inputspec = pe.Node(interface=util.IdentityInterface(fields=[
        'subject_id', 'config_params', 'in_file', 'art_file', 'motion_plots',
        'reg_file', 'tsnr', 'tsnr_detrended', 'tsnr_stddev', 'ADnorm', 'TR',
        'sd'
    ]),
                        name='inputspec')

    # Iterate over subjects; test mode restricts the run to the first subject.
    infosource = pe.Node(util.IdentityInterface(fields=['subject_id']),
                         name='subject_names')
    if QAc.test_mode:
        infosource.iterables = ('subject_id', [QAc.subjects[0]])
    else:
        infosource.iterables = ('subject_id', QAc.subjects)

    # Grabbers for preprocessed outputs and the original functional data.
    # NOTE(review): preproc_datagrabber / get_dataflow / sort / pickfirst /
    # to1table / totable are module-level helpers not visible here — verify
    # they are in scope where this function lives.
    datagrabber = preproc_datagrabber(c)

    datagrabber.inputs.node_type = c.motion_correct_node

    orig_datagrabber = get_dataflow(c)

    workflow.connect(infosource, 'subject_id', datagrabber, 'subject_id')

    workflow.connect(infosource, 'subject_id', orig_datagrabber, 'subject_id')

    workflow.connect(orig_datagrabber, 'func', inputspec, 'in_file')
    workflow.connect(infosource, 'subject_id', inputspec, 'subject_id')

    # Grabbed file lists are passed through ``sort`` so runs line up across
    # the MapNode iterfields below.
    workflow.connect(datagrabber, ('outlier_files', sort), inputspec,
                     'art_file')
    workflow.connect(datagrabber, ('reg_file', sort), inputspec, 'reg_file')
    workflow.connect(datagrabber, ('tsnr', sort), inputspec, 'tsnr')
    workflow.connect(datagrabber, ('tsnr_stddev', sort), inputspec,
                     'tsnr_stddev')
    workflow.connect(datagrabber, ('tsnr_detrended', sort), inputspec,
                     'tsnr_detrended')
    workflow.connect(datagrabber, ('art_norm', sort), inputspec, 'ADnorm')

    # TR either comes from the config or is read from the first subject's
    # preprocessing metadata JSON.
    if not c.use_metadata:
        inputspec.inputs.TR = c.TR
    else:
        from .....base import load_json
        metadata_file = os.path.join(c.sink_dir, QAc.subjects[0],
                                     'preproc/metadata/metadata.json')
        meta_tr = load_json(metadata_file)["tr"]
        inputspec.inputs.TR = meta_tr

    inputspec.inputs.sd = c.surf_dir

    # Define Nodes

    # Per-run motion plots (translations + rotations).
    plot_m = pe.MapNode(util.Function(input_names=['motion_parameters'],
                                      output_names=['fname_t', 'fname_r'],
                                      function=plot_motion),
                        name="motion_plots",
                        iterfield=['motion_parameters'])

    workflow.connect(datagrabber, ('motion_parameters', sort), plot_m,
                     'motion_parameters')
    #workflow.connect(plot_m, 'fname',inputspec,'motion_plots')

    # Time-series difference analysis plots per functional run.
    tsdiff = pe.MapNode(util.Function(input_names=['img'],
                                      output_names=['out_file'],
                                      function=tsdiffana),
                        name='tsdiffana',
                        iterfield=["img"])

    # Summarize ART outlier detection into a table + intensity plot.
    art_info = pe.MapNode(
        util.Function(input_names=['art_file', 'intensity_file', 'stats_file'],
                      output_names=['table', 'out', 'intensity_plot'],
                      function=art_output),
        name='art_output',
        iterfield=["art_file", "intensity_file", "stats_file"])

    fssource = pe.Node(interface=FreeSurferSource(), name='fssource')

    # Plot the FreeSurfer cortical ribbon for visual QA.
    plotribbon = pe.Node(util.Function(input_names=['Brain'],
                                       output_names=['images'],
                                       function=plot_ribbon),
                         name="plot_ribbon")

    workflow.connect(fssource, 'ribbon', plotribbon, 'Brain')

    plotanat = pe.Node(util.Function(input_names=['brain'],
                                     output_names=['images'],
                                     function=plot_anat),
                       name="plot_anat")
    plotmask = plotanat.clone('plot_mask')
    workflow.connect(datagrabber, 'mask', plotmask, 'brain')
    # ROI tables of TSNR standard deviation (per aparc+aseg region).
    roidevplot = tsnr_roi(plot=False,
                          name='tsnr_stddev_roi',
                          roi=['all'],
                          onsets=False)

    if not c.use_metadata:
        roidevplot.inputs.inputspec.TR = c.TR
    else:
        from .....base import load_json
        metadata_file = os.path.join(c.sink_dir, QAc.subjects[0],
                                     'preproc/metadata/metadata.json')
        meta_tr = load_json(metadata_file)["tr"]
        roidevplot.inputs.inputspec.TR = meta_tr

    # ROI tables of TSNR itself.
    roisnrplot = tsnr_roi(plot=False,
                          name='SNR_roi',
                          roi=['all'],
                          onsets=False)

    if not c.use_metadata:
        roisnrplot.inputs.inputspec.TR = c.TR
    else:
        from .....base import load_json
        metadata_file = os.path.join(c.sink_dir, QAc.subjects[0],
                                     'preproc/metadata/metadata.json')
        meta_tr = load_json(metadata_file)["tr"]
        roisnrplot.inputs.inputspec.TR = meta_tr

    workflow.connect(fssource, ('aparc_aseg', pickfirst), roisnrplot,
                     'inputspec.aparc_aseg')
    workflow.connect(fssource, ('aparc_aseg', pickfirst), roidevplot,
                     'inputspec.aparc_aseg')

    workflow.connect(infosource, 'subject_id', roidevplot, 'inputspec.subject')
    workflow.connect(infosource, 'subject_id', roisnrplot, 'inputspec.subject')

    # Merge the stddev and SNR ROI tables into one image table.
    tablecombine = pe.MapNode(util.Function(
        input_names=['roidev', 'roisnr', 'imagetable'],
        output_names=['imagetable'],
        function=combine_table),
                              name='combinetable',
                              iterfield=['roidev', 'roisnr', 'imagetable'])

    # Optionally reduce the table to a custom ROI list (LUT file).
    tablereduce = pe.MapNode(util.Function(
        input_names=['imagetable', 'custom_LUT_file'],
        output_names=['reduced_imagetable'],
        function=reduce_table),
                             name='reducetable',
                             iterfield=['imagetable'])

    # Plot the composite-motion norm trace with the outlier threshold.
    adnormplot = pe.MapNode(util.Function(
        input_names=['ADnorm', 'TR', 'norm_thresh', 'out'],
        output_names=['plot'],
        function=plot_ADnorm),
                            name='ADnormplot',
                            iterfield=['ADnorm', 'out'])
    adnormplot.inputs.norm_thresh = c.norm_thresh
    workflow.connect(art_info, 'out', adnormplot, 'out')

    # Convert the FreeSurfer orig.mgz to NIfTI to use as a background image
    # (out_type/in_type are set further below under "Define Inputs").
    convert = pe.Node(interface=fs.MRIConvert(), name='converter')

    # Resample the TSNR image into anatomical space via the bbregister file.
    voltransform = pe.MapNode(interface=ApplyVolTransform(),
                              name='register',
                              iterfield=['source_file'])

    # TSNR overlay on the anatomical (linear scale, threshold 20).
    overlaynew = pe.MapNode(util.Function(
        input_names=['stat_image', 'background_image', 'threshold', "dB"],
        output_names=['fnames'],
        function=overlay_dB),
                            name='overlay_new',
                            iterfield=['stat_image'])
    overlaynew.inputs.dB = False
    overlaynew.inputs.threshold = 20

    # CompCor mask overlays (t- and a-compcor) on the anatomical.
    overlaymask = pe.MapNode(util.Function(
        input_names=['stat_image', 'background_image', 'threshold'],
        output_names=['fnames'],
        function=overlay_new),
                             name='overlay_mask',
                             iterfield=['stat_image'])
    overlaymask.inputs.threshold = 0.5
    workflow.connect(convert, 'out_file', overlaymask, 'background_image')
    overlaymask2 = overlaymask.clone('acompcor_image')
    workflow.connect(convert, 'out_file', overlaymask2, 'background_image')
    workflow.connect(datagrabber, 'tcompcor', overlaymask, 'stat_image')
    workflow.connect(datagrabber, 'acompcor', overlaymask2, 'stat_image')

    workflow.connect(datagrabber, ('mean_image', sort), plotanat, 'brain')

    # Power-spectrum / time-series table sub-workflow.
    ts_and_spectra = spectrum_ts_table()

    # Segstats of the detrended time series per aparc+aseg region.
    timeseries_segstats = tsnr_roi(plot=False,
                                   name='timeseries_roi',
                                   roi=['all'],
                                   onsets=False)
    workflow.connect(inputspec, 'tsnr_detrended', timeseries_segstats,
                     'inputspec.tsnr_file')
    workflow.connect(inputspec, 'reg_file', timeseries_segstats,
                     'inputspec.reg_file')
    workflow.connect(infosource, 'subject_id', timeseries_segstats,
                     'inputspec.subject')
    workflow.connect(fssource, ('aparc_aseg', pickfirst), timeseries_segstats,
                     'inputspec.aparc_aseg')

    if not c.use_metadata:
        timeseries_segstats.inputs.inputspec.TR = c.TR
        ts_and_spectra.inputs.inputspec.tr = c.TR
    else:
        from .....base import load_json
        metadata_file = os.path.join(c.sink_dir, QAc.subjects[0],
                                     'preproc/metadata/metadata.json')
        meta_tr = load_json(metadata_file)["tr"]
        timeseries_segstats.inputs.inputspec.TR = meta_tr
        ts_and_spectra.inputs.inputspec.tr = meta_tr

    workflow.connect(timeseries_segstats, 'outputspec.roi_file',
                     ts_and_spectra, 'inputspec.stats_file')

    # ReportSink assembles all figures/tables into the final PDF report;
    # orderfields fixes the section order in the document.
    write_rep = pe.Node(interface=ReportSink(orderfields=[
        'Introduction', 'in_file', 'config_params', 'Art_Detect',
        'Global_Intensity', 'Mean_Functional', 'Ribbon', 'Mask',
        'motion_plot_translations', 'motion_plot_rotations', 'tsdiffana',
        'ADnorm', 'A_CompCor', 'T_CompCor', 'TSNR_Images', 'tsnr_roi_table'
    ]),
                        name='report_sink')
    write_rep.inputs.Introduction = "Quality Assurance Report for fMRI preprocessing."
    write_rep.inputs.base_directory = os.path.join(QAc.sink_dir)
    write_rep.inputs.report_name = "Preprocessing_Report"
    write_rep.inputs.json_sink = QAc.json_sink
    workflow.connect(infosource, 'subject_id', write_rep, 'container')
    workflow.connect(plotanat, 'images', write_rep, "Mean_Functional")
    write_rep.inputs.table_as_para = False
    # Define Inputs

    convert.inputs.out_type = 'niigz'
    convert.inputs.in_type = 'mgz'

    # Define Connections

    workflow.connect(inputspec, 'TR', adnormplot, 'TR')
    workflow.connect(inputspec, 'subject_id', fssource, 'subject_id')
    workflow.connect(inputspec, 'sd', fssource, 'subjects_dir')
    workflow.connect(inputspec, 'in_file', write_rep, 'in_file')
    workflow.connect(datagrabber, 'art_intensity', art_info, 'intensity_file')
    workflow.connect(datagrabber, ('art_stats', sort), art_info, 'stats_file')
    workflow.connect(inputspec, 'art_file', art_info, 'art_file')
    workflow.connect(art_info, ('table', to1table), write_rep, 'Art_Detect')
    workflow.connect(ts_and_spectra, 'outputspec.imagetable', tablecombine,
                     'imagetable')
    workflow.connect(art_info, 'intensity_plot', write_rep, 'Global_Intensity')
    workflow.connect(plot_m, 'fname_t', write_rep, 'motion_plot_translations')
    workflow.connect(plot_m, 'fname_r', write_rep, 'motion_plot_rotations')
    workflow.connect(inputspec, 'in_file', tsdiff, 'img')
    workflow.connect(tsdiff, "out_file", write_rep, "tsdiffana")
    workflow.connect(inputspec, ('config_params', totable), write_rep,
                     'config_params')
    workflow.connect(inputspec, 'reg_file', roidevplot, 'inputspec.reg_file')
    workflow.connect(inputspec, 'tsnr_stddev', roidevplot,
                     'inputspec.tsnr_file')
    workflow.connect(roidevplot, 'outputspec.roi_table', tablecombine,
                     'roidev')
    workflow.connect(inputspec, 'reg_file', roisnrplot, 'inputspec.reg_file')
    workflow.connect(inputspec, 'tsnr', roisnrplot, 'inputspec.tsnr_file')
    workflow.connect(roisnrplot, 'outputspec.roi_table', tablecombine,
                     'roisnr')

    # Choose between the full ROI table and a custom-reduced one.
    if QAc.use_custom_ROI_list_file:
        workflow.connect(tablecombine, 'imagetable', tablereduce, 'imagetable')
        tablereduce.inputs.custom_LUT_file = QAc.custom_ROI_list_file
        workflow.connect(tablereduce, ('reduced_imagetable', to1table),
                         write_rep, 'tsnr_roi_table')
    else:
        workflow.connect(tablecombine, ('imagetable', to1table), write_rep,
                         'tsnr_roi_table')

    workflow.connect(inputspec, 'ADnorm', adnormplot, 'ADnorm')
    workflow.connect(adnormplot, 'plot', write_rep, 'ADnorm')
    workflow.connect(fssource, 'orig', convert, 'in_file')
    workflow.connect(convert, 'out_file', voltransform, 'target_file')
    workflow.connect(inputspec, 'reg_file', voltransform, 'reg_file')
    workflow.connect(inputspec, 'tsnr', voltransform, 'source_file')
    workflow.connect(plotribbon, 'images', write_rep, 'Ribbon')
    workflow.connect(voltransform, 'transformed_file', overlaynew,
                     'stat_image')
    workflow.connect(convert, 'out_file', overlaynew, 'background_image')

    workflow.connect(overlaynew, 'fnames', write_rep, 'TSNR_Images')
    workflow.connect(overlaymask, 'fnames', write_rep, 'T_CompCor')
    workflow.connect(overlaymask2, 'fnames', write_rep, 'A_CompCor')
    workflow.connect(plotmask, 'images', write_rep, 'Mask')

    # Side effect: writes the workflow graph image to the working directory.
    workflow.write_graph()
    return workflow
Exemple #27
0
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Constructs the DWI connectome pipeline: response/FOD estimation,
        tractography, parcellation label conversion and connectome
        generation, plus an optional T1-to-B0 registration branch when
        ``self.parameters['dwi_space'] == 'b0'``.

        Notes:
            - If `FSLOUTPUTTYPE` environment variable is not set, `nipype` takes
            NIFTI by default.

        Todo:
            - [x] Detect space automatically.
            - [ ] Allow for custom parcellations (See TODOs in utils).

        """
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as npe
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.freesurfer as fs
        import nipype.interfaces.mrtrix3 as mrtrix3
        from clinica.lib.nipype.interfaces.mrtrix.preprocess import MRTransform
        from clinica.lib.nipype.interfaces.mrtrix3.reconst import EstimateFOD
        from clinica.lib.nipype.interfaces.mrtrix3.tracking import Tractography
        from clinica.utils.exceptions import ClinicaException, ClinicaCAPSError
        from clinica.utils.stream import cprint
        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
        from clinica.utils.mri_registration import convert_flirt_transformation_to_mrtrix_transformation

        # cprint('Building the pipeline...')

        # Nodes
        # =====

        # B0 Extraction (only if space=b0)
        # -------------
        # Split the 4D DWI along time and keep the first volume as the B0.
        split_node = npe.Node(name="Reg-0-DWI-B0Extraction",
                              interface=fsl.Split())
        split_node.inputs.output_type = "NIFTI_GZ"
        split_node.inputs.dimension = 't'
        select_node = npe.Node(name="Reg-0-DWI-B0Selection",
                               interface=niu.Select())
        # NOTE(review): index 0 assumes the first volume is a b=0 volume —
        # verify against the acquisition/preprocessing convention.
        select_node.inputs.index = 0

        # B0 Brain Extraction (only if space=b0)
        # -------------------
        mask_node = npe.Node(name="Reg-0-DWI-BrainMasking",
                             interface=fsl.ApplyMask())
        mask_node.inputs.output_type = "NIFTI_GZ"

        # T1-to-B0 Registration (only if space=b0)
        # ---------------------
        # Rigid (6-dof) FLIRT registration using normalized mutual information.
        t12b0_reg_node = npe.Node(name="Reg-1-T12B0Registration",
                                  interface=fsl.FLIRT(
                                      dof=6,
                                      interp='spline',
                                      cost='normmi',
                                      cost_func='normmi',
                                  ))
        t12b0_reg_node.inputs.output_type = "NIFTI_GZ"

        # MGZ File Conversion (only if space=b0)
        # -------------------
        # FreeSurfer MGZ -> NIfTI so FSL tools can consume the T1 and WM mask.
        t1_brain_conv_node = npe.Node(name="Reg-0-T1-T1BrainConvertion",
                                      interface=fs.MRIConvert())
        wm_mask_conv_node = npe.Node(name="Reg-0-T1-WMMaskConvertion",
                                     interface=fs.MRIConvert())

        # WM Transformation (only if space=b0)
        # -----------------
        # Apply the T1-to-B0 affine to the WM mask (no re-estimation).
        wm_transform_node = npe.Node(name="Reg-2-WMTransformation",
                                     interface=fsl.ApplyXFM())
        wm_transform_node.inputs.apply_xfm = True

        # Nodes Generation
        # ----------------
        # Convert atlas labels to MRtrix connectome node indices, one map
        # per atlas (hence the MapNode).
        label_convert_node = npe.MapNode(
            name="0-LabelsConversion",
            iterfield=['in_file', 'in_config', 'in_lut', 'out_file'],
            interface=mrtrix3.LabelConvert())
        label_convert_node.inputs.in_config = utils.get_conversion_luts()
        label_convert_node.inputs.in_lut = utils.get_luts()

        # FSL flirt matrix to MRtrix matrix Conversion (only if space=b0)
        # --------------------------------------------
        fsl2mrtrix_conv_node = npe.Node(
            name='Reg-2-FSL2MrtrixConversion',
            interface=niu.Function(
                input_names=[
                    'in_source_image', 'in_reference_image', 'in_flirt_matrix',
                    'name_output_matrix'
                ],
                output_names=['out_mrtrix_matrix'],
                function=convert_flirt_transformation_to_mrtrix_transformation)
        )

        # Parc. Transformation (only if space=b0)
        # --------------------
        # Apply the (converted) transform to each parcellation without
        # resampling the label values.
        parc_transform_node = npe.MapNode(
            name="Reg-2-ParcTransformation",
            iterfield=["in_files", "out_filename"],
            interface=MRTransform())

        # Response Estimation
        # -------------------
        resp_estim_node = npe.Node(name="1a-ResponseEstimation",
                                   interface=mrtrix3.ResponseSD())
        resp_estim_node.inputs.algorithm = 'tournier'

        # FOD Estimation
        # --------------
        fod_estim_node = npe.Node(name="1b-FODEstimation",
                                  interface=EstimateFOD())
        fod_estim_node.inputs.algorithm = 'csd'

        # Tracts Generation
        # -----------------
        tck_gen_node = npe.Node(name="2-TractsGeneration",
                                interface=Tractography())
        tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']
        tck_gen_node.inputs.algorithm = 'iFOD2'

        # BUG: Info package does not exist
        # from nipype.interfaces.mrtrix3.base import Info
        # from distutils.version import LooseVersion
        #
        # if Info.looseversion() >= LooseVersion("3.0"):
        #     tck_gen_node.inputs.select = self.parameters['n_tracks']
        # elif Info.looseversion() <= LooseVersion("0.4"):
        #     tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']
        # else:
        #     from clinica.utils.exceptions import ClinicaException
        #     raise ClinicaException("Your MRtrix version is not supported.")

        # Connectome Generation
        # ---------------------
        # only the parcellation and output filename should be iterable, the tck
        # file stays the same.
        conn_gen_node = npe.MapNode(name="3-ConnectomeGeneration",
                                    iterfield=['in_parc', 'out_file'],
                                    interface=mrtrix3.BuildConnectome())

        # Print begin message
        # -------------------
        print_begin_message = npe.MapNode(interface=niu.Function(
            input_names=['in_bids_or_caps_file'],
            function=utils.print_begin_pipeline),
                                          iterfield='in_bids_or_caps_file',
                                          name='WriteBeginMessage')

        # Print end message
        # -----------------
        print_end_message = npe.MapNode(interface=niu.Function(
            input_names=['in_bids_or_caps_file', 'final_file'],
            function=utils.print_end_pipeline),
                                        iterfield=['in_bids_or_caps_file'],
                                        name='WriteEndMessage')

        # CAPS File names Generation
        # --------------------------
        # Derive all output filenames from the DWI filename so outputs land
        # in the expected CAPS layout.
        caps_filenames_node = npe.Node(
            name='CAPSFilenamesGeneration',
            interface=niu.Function(input_names='dwi_file',
                                   output_names=self.get_output_fields(),
                                   function=utils.get_caps_filenames))

        # Connections
        # ===========
        # Computation of the diffusion model, tractography & connectome
        # -------------------------------------------------------------
        self.connect([
            (self.input_node, print_begin_message,
             [('dwi_file', 'in_bids_or_caps_file')]),  # noqa
            (self.input_node, caps_filenames_node, [('dwi_file', 'dwi_file')]),
            # Response Estimation
            (self.input_node, resp_estim_node, [('dwi_file', 'in_file')]
             ),  # Preproc. DWI # noqa
            (self.input_node, resp_estim_node,
             [('dwi_brainmask_file', 'in_mask')]),  # B0 brain mask # noqa
            (self.input_node, resp_estim_node, [('grad_fsl', 'grad_fsl')
                                                ]),  # bvecs and bvals # noqa
            (caps_filenames_node, resp_estim_node,
             [('response', 'wm_file')]),  # output response filename # noqa
            # FOD Estimation
            (self.input_node, fod_estim_node, [('dwi_file', 'in_file')]
             ),  # Preproc. DWI # noqa
            (resp_estim_node, fod_estim_node,
             [('wm_file', 'wm_txt')]),  # Response (txt file) # noqa
            (self.input_node, fod_estim_node,
             [('dwi_brainmask_file', 'mask_file')]),  # B0 brain mask # noqa
            (self.input_node, fod_estim_node,
             [('grad_fsl', 'grad_fsl')]),  # T1-to-B0 matrix file # noqa
            (caps_filenames_node, fod_estim_node,
             [('fod', 'wm_odf')]),  # output odf filename # noqa
            # Tracts Generation
            (fod_estim_node, tck_gen_node, [('wm_odf', 'in_file')]
             ),  # ODF file # noqa
            (caps_filenames_node, tck_gen_node,
             [('tracts', 'out_file')]),  # output tck filename # noqa
            # Label Conversion
            (self.input_node, label_convert_node, [('atlas_files', 'in_file')]
             ),  # atlas image files # noqa
            (caps_filenames_node, label_convert_node, [
                ('nodes', 'out_file')
            ]),  # converted atlas image filenames # noqa
            # Connectomes Generation
            (tck_gen_node, conn_gen_node, [('out_file', 'in_file')]),  # noqa
            (caps_filenames_node, conn_gen_node, [('connectomes', 'out_file')
                                                  ]),  # noqa
        ])
        # Registration T1-DWI (only if space=b0)
        # -------------------
        if self.parameters['dwi_space'] == 'b0':
            self.connect([
                # MGZ Files Conversion
                (self.input_node, t1_brain_conv_node, [('t1_brain_file',
                                                        'in_file')]),  # noqa
                (self.input_node, wm_mask_conv_node, [('wm_mask_file',
                                                       'in_file')]),  # noqa
                # B0 Extraction
                (self.input_node, split_node, [('dwi_file', 'in_file')]
                 ),  # noqa
                (split_node, select_node, [('out_files', 'inlist')]),  # noqa
                # Masking
                (select_node, mask_node, [('out', 'in_file')]),  # B0 # noqa
                (self.input_node, mask_node,
                 [('dwi_brainmask_file', 'mask_file')]),  # Brain mask # noqa
                # T1-to-B0 Registration
                (t1_brain_conv_node, t12b0_reg_node, [('out_file', 'in_file')]
                 ),  # Brain # noqa
                (mask_node, t12b0_reg_node, [('out_file', 'reference')
                                             ]),  # B0 brain-masked # noqa
                # WM Transformation
                (wm_mask_conv_node, wm_transform_node,
                 [('out_file', 'in_file')]),  # Brain mask # noqa
                (mask_node, wm_transform_node, [('out_file', 'reference')
                                                ]),  # BO brain-masked # noqa
                (t12b0_reg_node, wm_transform_node, [
                    ('out_matrix_file', 'in_matrix_file')
                ]),  # T1-to-B0 matrix file # noqa
                # FSL flirt matrix to MRtrix matrix Conversion
                (t1_brain_conv_node, fsl2mrtrix_conv_node,
                 [('out_file', 'in_source_image')]),  # noqa
                (mask_node, fsl2mrtrix_conv_node,
                 [('out_file', 'in_reference_image')]),  # noqa
                (t12b0_reg_node, fsl2mrtrix_conv_node,
                 [('out_matrix_file', 'in_flirt_matrix')]),  # noqa
                # Apply registration without resampling on parcellations
                (label_convert_node, parc_transform_node,
                 [('out_file', 'in_files')]),  # noqa
                (fsl2mrtrix_conv_node, parc_transform_node,
                 [('out_mrtrix_matrix', 'linear_transform')]),  # noqa
                (caps_filenames_node, parc_transform_node,
                 [('nodes', 'out_filename')]),  # noqa
            ])
        # Special care for Parcellation & WM mask
        # ---------------------------------------
        # Seed image and parcellations depend on which space the DWI is in:
        # transformed versions for b0 space, originals for T1w space.
        if self.parameters['dwi_space'] == 'b0':
            self.connect([
                (wm_transform_node, tck_gen_node, [('out_file', 'seed_image')
                                                   ]),  # noqa
                (parc_transform_node, conn_gen_node, [('out_file', 'in_parc')
                                                      ]),  # noqa
                (parc_transform_node, self.output_node, [('out_file', 'nodes')
                                                         ]),  # noqa
            ])
        elif self.parameters['dwi_space'] == 'T1w':
            self.connect([
                (self.input_node, tck_gen_node, [('wm_mask_file', 'seed_image')
                                                 ]),  # noqa
                (label_convert_node, conn_gen_node, [('out_file', 'in_parc')
                                                     ]),  # noqa
                (label_convert_node, self.output_node, [('out_file', 'nodes')
                                                        ]),  # noqa
            ])
        else:
            raise ClinicaCAPSError(
                'Bad preprocessed DWI space. Please check your CAPS '
                'folder.')
        # Outputs
        # -------
        self.connect([
            (resp_estim_node, self.output_node, [('wm_file', 'response')]),
            (fod_estim_node, self.output_node, [('wm_odf', 'fod')]),
            (tck_gen_node, self.output_node, [('out_file', 'tracts')]),
            (conn_gen_node, self.output_node, [('out_file', 'connectomes')]),
            (self.input_node, print_end_message, [('dwi_file',
                                                   'in_bids_or_caps_file')]),
            (conn_gen_node, print_end_message, [('out_file', 'final_file')]),
        ])
def create_coreg_pipeline(name='coreg'):
    """Build a workflow that coregisters an EPI median image to a highres
    MP2RAGE anatomy via the lowres FreeSurfer anatomy.

    Steps
    -----
    1. Convert the FreeSurfer brain (mgz) to NIfTI as lowres reference.
    2. bbregister the EPI median to the lowres anatomy (t2 contrast).
    3. bbregister the highres MP2RAGE to the lowres anatomy (t1 contrast).
    4. Invert (3), concatenate with (2) -> epi2highres (FSL .mat).
    5. Convert the concatenated transform to ITK format and apply it to
       the EPI median with ANTs (BSpline interpolation).

    Parameters
    ----------
    name : str
        Name of the returned nipype workflow (default 'coreg').

    Returns
    -------
    coreg : nipype.pipeline.engine.Workflow
        Workflow exposing 'inputnode' (epi_median, fs_subjects_dir,
        fs_subject_id, uni_highres) and 'outputnode' (registered images
        and transforms in FSL/FreeSurfer/ITK formats).
    """
    # fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # initiate workflow
    # fix: honour the caller-supplied name instead of hard-coding 'coreg'
    coreg = Workflow(name=name)

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=[
        'epi_median',
        'fs_subjects_dir',
        'fs_subject_id',
        'uni_highres',
    ]),
                     name='inputnode')

    # outputnode
    outputnode = Node(util.IdentityInterface(fields=[
        'uni_lowres', 'epi2lowres', 'epi2lowres_mat', 'epi2lowres_dat',
        'highres2lowres', 'highres2lowres_mat', 'highres2lowres_dat',
        'epi2highres_lin', 'epi2highres_lin_mat', 'epi2highres_lin_itk'
    ]),
                      name='outputnode')

    # convert mgz head file for reference
    fs_import = Node(interface=nio.FreeSurferSource(), name='fs_import')

    brain_convert = Node(fs.MRIConvert(out_type='niigz',
                                       out_file='uni_lowres.nii.gz'),
                         name='brain_convert')

    coreg.connect([(inputnode, fs_import, [('fs_subjects_dir', 'subjects_dir'),
                                           ('fs_subject_id', 'subject_id')]),
                   (fs_import, brain_convert, [('brain', 'in_file')]),
                   (brain_convert, outputnode, [('out_file', 'uni_lowres')])])

    # linear registration epi median to lowres mp2rage with bbregister
    bbregister_epi = Node(fs.BBRegister(contrast_type='t2',
                                        out_fsl_file='epi2lowres.mat',
                                        out_reg_file='epi2lowres.dat',
                                        registered_file='epi2lowres.nii.gz',
                                        init='fsl',
                                        epi_mask=True),
                          name='bbregister_epi')

    coreg.connect([
        (inputnode, bbregister_epi, [('fs_subjects_dir', 'subjects_dir'),
                                     ('fs_subject_id', 'subject_id'),
                                     ('epi_median', 'source_file')]),
        (bbregister_epi, outputnode, [('out_fsl_file', 'epi2lowres_mat'),
                                      ('out_reg_file', 'epi2lowres_dat'),
                                      ('registered_file', 'epi2lowres')])
    ])

    # linear register highres mp2rage to lowres mp2rage
    bbregister_anat = Node(fs.BBRegister(
        contrast_type='t1',
        out_fsl_file='highres2lowres.mat',
        out_reg_file='highres2lowres.dat',
        registered_file='highres2lowres.nii.gz',
        init='fsl'),
                           name='bbregister_anat')

    coreg.connect([
        (inputnode, bbregister_anat, [('fs_subjects_dir', 'subjects_dir'),
                                      ('fs_subject_id', 'subject_id'),
                                      ('uni_highres', 'source_file')]),
        (bbregister_anat, outputnode, [('out_fsl_file', 'highres2lowres_mat'),
                                       ('out_reg_file', 'highres2lowres_dat'),
                                       ('registered_file', 'highres2lowres')])
    ])

    # invert highres2lowres transform
    invert = Node(fsl.ConvertXFM(invert_xfm=True), name='invert')
    coreg.connect([(bbregister_anat, invert, [('out_fsl_file', 'in_file')])])

    # concatenate epi2highres transforms:
    # epi2lowres followed by lowres2highres (the inverted anat transform)
    concat = Node(fsl.ConvertXFM(concat_xfm=True,
                                 out_file='epi2highres_lin.mat'),
                  name='concat')
    # fix: output field was misspelled 'epi2higres_lin_mat', which does not
    # exist on the outputnode IdentityInterface and breaks the connection
    coreg.connect([(bbregister_epi, concat, [('out_fsl_file', 'in_file')]),
                   (invert, concat, [('out_file', 'in_file2')]),
                   (concat, outputnode, [('out_file', 'epi2highres_lin_mat')])])

    # convert epi2highres transform into itk format
    itk = Node(interface=c3.C3dAffineTool(fsl2ras=True,
                                          itk_transform='epi2highres_lin.txt'),
               name='itk')

    coreg.connect([(inputnode, itk, [('epi_median', 'source_file'),
                                     ('uni_highres', 'reference_file')]),
                   (concat, itk, [('out_file', 'transform_file')]),
                   (itk, outputnode, [('itk_transform', 'epi2highres_lin_itk')
                                      ])])

    # transform epi to highres
    epi2highres = Node(ants.ApplyTransforms(
        dimension=3,
        output_image='epi2highres_lin.nii.gz',
        interpolation='BSpline',
    ),
                       name='epi2highres')

    coreg.connect([
        (inputnode, epi2highres, [('uni_highres', 'reference_image'),
                                  ('epi_median', 'input_image')]),
        (itk, epi2highres, [('itk_transform', 'transforms')]),
        (epi2highres, outputnode, [('output_image', 'epi2highres_lin')])
    ])

    return coreg
"""
Set up volume normalization workflow
------------------------------------

The volume analysis is performed in individual space. Therefore, post analysis
we normalize the contrast images to MNI space.
"""

volnorm = pe.Workflow(name='volnormconimages')
"""
Use :class:`nipype.interfaces.freesurfer.MRIConvert` to convert the brainmask,
an mgz file and the contrast images (nifti-1 img/hdr pairs), to single volume
nifti images.
"""

convert = pe.Node(interface=fs.MRIConvert(out_type='nii'), name='convert2nii')
convert2 = pe.MapNode(interface=fs.MRIConvert(out_type='nii'),
                      iterfield=['in_file'],
                      name='convertimg2nii')
"""
Use :class:`nipype.interfaces.spm.Segment` to segment the structural image and
generate the transformation file to MNI space.

.. note::

   Segment takes longer than usual because the nose is wrapped behind
   the head in the structural image.
"""

segment = pe.Node(interface=spm.Segment(), name='segment')
"""
Exemple #30
0
    def create_workflow(self, flow, inputnode, outputnode):
        """Wire the diffusion resampling, reconstruction and tracking stage.

        Resamples the diffusion image, WM mask and (non-DTK) ROI volumes to
        ``self.config.resampling``, then instantiates the reconstruction and
        tracking sub-workflows for the configured processing tool (DTK,
        MRtrix, Camino, FSL or Gibbs) and connects everything between
        ``inputnode`` and ``outputnode`` of the given ``flow``.
        """
        # resampling diffusion image and setting output type to short
        fs_mriconvert = pe.Node(interface=fs.MRIConvert(out_type='nii',out_file='diffusion_resampled.nii'),name="diffusion_resample")
        fs_mriconvert.inputs.vox_size = self.config.resampling
        fs_mriconvert.inputs.resample_type = self.config.interpolation
        flow.connect([(inputnode,fs_mriconvert,[('diffusion','in_file')])])

        # WM mask is resampled with nearest-neighbour to keep it binary
        fs_mriconvert_wm_mask = pe.Node(interface=fs.MRIConvert(out_type='nii',resample_type='nearest',out_file='wm_mask_resampled.nii'),name="mask_resample")
        fs_mriconvert_wm_mask.inputs.vox_size = self.config.resampling
        flow.connect([(inputnode,fs_mriconvert_wm_mask,[('wm_mask_registered','in_file')])])
        
        # DTK works on the unresampled ROI volumes; all other tools get
        # nearest-neighbour resampled (and optionally dilated) ROIs
        if self.config.processing_tool != 'DTK':
    
            fs_mriconvert_ROIs = pe.MapNode(interface=fs.MRIConvert(out_type='nii',resample_type='nearest'),name="ROIs_resample",iterfield=['in_file'])
            fs_mriconvert_ROIs.inputs.vox_size = self.config.resampling
            flow.connect([(inputnode,fs_mriconvert_ROIs,[('roi_volumes','in_file')])])
            
            if self.config.dilate_rois:
                # 'modal' dilation assigns the most frequent neighbouring label
                dilate_rois = pe.MapNode(interface=fsl.DilateImage(),iterfield=['in_file'],name='dilate_rois')
                dilate_rois.inputs.operation = 'modal'
                flow.connect([
                              (fs_mriconvert_ROIs,dilate_rois,[("out_file","in_file")]),
                              (dilate_rois,outputnode,[("out_file","roi_volumes")])
                            ])
            else:
                flow.connect([
                            (fs_mriconvert_ROIs,outputnode,[("out_file","roi_volumes")])
                            ])
        else:
            flow.connect([
                          (inputnode,outputnode,[("roi_volumes","roi_volumes")])
                        ])
        
        # Reconstruction
        # One recon sub-workflow per processing tool; each consumes the
        # resampled diffusion image (and, where needed, the WM mask).
        if self.config.processing_tool == 'DTK':
            recon_flow = create_dtk_recon_flow(self.config.dtk_recon_config)
            flow.connect([
                        (inputnode,recon_flow,[('diffusion','inputnode.diffusion')]),
                        (fs_mriconvert,recon_flow,[('out_file','inputnode.diffusion_resampled')]),
                        ])
        elif self.config.processing_tool == 'MRtrix':
            recon_flow = create_mrtrix_recon_flow(self.config.mrtrix_recon_config)
            flow.connect([
                        (inputnode,recon_flow,[('diffusion','inputnode.diffusion')]),
                        (inputnode,recon_flow,[('grad','inputnode.grad')]),
                        (fs_mriconvert,recon_flow,[('out_file','inputnode.diffusion_resampled')]),
			            (fs_mriconvert_wm_mask, recon_flow,[('out_file','inputnode.wm_mask_resampled')]),
                        (recon_flow,outputnode,[("outputnode.FA","gFA")]),
                        ])

        elif self.config.processing_tool == 'Camino':
            recon_flow = create_camino_recon_flow(self.config.camino_recon_config)
            flow.connect([
                        (inputnode,recon_flow,[('diffusion','inputnode.diffusion')]),
                        (fs_mriconvert,recon_flow,[('out_file','inputnode.diffusion_resampled')]),
                        (fs_mriconvert_wm_mask, recon_flow,[('out_file','inputnode.wm_mask_resampled')]),
                        (recon_flow,outputnode,[("outputnode.FA","gFA")])
                        ])
        
        elif self.config.processing_tool == 'FSL':
            recon_flow = create_fsl_recon_flow(self.config.fsl_recon_config)
            flow.connect([
                        (fs_mriconvert,recon_flow,[('out_file','inputnode.diffusion_resampled')]),
                        (fs_mriconvert_wm_mask,recon_flow,[('out_file','inputnode.wm_mask_resampled')])
                        ])

        elif self.config.processing_tool == 'Gibbs':
            recon_flow = create_gibbs_recon_flow(self.config.gibbs_recon_config)
            flow.connect([
                          (fs_mriconvert,recon_flow,[("out_file","inputnode.diffusion_resampled")])
                        ])
        
        # Tracking
        # Each tracking sub-workflow is fed from the matching recon_flow
        # created above (same processing_tool branch is guaranteed to run).
        if self.config.processing_tool == 'DTK':
            track_flow = create_dtb_tracking_flow(self.config.dtb_tracking_config)
            flow.connect([
                        (inputnode, track_flow,[('wm_mask_registered','inputnode.wm_mask_registered')]),
                        (recon_flow, track_flow,[('outputnode.DWI','inputnode.DWI')])
                        ])

        elif self.config.processing_tool == 'MRtrix':
            track_flow = create_mrtrix_tracking_flow(self.config.mrtrix_tracking_config)
            # NOTE(review): dilate_rois is only defined when
            # self.config.dilate_rois is True (see ROI branch above); with
            # dilate_rois False this connection raises NameError — confirm.
            flow.connect([
                        (fs_mriconvert_wm_mask, track_flow,[('out_file','inputnode.wm_mask_resampled')]),
                        (recon_flow, outputnode,[('outputnode.DWI','fod_file')]),
                        (recon_flow, track_flow,[('outputnode.DWI','inputnode.DWI'),('outputnode.grad','inputnode.grad')]),
                        (dilate_rois,track_flow,[('out_file','inputnode.gm_registered')])
			             #(recon_flow, track_flow,[('outputnode.SD','inputnode.SD')]),
                        ])

           #  if self.config.diffusion_model == 'Probabilistic':
           #      flow.connect([
    			    # (dilate_rois,track_flow,[('out_file','inputnode.gm_registered')]),
    			    # ])

            flow.connect([
                        (track_flow,outputnode,[('outputnode.track_file','track_file')])
                        ])

        elif self.config.processing_tool == 'Camino':
            track_flow = create_camino_tracking_flow(self.config.camino_tracking_config)
            flow.connect([
                        (fs_mriconvert_wm_mask, track_flow,[('out_file','inputnode.wm_mask_resampled')]),
                        (recon_flow, track_flow,[('outputnode.DWI','inputnode.DWI'), ('outputnode.grad','inputnode.grad')])
                        ])
            # GM registration is only needed for probabilistic tracking here
            if self.config.diffusion_model == 'Probabilistic':
                flow.connect([
                    (dilate_rois,track_flow,[('out_file','inputnode.gm_registered')]),
                    ])
            flow.connect([
                        (track_flow,outputnode,[('outputnode.track_file','track_file')])
                        ])
        
        elif self.config.processing_tool == 'FSL':
            track_flow = create_fsl_tracking_flow(self.config.fsl_tracking_config)
            # NOTE(review): same dilate_rois NameError risk as the MRtrix
            # branch above when self.config.dilate_rois is False — confirm.
            flow.connect([
                        (fs_mriconvert_wm_mask,track_flow,[('out_file','inputnode.wm_mask_resampled')]),
                        (dilate_rois,track_flow,[('out_file','inputnode.gm_registered')]),
                        (recon_flow,track_flow,[('outputnode.fsamples','inputnode.fsamples')]),
                        (recon_flow,track_flow,[('outputnode.phsamples','inputnode.phsamples')]),
                        (recon_flow,track_flow,[('outputnode.thsamples','inputnode.thsamples')]),
                        ])
            flow.connect([
                        (track_flow,outputnode,[("outputnode.targets","track_file")]),
                        ])
        elif self.config.processing_tool == 'Gibbs':
            track_flow = create_gibbs_tracking_flow(self.config.gibbs_tracking_config)
            flow.connect([
                          (fs_mriconvert_wm_mask, track_flow,[('out_file','inputnode.wm_mask_resampled')]),
                          (recon_flow,track_flow,[("outputnode.recon_file","inputnode.recon_file")]),
                          (track_flow,outputnode,[('outputnode.track_file','track_file')])
                        ])
            
                        
        # DTK exposes extra scalar maps (gFA/skewness/kurtosis/P0)
        if self.config.processing_tool == 'DTK':
            flow.connect([
			    (recon_flow,outputnode, [("outputnode.gFA","gFA"),("outputnode.skewness","skewness"),
			                             ("outputnode.kurtosis","kurtosis"),("outputnode.P0","P0")]),
			    (track_flow,outputnode, [('outputnode.track_file','track_file')])
			    ])

        # Pass the chosen diffusion model string through to the outputnode
        # via an IdentityInterface node.
        temp_node = pe.Node(interface=util.IdentityInterface(fields=["diffusion_model"]),name="diffusion_model")
        temp_node.inputs.diffusion_model = self.config.diffusion_model
        flow.connect([
                    (temp_node,outputnode,[("diffusion_model","diffusion_model")])
                    ])