Example #1
import argparse

from nipype.interfaces.fsl.maths import ApplyMask
# Note: 'mil' is a helper module from the original project (it provides loadBOLD).


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--scan', type=str, help='/path/to/scan/file')
    parser.add_argument('-m', '--mask', type=str, help='/path/to/mask/file')
    parser.add_argument('-o',
                        '--output',
                        type=str,
                        help='/path/to/output/file')

    args = parser.parse_args()

    # Define variables
    sequenceFn = args.scan
    maskFn = args.mask
    outFn = args.output

    # Load the sequence
    sequence, coords1 = mil.loadBOLD(sequenceFn)
    # Load the mask
    mask, coords2 = mil.loadBOLD(maskFn)
    print("sequence", sequence.shape)
    print("mask", mask.shape)

    masking = ApplyMask()
    masking.inputs.in_file = sequenceFn
    masking.inputs.mask_file = maskFn
    masking.inputs.out_file = outFn
    masking.run()
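
The example above wraps ApplyMask behind a small argparse CLI. Under the hood the interface assembles a single fslmaths call; a minimal sketch (hypothetical file names) that prints the generated command line without running it:

from nipype.interfaces.fsl.maths import ApplyMask

# Hypothetical file names, for illustration only.
masking = ApplyMask(in_file='bold.nii.gz',
                    mask_file='brain_mask.nii.gz',
                    out_file='bold_masked.nii.gz')
# Prints something like: fslmaths bold.nii.gz -mas brain_mask.nii.gz bold_masked.nii.gz
print(masking.cmdline)
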
Example #2
def test_FASTRPT(monkeypatch, segments, reference, reference_mask):
    """ test FAST with the two options for segments """
    from nipype.interfaces.fsl.maths import ApplyMask

    def _agg(objekt, runtime):
        outputs = Bunch(
            tissue_class_map=os.path.join(
                datadir, 'testFASTRPT-tissue_class_map.nii.gz'),
            tissue_class_files=[
                os.path.join(datadir,
                             'testFASTRPT-tissue_class_files0.nii.gz'),
                os.path.join(datadir,
                             'testFASTRPT-tissue_class_files1.nii.gz'),
                os.path.join(datadir, 'testFASTRPT-tissue_class_files2.nii.gz')
            ])
        return outputs

    # Patch the _run_interface method
    monkeypatch.setattr(FASTRPT, '_run_interface', _run_interface_mock)
    monkeypatch.setattr(FASTRPT, 'aggregate_outputs', _agg)

    brain = ApplyMask(in_file=reference,
                      mask_file=reference_mask).run().outputs.out_file
    fast_rpt = FASTRPT(in_files=brain,
                       generate_report=True,
                       no_bias=True,
                       probability_maps=True,
                       segments=segments,
                       out_basename='test')
    _smoke_test_report(fast_rpt,
                       'testFAST_%ssegments.svg' % ('no' * int(not segments)))
Example #3
def test_ApplyMask_outputs():
    output_map = dict(out_file=dict(), )
    outputs = ApplyMask.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
Example #4
def test_FASTRPT(monkeypatch, segments, reference, reference_mask):
    """ test FAST with the two options for segments """
    from nipype.interfaces.fsl.maths import ApplyMask

    def _agg(objekt, runtime):
        outputs = objekt.output_spec()
        outputs.out_report = os.path.join(runtime.cwd,
                                          objekt.inputs.out_report)
        outputs.tissue_class_map = os.path.join(
            datadir, "testFASTRPT-tissue_class_map.nii.gz")
        outputs.tissue_class_files = [
            os.path.join(datadir, "testFASTRPT-tissue_class_files0.nii.gz"),
            os.path.join(datadir, "testFASTRPT-tissue_class_files1.nii.gz"),
            os.path.join(datadir, "testFASTRPT-tissue_class_files2.nii.gz"),
        ]
        return outputs

    # Patch the _run_interface method
    monkeypatch.setattr(FASTRPT, "_run_interface", _run_interface_mock)
    monkeypatch.setattr(FASTRPT, "aggregate_outputs", _agg)

    brain = (pe.Node(ApplyMask(in_file=reference, mask_file=reference_mask),
                     name="brain").run().outputs.out_file)
    fast_rpt = FASTRPT(
        in_files=brain,
        generate_report=True,
        no_bias=True,
        probability_maps=True,
        segments=segments,
        out_basename="test",
    )
    _smoke_test_report(fast_rpt,
                       "testFAST_%ssegments.svg" % ("no" * int(not segments)))
Example #5
def test_ApplyMask_outputs():
    output_map = dict(out_file=dict(), )
    outputs = ApplyMask.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
Example #6
def test_ApplyMask_inputs():
    input_map = dict(
        args=dict(argstr='%s', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        in_file=dict(
            argstr='%s',
            mandatory=True,
            position=2,
        ),
        internal_datatype=dict(
            argstr='-dt %s',
            position=1,
        ),
        mask_file=dict(
            argstr='-mas %s',
            mandatory=True,
            position=4,
        ),
        nan2zeros=dict(
            argstr='-nan',
            position=3,
        ),
        out_file=dict(
            argstr='%s',
            genfile=True,
            hash_files=False,
            position=-2,
        ),
        output_datatype=dict(
            argstr='-odt %s',
            position=-1,
        ),
        output_type=dict(),
        terminal_output=dict(
            mandatory=True,
            nohash=True,
        ),
    )
    inputs = ApplyMask.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
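
The auto-generated test above checks the interface spec with nose-style yield asserts. A short sketch of the same kind of check written with plain asserts (assuming a pytest-style runner), limited to a few representative traits:

def test_ApplyMask_inputs_plain_asserts():
    from nipype.interfaces.fsl.maths import ApplyMask

    inputs = ApplyMask.input_spec()
    # in_file is a mandatory positional argument...
    assert inputs.traits()['in_file'].mandatory
    assert inputs.traits()['in_file'].argstr == '%s'
    # ...and mask_file maps to fslmaths' -mas flag.
    assert inputs.traits()['mask_file'].argstr == '-mas %s'
    assert inputs.traits()['out_file'].genfile
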
Example #7
def test_ApplyMask_inputs():
    input_map = dict(
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        nan2zeros=dict(
            position=3,
            argstr='-nan',
        ),
        out_file=dict(
            hash_files=False,
            genfile=True,
            position=-2,
            argstr='%s',
        ),
        args=dict(argstr='%s', ),
        internal_datatype=dict(
            position=1,
            argstr='-dt %s',
        ),
        terminal_output=dict(
            mandatory=True,
            nohash=True,
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        in_file=dict(
            position=2,
            mandatory=True,
            argstr='%s',
        ),
        mask_file=dict(
            position=4,
            mandatory=True,
            argstr='-mas %s',
        ),
        output_type=dict(),
        output_datatype=dict(
            position=-1,
            argstr='-odt %s',
        ),
    )
    inputs = ApplyMask.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Example #8
def init_brain_extraction_wf(name='brain_extraction_wf',
                             in_template='OASIS',
                             use_float=True,
                             normalization_quality='precise',
                             omp_nthreads=None,
                             mem_gb=3.0,
                             modality='T1',
                             atropos_refine=True,
                             atropos_use_random_seed=True,
                             atropos_model=None):
    """
    A Nipype implementation of the official ANTs' ``antsBrainExtraction.sh``
    workflow (only for 3D images).

    The official workflow is built as follows (and this implementation
    follows the same organization):

      1. Performs several clerical tasks (adding padding, calculating
         the Laplacian of inputs, affine initialization) and the core
         spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in step 1.
      3. Superstep 1b: smart binarization of the brain mask.
      4. Superstep 6: apply ATROPOS and massage its outputs.
      5. Superstep 7: use results from step 4 to refine the brain mask.


    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from niworkflows.anat import init_brain_extraction_wf
        wf = init_brain_extraction_wf()


    **Parameters**

        in_template : str
            Name of the skull-stripping template ('OASIS', 'NKI', or a
            path to a custom template directory).
            The brain template from which regions will be projected.
            The anatomical template is created using e.g. the LPBA40 data set
            with ``buildtemplateparallel.sh`` in ANTs.
            The workflow will automatically search for a brain probability
            mask, created e.g. from the LPBA40 data set (which has brain
            masks defined), warped to the anatomical template and averaged
            into a probability image.
        use_float : bool
            Whether single precision should be used
        normalization_quality : str
            Use more precise or faster registration parameters
            (default: ``precise``, other possible values: ``testing``)
        omp_nthreads : int
            Maximum number of threads an individual process may use
        mem_gb : float
            Estimated peak memory consumption of the most hungry nodes
            in the workflow
        modality : str
            Sequence type of the first input image ('T1', 'T2', or 'FLAIR')
        atropos_refine : bool
            Enables or disables the whole ATROPOS sub-workflow
        atropos_use_random_seed : bool
            Whether ATROPOS should generate a random seed based on the
            system's clock
        atropos_model : tuple or None
            Allows specifying a particular segmentation model, overriding
            the defaults based on ``modality``
        name : str, optional
            Workflow name (default: ``brain_extraction_wf``)


    **Inputs**

        in_files
            List of input anatomical images to be brain-extracted,
            typically T1-weighted.
            If a list of anatomical images is provided, subsequently
            specified images are used during the segmentation process.
            However, only the first image is used in the registration
            of priors.
            Our suggestion would be to specify the T1w as the first image.
        in_mask
            (optional) Mask used for registration to limit the metric
            computation to a specific region.


    **Outputs**

        bias_corrected
            The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
            correction.
        out_mask
            Calculated brain mask
        bias_image
            The :abbr:`INU (intensity non-uniformity)` field estimated for each
            input in ``in_files``
        out_segm
            Output segmentation by ATROPOS
        out_tpms
            Output :abbr:`TPMs (tissue probability maps)` by ATROPOS


    """
    wf = pe.Workflow(name)

    template_path = None
    if in_template in TEMPLATE_MAP:
        template_path = get_dataset(in_template)
    else:
        template_path = in_template

    mod = ('%sw' % modality[:2].upper()
           if modality.upper().startswith('T') else modality.upper())

    # Append template modality
    potential_targets = list(Path(template_path).glob('*_%s.nii.gz' % mod))
    if not potential_targets:
        raise ValueError('No %s template was found under "%s".' %
                         (mod, template_path))

    tpl_target_path = str(potential_targets[0])
    target_basename = '_'.join(tpl_target_path.split('_')[:-1])

    # Get probabilistic brain mask if available
    tpl_mask_path = '%s_class-brainmask_probtissue.nii.gz' % target_basename
    # Fall-back to a binary mask just in case
    if not os.path.exists(tpl_mask_path):
        tpl_mask_path = '%s_brainmask.nii.gz' % target_basename

    if not os.path.exists(tpl_mask_path):
        raise ValueError(
            'Probability map for the brain mask associated to this template '
            '"%s" not found.' % tpl_mask_path)

    if omp_nthreads is None or omp_nthreads < 1:
        omp_nthreads = cpu_count()

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']),
                        name='inputnode')

    # Try to find a registration mask, set if available
    tpl_regmask_path = '%s_label-BrainCerebellumRegistration_roi.nii.gz' % target_basename
    if os.path.exists(tpl_regmask_path):
        inputnode.inputs.in_mask = tpl_regmask_path

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bias_corrected', 'out_mask', 'bias_image', 'out_segm']),
                         name='outputnode')

    trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity',
                                 op2='0.01 0.999 256'),
                       name='truncate_images',
                       iterfield=['op1'])
    inu_n4 = pe.MapNode(N4BiasFieldCorrection(dimension=3,
                                              save_bias=True,
                                              copy_header=True,
                                              n_iterations=[50] * 4,
                                              convergence_threshold=1e-7,
                                              shrink_factor=4,
                                              bspline_fitting_distance=200),
                        n_procs=omp_nthreads,
                        name='inu_n4',
                        iterfield=['input_image'])

    res_tmpl = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                              apply_smoothing=True),
                       name='res_tmpl')
    res_tmpl.inputs.input_image = tpl_target_path
    res_target = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                                apply_smoothing=True),
                         name='res_target')

    lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                       name='lap_tmpl')
    lap_tmpl.inputs.op1 = tpl_target_path
    lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                         name='lap_target')
    mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
    mrg_tmpl.inputs.in1 = tpl_target_path
    mrg_target = pe.Node(niu.Merge(2), name='mrg_target')

    # Initialize transforms with antsAI
    init_aff = pe.Node(AI(metric=('Mattes', 32, 'Regular', 0.2),
                          transform=('Affine', 0.1),
                          search_factor=(20, 0.12),
                          principal_axes=False,
                          convergence=(10, 1e-6, 10),
                          verbose=True),
                       name='init_aff',
                       n_procs=omp_nthreads)

    if parseversion(Registration().version) > Version('2.2.0'):
        init_aff.inputs.search_grid = (40, (0, 40, 40))

    # Set up spatial normalization
    norm = pe.Node(Registration(from_file=pkgr_fn(
        'niworkflows.data', 'antsBrainExtraction_%s.json' %
        normalization_quality)),
                   name='norm',
                   n_procs=omp_nthreads,
                   mem_gb=mem_gb)
    norm.inputs.float = use_float
    fixed_mask_trait = 'fixed_image_mask'
    if parseversion(Registration().version) >= Version('2.2.0'):
        fixed_mask_trait += 's'

    map_brainmask = pe.Node(ApplyTransforms(interpolation='Gaussian',
                                            float=True),
                            name='map_brainmask',
                            mem_gb=1)
    map_brainmask.inputs.input_image = tpl_mask_path

    thr_brainmask = pe.Node(ThresholdImage(dimension=3,
                                           th_low=0.5,
                                           th_high=1.0,
                                           inside_value=1,
                                           outside_value=0),
                            name='thr_brainmask')

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation='MD', op2='2'),
                            name='dil_brainmask')
    # Get largest connected component
    get_brainmask = pe.Node(ImageMath(operation='GetLargestComponent'),
                            name='get_brainmask')

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(),
                            iterfield=['in_file'],
                            name='apply_mask')

    wf.connect([
        (inputnode, trunc, [('in_files', 'op1')]),
        (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]),
        (inputnode, norm, [('in_mask', fixed_mask_trait)]),
        (inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]),
        (trunc, inu_n4, [('output_image', 'input_image')]),
        (inu_n4, res_target, [(('output_image', _pop), 'input_image')]),
        (inu_n4, lap_target, [(('output_image', _pop), 'op1')]),
        (res_tmpl, init_aff, [('output_image', 'fixed_image')]),
        (res_target, init_aff, [('output_image', 'moving_image')]),
        (inu_n4, mrg_target, [('output_image', 'in1')]),
        (lap_tmpl, mrg_tmpl, [('output_image', 'in2')]),
        (lap_target, mrg_target, [('output_image', 'in2')]),
        (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
        (mrg_tmpl, norm, [('out', 'fixed_image')]),
        (mrg_target, norm, [('out', 'moving_image')]),
        (norm, map_brainmask, [('reverse_invert_flags',
                                'invert_transform_flags'),
                               ('reverse_transforms', 'transforms')]),
        (map_brainmask, thr_brainmask, [('output_image', 'input_image')]),
        (thr_brainmask, dil_brainmask, [('output_image', 'op1')]),
        (dil_brainmask, get_brainmask, [('output_image', 'op1')]),
        (inu_n4, apply_mask, [('output_image', 'in_file')]),
        (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        (get_brainmask, outputnode, [('output_image', 'out_mask')]),
        (apply_mask, outputnode, [('out_file', 'bias_corrected')]),
        (inu_n4, outputnode, [('bias_image', 'bias_image')]),
    ])

    if atropos_refine:
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model
            or list(ATROPOS_MODELS[modality].values()))

        wf.disconnect([
            (get_brainmask, outputnode, [('output_image', 'out_mask')]),
            (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        ])
        wf.connect([
            (inu_n4, atropos_wf, [('output_image', 'inputnode.in_files')]),
            (get_brainmask, atropos_wf, [('output_image', 'inputnode.in_mask')
                                         ]),
            (atropos_wf, outputnode, [('outputnode.out_mask', 'out_mask')]),
            (atropos_wf, apply_mask, [('outputnode.out_mask', 'mask_file')]),
            (atropos_wf, outputnode, [('outputnode.out_segm', 'out_segm'),
                                      ('outputnode.out_tpms', 'out_tpms')])
        ])
    return wf
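
A minimal usage sketch for the workflow factory above (hypothetical paths; the import path is the one shown in the docstring):

from niworkflows.anat import init_brain_extraction_wf

wf = init_brain_extraction_wf(in_template='OASIS', omp_nthreads=4, modality='T1')
wf.inputs.inputnode.in_files = ['/data/sub-01_T1w.nii.gz']  # hypothetical input image
wf.base_dir = '/tmp/brain_extraction_work'                  # hypothetical working directory
# wf.run()  # uncomment to execute; results are exposed on 'outputnode'
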
Example #9
def Lesion_extractor(
    name='Lesion_Extractor',
    wf_name='Test',
    base_dir='/homes_unix/alaurent/',
    input_dir=None,
    subjects=None,
    main=None,
    acc=None,
    atlas='/homes_unix/alaurent/cbstools-public-master/atlases/brain-segmentation-prior3.0/brain-atlas-quant-3.0.8.txt'
):

    wf = Workflow(wf_name)
    wf.base_dir = base_dir

    #file = open(subjects,"r")
    #subjects = file.read().split("\n")
    #file.close()

    # Subject List
    subjectList = Node(IdentityInterface(fields=['subject_id'],
                                         mandatory_inputs=True),
                       name="subList")
    subjectList.iterables = ('subject_id', [
        sub for sub in subjects if sub != '' and sub != '\n'
    ])

    # T1w and FLAIR
    scanList = Node(DataGrabber(infields=['subject_id'],
                                outfields=['T1', 'FLAIR']),
                    name="scanList")
    scanList.inputs.base_directory = input_dir
    scanList.inputs.ignore_exception = False
    scanList.inputs.raise_on_empty = True
    scanList.inputs.sort_filelist = True
    #scanList.inputs.template = '%s/%s.nii'
    #scanList.inputs.template_args = {'T1': [['subject_id','T1*']],
    #                                 'FLAIR': [['subject_id','FLAIR*']]}
    scanList.inputs.template = '%s/anat/%s'
    scanList.inputs.template_args = {
        'T1': [['subject_id', '*_T1w.nii.gz']],
        'FLAIR': [['subject_id', '*_FLAIR.nii.gz']]
    }
    wf.connect(subjectList, "subject_id", scanList, "subject_id")

    #     # T1w and FLAIR
    #     dg = Node(DataGrabber(outfields=['T1', 'FLAIR']), name="T1wFLAIR")
    #     dg.inputs.base_directory = "/homes_unix/alaurent/LesionPipeline"
    #     dg.inputs.template = "%s/NIFTI/*.nii.gz"
    #     dg.inputs.template_args['T1']=[['7']]
    #     dg.inputs.template_args['FLAIR']=[['9']]
    #     dg.inputs.sort_filelist=True

    # Reorient Volume
    T1Conv = Node(Reorient2Std(), name="ReorientVolume")
    T1Conv.inputs.ignore_exception = False
    T1Conv.inputs.terminal_output = 'none'
    T1Conv.inputs.out_file = "T1_reoriented.nii.gz"
    wf.connect(scanList, "T1", T1Conv, "in_file")

    # Reorient Volume (2)
    T2flairConv = Node(Reorient2Std(), name="ReorientVolume2")
    T2flairConv.inputs.ignore_exception = False
    T2flairConv.inputs.terminal_output = 'none'
    T2flairConv.inputs.out_file = "FLAIR_reoriented.nii.gz"
    wf.connect(scanList, "FLAIR", T2flairConv, "in_file")

    # N3 Correction
    T1NUC = Node(N4BiasFieldCorrection(), name="N3Correction")
    T1NUC.inputs.dimension = 3
    T1NUC.inputs.environ = {'NSLOTS': '1'}
    T1NUC.inputs.ignore_exception = False
    T1NUC.inputs.num_threads = 1
    T1NUC.inputs.save_bias = False
    T1NUC.inputs.terminal_output = 'none'
    wf.connect(T1Conv, "out_file", T1NUC, "input_image")

    # N3 Correction (2)
    T2flairNUC = Node(N4BiasFieldCorrection(), name="N3Correction2")
    T2flairNUC.inputs.dimension = 3
    T2flairNUC.inputs.environ = {'NSLOTS': '1'}
    T2flairNUC.inputs.ignore_exception = False
    T2flairNUC.inputs.num_threads = 1
    T2flairNUC.inputs.save_bias = False
    T2flairNUC.inputs.terminal_output = 'none'
    wf.connect(T2flairConv, "out_file", T2flairNUC, "input_image")
    '''
    #####################
    ### PRE-NORMALIZE ###
    #####################
    To make sure there are no outlier values (negative, or really high) that would offset the initialization steps
    '''

    # Intensity Range Normalization
    getMaxT1NUC = Node(ImageStats(op_string='-r'), name="getMaxT1NUC")
    wf.connect(T1NUC, 'output_image', getMaxT1NUC, 'in_file')

    T1NUCirn = Node(AbcImageMaths(), name="IntensityNormalization")
    T1NUCirn.inputs.op_string = "-div"
    T1NUCirn.inputs.out_file = "normT1.nii.gz"
    wf.connect(T1NUC, 'output_image', T1NUCirn, 'in_file')
    wf.connect(getMaxT1NUC, ('out_stat', getElementFromList, 1), T1NUCirn,
               "op_value")

    # Intensity Range Normalization (2)
    getMaxT2NUC = Node(ImageStats(op_string='-r'), name="getMaxT2")
    wf.connect(T2flairNUC, 'output_image', getMaxT2NUC, 'in_file')

    T2NUCirn = Node(AbcImageMaths(), name="IntensityNormalization2")
    T2NUCirn.inputs.op_string = "-div"
    T2NUCirn.inputs.out_file = "normT2.nii.gz"
    wf.connect(T2flairNUC, 'output_image', T2NUCirn, 'in_file')
    wf.connect(getMaxT2NUC, ('out_stat', getElementFromList, 1), T2NUCirn,
               "op_value")
    '''
    ########################
    #### COREGISTRATION ####
    ########################
    '''

    # Optimized Automated Registration
    T2flairCoreg = Node(FLIRT(), name="OptimizedAutomatedRegistration")
    T2flairCoreg.inputs.output_type = 'NIFTI_GZ'
    wf.connect(T2NUCirn, "out_file", T2flairCoreg, "in_file")
    wf.connect(T1NUCirn, "out_file", T2flairCoreg, "reference")
    '''    
    #########################
    #### SKULL-STRIPPING ####
    #########################
    '''

    # SPECTRE
    T1ss = Node(BET(), name="SPECTRE")
    T1ss.inputs.frac = 0.45  #0.4
    T1ss.inputs.mask = True
    T1ss.inputs.outline = True
    T1ss.inputs.robust = True
    wf.connect(T1NUCirn, "out_file", T1ss, "in_file")

    # Image Calculator
    T2ss = Node(ApplyMask(), name="ImageCalculator")
    wf.connect(T1ss, "mask_file", T2ss, "mask_file")
    wf.connect(T2flairCoreg, "out_file", T2ss, "in_file")
    '''
    ####################################
    #### 2nd LAYER OF N3 CORRECTION ####
    ####################################
    This time without the skull: there were significant amounts of inhomogeneity left over.
    '''

    # N3 Correction (3)
    T1ssNUC = Node(N4BiasFieldCorrection(), name="N3Correction3")
    T1ssNUC.inputs.dimension = 3
    T1ssNUC.inputs.environ = {'NSLOTS': '1'}
    T1ssNUC.inputs.ignore_exception = False
    T1ssNUC.inputs.num_threads = 1
    T1ssNUC.inputs.save_bias = False
    T1ssNUC.inputs.terminal_output = 'none'
    wf.connect(T1ss, "out_file", T1ssNUC, "input_image")

    # N3 Correction (4)
    T2ssNUC = Node(N4BiasFieldCorrection(), name="N3Correction4")
    T2ssNUC.inputs.dimension = 3
    T2ssNUC.inputs.environ = {'NSLOTS': '1'}
    T2ssNUC.inputs.ignore_exception = False
    T2ssNUC.inputs.num_threads = 1
    T2ssNUC.inputs.save_bias = False
    T2ssNUC.inputs.terminal_output = 'none'
    wf.connect(T2ss, "out_file", T2ssNUC, "input_image")
    '''
    ####################################
    ####    NORMALIZE FOR MGDM      ####
    ####################################
    This normalization is a bit aggressive: it is only useful to give MGDM a
    cropped dynamic range, but it may be harmful to further processing, so the
    unprocessed images are passed to the subsequent steps.
    '''

    # Intensity Range Normalization
    getMaxT1ssNUC = Node(ImageStats(op_string='-r'), name="getMaxT1ssNUC")
    wf.connect(T1ssNUC, 'output_image', getMaxT1ssNUC, 'in_file')

    T1ssNUCirn = Node(AbcImageMaths(), name="IntensityNormalization3")
    T1ssNUCirn.inputs.op_string = "-div"
    T1ssNUCirn.inputs.out_file = "normT1ss.nii.gz"
    wf.connect(T1ssNUC, 'output_image', T1ssNUCirn, 'in_file')
    wf.connect(getMaxT1ssNUC, ('out_stat', getElementFromList, 1), T1ssNUCirn,
               "op_value")

    # Intensity Range Normalization (2)
    getMaxT2ssNUC = Node(ImageStats(op_string='-r'), name="getMaxT2ssNUC")
    wf.connect(T2ssNUC, 'output_image', getMaxT2ssNUC, 'in_file')

    T2ssNUCirn = Node(AbcImageMaths(), name="IntensityNormalization4")
    T2ssNUCirn.inputs.op_string = "-div"
    T2ssNUCirn.inputs.out_file = "normT2ss.nii.gz"
    wf.connect(T2ssNUC, 'output_image', T2ssNUCirn, 'in_file')
    wf.connect(getMaxT2ssNUC, ('out_stat', getElementFromList, 1), T2ssNUCirn,
               "op_value")
    '''
    ####################################
    ####      ESTIMATE CSF PV       ####
    ####################################
    Here we try to get a better handle on CSF voxels to help the segmentation step
    '''

    # Recursive Ridge Diffusion
    CSF_pv = Node(RecursiveRidgeDiffusion(), name='estimate_CSF_pv')
    CSF_pv.plugin_args = {'sbatch_args': '--mem 6000'}
    CSF_pv.inputs.ridge_intensities = "dark"
    CSF_pv.inputs.ridge_filter = "2D"
    CSF_pv.inputs.orientation = "undefined"
    CSF_pv.inputs.ang_factor = 1.0
    CSF_pv.inputs.min_scale = 0
    CSF_pv.inputs.max_scale = 3
    CSF_pv.inputs.propagation_model = "diffusion"
    CSF_pv.inputs.diffusion_factor = 0.5
    CSF_pv.inputs.similarity_scale = 0.1
    CSF_pv.inputs.neighborhood_size = 4
    CSF_pv.inputs.max_iter = 100
    CSF_pv.inputs.max_diff = 0.001
    CSF_pv.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, CSF_pv.name),
        CSF_pv, 'output_dir')
    wf.connect(T1ssNUCirn, 'out_file', CSF_pv, 'input_image')
    '''
    ####################################
    ####            MGDM            ####
    ####################################
    '''

    # Multi-contrast Brain Segmentation
    MGDM = Node(MGDMSegmentation(), name='MGDM')
    MGDM.plugin_args = {'sbatch_args': '--mem 7000'}
    MGDM.inputs.contrast_type1 = "Mprage3T"
    MGDM.inputs.contrast_type2 = "FLAIR3T"
    MGDM.inputs.contrast_type3 = "PVDURA"
    MGDM.inputs.save_data = True
    MGDM.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, MGDM.name), MGDM,
        'output_dir')
    wf.connect(T1ssNUCirn, 'out_file', MGDM, 'contrast_image1')
    wf.connect(T2ssNUCirn, 'out_file', MGDM, 'contrast_image2')
    wf.connect(CSF_pv, 'ridge_pv', MGDM, 'contrast_image3')

    # Enhance Region Contrast
    ERC = Node(EnhanceRegionContrast(), name='ERC')
    ERC.plugin_args = {'sbatch_args': '--mem 7000'}
    ERC.inputs.enhanced_region = "crwm"
    ERC.inputs.contrast_background = "crgm"
    ERC.inputs.partial_voluming_distance = 2.0
    ERC.inputs.save_data = True
    ERC.inputs.atlas_file = atlas
    wf.connect(subjectList,
               ('subject_id', createOutputDir, wf.base_dir, wf.name, ERC.name),
               ERC, 'output_dir')
    wf.connect(T1ssNUC, 'output_image', ERC, 'intensity_image')
    wf.connect(MGDM, 'segmentation', ERC, 'segmentation_image')
    wf.connect(MGDM, 'distance', ERC, 'levelset_boundary_image')

    # Enhance Region Contrast (2)
    ERC2 = Node(EnhanceRegionContrast(), name='ERC2')
    ERC2.plugin_args = {'sbatch_args': '--mem 7000'}
    ERC2.inputs.enhanced_region = "crwm"
    ERC2.inputs.contrast_background = "crgm"
    ERC2.inputs.partial_voluming_distance = 2.0
    ERC2.inputs.save_data = True
    ERC2.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, ERC2.name), ERC2,
        'output_dir')
    wf.connect(T2ssNUC, 'output_image', ERC2, 'intensity_image')
    wf.connect(MGDM, 'segmentation', ERC2, 'segmentation_image')
    wf.connect(MGDM, 'distance', ERC2, 'levelset_boundary_image')

    # Define Multi-Region Priors
    DMRP = Node(DefineMultiRegionPriors(), name='DefineMultRegPriors')
    DMRP.plugin_args = {'sbatch_args': '--mem 6000'}
    #DMRP.inputs.defined_region = "ventricle-horns"
    #DMRP.inputs.definition_method = "closest-distance"
    DMRP.inputs.distance_offset = 3.0
    DMRP.inputs.save_data = True
    DMRP.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, DMRP.name), DMRP,
        'output_dir')
    wf.connect(MGDM, 'segmentation', DMRP, 'segmentation_image')
    wf.connect(MGDM, 'distance', DMRP, 'levelset_boundary_image')
    '''
    ###############################################
    ####      REMOVE VENTRICLE POSTERIOR       ####
    ###############################################
    Due to topology constraints, the ventricles are often not fully segmented:
    here we add back all ventricle voxels from the posterior probability (without the topology constraints)
    '''

    # Posterior label
    PostLabel = Node(Split(), name='PosteriorLabel')
    PostLabel.inputs.dimension = "t"
    wf.connect(MGDM, 'labels', PostLabel, 'in_file')

    # Posterior proba
    PostProba = Node(Split(), name='PosteriorProba')
    PostProba.inputs.dimension = "t"
    wf.connect(MGDM, 'memberships', PostProba, 'in_file')

    # Threshold binary mask : ventricle label part 1
    VentLabel1 = Node(Threshold(), name="VentricleLabel1")
    VentLabel1.inputs.thresh = 10.5
    VentLabel1.inputs.direction = "below"
    wf.connect(PostLabel, ("out_files", getFirstElement), VentLabel1,
               "in_file")

    # Threshold binary mask : ventricle label part 2
    VentLabel2 = Node(Threshold(), name="VentricleLabel2")
    VentLabel2.inputs.thresh = 13.5
    VentLabel2.inputs.direction = "above"
    wf.connect(VentLabel1, "out_file", VentLabel2, "in_file")

    # Image calculator : ventricle proba
    VentProba = Node(ImageMaths(), name="VentricleProba")
    VentProba.inputs.op_string = "-mul"
    VentProba.inputs.out_file = "ventproba.nii.gz"
    wf.connect(PostProba, ("out_files", getFirstElement), VentProba, "in_file")
    wf.connect(VentLabel2, "out_file", VentProba, "in_file2")

    # Image calculator : remove inter ventricles
    RmInterVent = Node(ImageMaths(), name="RemoveInterVent")
    RmInterVent.inputs.op_string = "-sub"
    RmInterVent.inputs.out_file = "rmintervent.nii.gz"
    wf.connect(ERC, "region_pv", RmInterVent, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", RmInterVent, "in_file2")

    # Image calculator : add horns
    AddHorns = Node(ImageMaths(), name="AddHorns")
    AddHorns.inputs.op_string = "-add"
    AddHorns.inputs.out_file = "rmvent.nii.gz"
    wf.connect(RmInterVent, "out_file", AddHorns, "in_file")
    wf.connect(DMRP, "ventricular_horns_pv", AddHorns, "in_file2")

    # Image calculator : remove ventricles
    RmVent = Node(ImageMaths(), name="RemoveVentricles")
    RmVent.inputs.op_string = "-sub"
    RmVent.inputs.out_file = "rmvent.nii.gz"
    wf.connect(AddHorns, "out_file", RmVent, "in_file")
    wf.connect(VentProba, "out_file", RmVent, "in_file2")

    # Image calculator : remove internal capsule
    RmIC = Node(ImageMaths(), name="RemoveInternalCap")
    RmIC.inputs.op_string = "-sub"
    RmIC.inputs.out_file = "rmic.nii.gz"
    wf.connect(RmVent, "out_file", RmIC, "in_file")
    wf.connect(DMRP, "internal_capsule_pv", RmIC, "in_file2")

    # Intensity Range Normalization (3)
    getMaxRmIC = Node(ImageStats(op_string='-r'), name="getMaxRmIC")
    wf.connect(RmIC, 'out_file', getMaxRmIC, 'in_file')

    RmICirn = Node(AbcImageMaths(), name="IntensityNormalization5")
    RmICirn.inputs.op_string = "-div"
    RmICirn.inputs.out_file = "normRmIC.nii.gz"
    wf.connect(RmIC, 'out_file', RmICirn, 'in_file')
    wf.connect(getMaxRmIC, ('out_stat', getElementFromList, 1), RmICirn,
               "op_value")

    # Probability To Levelset : WM orientation
    WM_Orient = Node(ProbabilityToLevelset(), name='WM_Orientation')
    WM_Orient.plugin_args = {'sbatch_args': '--mem 6000'}
    WM_Orient.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, WM_Orient.name),
        WM_Orient, 'output_dir')
    wf.connect(RmICirn, 'out_file', WM_Orient, 'probability_image')

    # Recursive Ridge Diffusion : PVS in WM only
    WM_pvs = Node(RecursiveRidgeDiffusion(), name='PVS_in_WM')
    WM_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    WM_pvs.inputs.ridge_intensities = "bright"
    WM_pvs.inputs.ridge_filter = "1D"
    WM_pvs.inputs.orientation = "orthogonal"
    WM_pvs.inputs.ang_factor = 1.0
    WM_pvs.inputs.min_scale = 0
    WM_pvs.inputs.max_scale = 3
    WM_pvs.inputs.propagation_model = "diffusion"
    WM_pvs.inputs.diffusion_factor = 1.0
    WM_pvs.inputs.similarity_scale = 1.0
    WM_pvs.inputs.neighborhood_size = 2
    WM_pvs.inputs.max_iter = 100
    WM_pvs.inputs.max_diff = 0.001
    WM_pvs.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, WM_pvs.name),
        WM_pvs, 'output_dir')
    wf.connect(ERC, 'background_proba', WM_pvs, 'input_image')
    wf.connect(WM_Orient, 'levelset', WM_pvs, 'surface_levelset')
    wf.connect(RmICirn, 'out_file', WM_pvs, 'loc_prior')

    # Extract Lesions : extract WM PVS
    extract_WM_pvs = Node(LesionExtraction(), name='ExtractPVSfromWM')
    extract_WM_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_WM_pvs.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_WM_pvs.inputs.csf_boundary_partial_vol_dist = 3.0
    extract_WM_pvs.inputs.lesion_clust_dist = 1.0
    extract_WM_pvs.inputs.prob_min_thresh = 0.1
    extract_WM_pvs.inputs.prob_max_thresh = 0.33
    extract_WM_pvs.inputs.small_lesion_size = 4.0
    extract_WM_pvs.inputs.save_data = True
    extract_WM_pvs.inputs.atlas_file = atlas
    wf.connect(subjectList, ('subject_id', createOutputDir, wf.base_dir,
                             wf.name, extract_WM_pvs.name), extract_WM_pvs,
               'output_dir')
    wf.connect(WM_pvs, 'propagation', extract_WM_pvs, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_WM_pvs, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_WM_pvs, 'levelset_boundary_image')
    wf.connect(RmICirn, 'out_file', extract_WM_pvs, 'location_prior_image')
    '''
    2nd branch
    '''

    # Image calculator : internal capsule without ventricles
    ICwoVent = Node(ImageMaths(), name="ICWithoutVentricules")
    ICwoVent.inputs.op_string = "-sub"
    ICwoVent.inputs.out_file = "icwovent.nii.gz"
    wf.connect(DMRP, "internal_capsule_pv", ICwoVent, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", ICwoVent, "in_file2")

    # Image calculator : remove ventricles IC
    RmVentIC = Node(ImageMaths(), name="RmVentIC")
    RmVentIC.inputs.op_string = "-sub"
    RmVentIC.inputs.out_file = "RmVentIC.nii.gz"
    wf.connect(ICwoVent, "out_file", RmVentIC, "in_file")
    wf.connect(VentProba, "out_file", RmVentIC, "in_file2")

    # Intensity Range Normalization (4)
    getMaxRmVentIC = Node(ImageStats(op_string='-r'), name="getMaxRmVentIC")
    wf.connect(RmVentIC, 'out_file', getMaxRmVentIC, 'in_file')

    RmVentICirn = Node(AbcImageMaths(), name="IntensityNormalization6")
    RmVentICirn.inputs.op_string = "-div"
    RmVentICirn.inputs.out_file = "normRmVentIC.nii.gz"
    wf.connect(RmVentIC, 'out_file', RmVentICirn, 'in_file')
    wf.connect(getMaxRmVentIC, ('out_stat', getElementFromList, 1),
               RmVentICirn, "op_value")

    # Probability To Levelset : IC orientation
    IC_Orient = Node(ProbabilityToLevelset(), name='IC_Orientation')
    IC_Orient.plugin_args = {'sbatch_args': '--mem 6000'}
    IC_Orient.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, IC_Orient.name),
        IC_Orient, 'output_dir')
    wf.connect(RmVentICirn, 'out_file', IC_Orient, 'probability_image')

    # Recursive Ridge Diffusion : PVS in IC only
    IC_pvs = Node(RecursiveRidgeDiffusion(), name='RecursiveRidgeDiffusion2')
    IC_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    IC_pvs.inputs.ridge_intensities = "bright"
    IC_pvs.inputs.ridge_filter = "1D"
    IC_pvs.inputs.orientation = "undefined"
    IC_pvs.inputs.ang_factor = 1.0
    IC_pvs.inputs.min_scale = 0
    IC_pvs.inputs.max_scale = 3
    IC_pvs.inputs.propagation_model = "diffusion"
    IC_pvs.inputs.diffusion_factor = 1.0
    IC_pvs.inputs.similarity_scale = 1.0
    IC_pvs.inputs.neighborhood_size = 2
    IC_pvs.inputs.max_iter = 100
    IC_pvs.inputs.max_diff = 0.001
    IC_pvs.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, IC_pvs.name),
        IC_pvs, 'output_dir')
    wf.connect(ERC, 'background_proba', IC_pvs, 'input_image')
    wf.connect(IC_Orient, 'levelset', IC_pvs, 'surface_levelset')
    wf.connect(RmVentICirn, 'out_file', IC_pvs, 'loc_prior')

    # Extract Lesions : extract IC PVS
    extract_IC_pvs = Node(LesionExtraction(), name='ExtractPVSfromIC')
    extract_IC_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_IC_pvs.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_IC_pvs.inputs.csf_boundary_partial_vol_dist = 4.0
    extract_IC_pvs.inputs.lesion_clust_dist = 1.0
    extract_IC_pvs.inputs.prob_min_thresh = 0.25
    extract_IC_pvs.inputs.prob_max_thresh = 0.5
    extract_IC_pvs.inputs.small_lesion_size = 4.0
    extract_IC_pvs.inputs.save_data = True
    extract_IC_pvs.inputs.atlas_file = atlas
    wf.connect(subjectList, ('subject_id', createOutputDir, wf.base_dir,
                             wf.name, extract_IC_pvs.name), extract_IC_pvs,
               'output_dir')
    wf.connect(IC_pvs, 'propagation', extract_IC_pvs, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_IC_pvs, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_IC_pvs, 'levelset_boundary_image')
    wf.connect(RmVentICirn, 'out_file', extract_IC_pvs, 'location_prior_image')
    '''
    3rd branch
    '''

    # Image calculator : remove inter-ventricular regions
    RmInter = Node(ImageMaths(), name="RemoveInterVentricules")
    RmInter.inputs.op_string = "-sub"
    RmInter.inputs.out_file = "rminter.nii.gz"
    wf.connect(ERC2, 'region_pv', RmInter, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", RmInter, "in_file2")

    # Image calculator : add ventricular horns
    AddVentHorns = Node(ImageMaths(), name="AddVentHorns")
    AddVentHorns.inputs.op_string = "-add"
    AddVentHorns.inputs.out_file = "rminter.nii.gz"
    wf.connect(RmInter, 'out_file', AddVentHorns, "in_file")
    wf.connect(DMRP, "ventricular_horns_pv", AddVentHorns, "in_file2")

    # Intensity Range Normalization (5)
    getMaxAddVentHorns = Node(ImageStats(op_string='-r'),
                              name="getMaxAddVentHorns")
    wf.connect(AddVentHorns, 'out_file', getMaxAddVentHorns, 'in_file')

    AddVentHornsirn = Node(AbcImageMaths(), name="IntensityNormalization7")
    AddVentHornsirn.inputs.op_string = "-div"
    AddVentHornsirn.inputs.out_file = "normAddVentHorns.nii.gz"
    wf.connect(AddVentHorns, 'out_file', AddVentHornsirn, 'in_file')
    wf.connect(getMaxAddVentHorns, ('out_stat', getElementFromList, 1),
               AddVentHornsirn, "op_value")

    # Extract Lesions : extract White Matter Hyperintensities
    extract_WMH = Node(LesionExtraction(), name='Extract_WMH')
    extract_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_WMH.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_WMH.inputs.csf_boundary_partial_vol_dist = 2.0
    extract_WMH.inputs.lesion_clust_dist = 1.0
    extract_WMH.inputs.prob_min_thresh = 0.84
    extract_WMH.inputs.prob_max_thresh = 0.84
    extract_WMH.inputs.small_lesion_size = 4.0
    extract_WMH.inputs.save_data = True
    extract_WMH.inputs.atlas_file = atlas
    wf.connect(subjectList, ('subject_id', createOutputDir, wf.base_dir,
                             wf.name, extract_WMH.name), extract_WMH,
               'output_dir')
    wf.connect(ERC2, 'background_proba', extract_WMH, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_WMH, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_WMH, 'levelset_boundary_image')
    wf.connect(AddVentHornsirn, 'out_file', extract_WMH,
               'location_prior_image')

    #===========================================================================
    # extract_WMH2 = extract_WMH.clone(name='Extract_WMH2')
    # extract_WMH2.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_WMH2.name),extract_WMH2,'output_dir')
    # wf.connect(ERC2,'background_proba',extract_WMH2,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_WMH2,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_WMH2,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_WMH2,'location_prior_image')
    #
    # extract_WMH3 = extract_WMH.clone(name='Extract_WMH3')
    # extract_WMH3.inputs.gm_boundary_partial_vol_dist = 3.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_WMH3.name),extract_WMH3,'output_dir')
    # wf.connect(ERC2,'background_proba',extract_WMH3,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_WMH3,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_WMH3,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_WMH3,'location_prior_image')
    #===========================================================================
    '''
    ####################################
    ####     FINDING SMALL WMHs     ####
    ####################################
    Small round WMHs near the cortex are often missed by the main algorithm,
    so we add this step to take care of them.
    '''

    # Recursive Ridge Diffusion : round WMH detection
    round_WMH = Node(RecursiveRidgeDiffusion(), name='round_WMH')
    round_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    round_WMH.inputs.ridge_intensities = "bright"
    round_WMH.inputs.ridge_filter = "0D"
    round_WMH.inputs.orientation = "undefined"
    round_WMH.inputs.ang_factor = 1.0
    round_WMH.inputs.min_scale = 1
    round_WMH.inputs.max_scale = 4
    round_WMH.inputs.propagation_model = "none"
    round_WMH.inputs.diffusion_factor = 1.0
    round_WMH.inputs.similarity_scale = 0.1
    round_WMH.inputs.neighborhood_size = 4
    round_WMH.inputs.max_iter = 100
    round_WMH.inputs.max_diff = 0.001
    round_WMH.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, round_WMH.name),
        round_WMH, 'output_dir')
    wf.connect(ERC2, 'background_proba', round_WMH, 'input_image')
    wf.connect(AddVentHornsirn, 'out_file', round_WMH, 'loc_prior')

    # Extract Lesions : extract round WMH
    extract_round_WMH = Node(LesionExtraction(), name='Extract_round_WMH')
    extract_round_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_round_WMH.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_round_WMH.inputs.csf_boundary_partial_vol_dist = 2.0
    extract_round_WMH.inputs.lesion_clust_dist = 1.0
    extract_round_WMH.inputs.prob_min_thresh = 0.33
    extract_round_WMH.inputs.prob_max_thresh = 0.33
    extract_round_WMH.inputs.small_lesion_size = 6.0
    extract_round_WMH.inputs.save_data = True
    extract_round_WMH.inputs.atlas_file = atlas
    wf.connect(subjectList, ('subject_id', createOutputDir, wf.base_dir,
                             wf.name, extract_round_WMH.name),
               extract_round_WMH, 'output_dir')
    wf.connect(round_WMH, 'ridge_pv', extract_round_WMH, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_round_WMH, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_round_WMH, 'levelset_boundary_image')
    wf.connect(AddVentHornsirn, 'out_file', extract_round_WMH,
               'location_prior_image')

    #===========================================================================
    # extract_round_WMH2 = extract_round_WMH.clone(name='Extract_round_WMH2')
    # extract_round_WMH2.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_round_WMH2.name),extract_round_WMH2,'output_dir')
    # wf.connect(round_WMH,'ridge_pv',extract_round_WMH2,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_round_WMH2,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_round_WMH2,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_round_WMH2,'location_prior_image')
    #
    # extract_round_WMH3 = extract_round_WMH.clone(name='Extract_round_WMH3')
    # extract_round_WMH3.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_round_WMH3.name),extract_round_WMH3,'output_dir')
    # wf.connect(round_WMH,'ridge_pv',extract_round_WMH3,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_round_WMH3,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_round_WMH3,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_round_WMH3,'location_prior_image')
    #===========================================================================
    '''
    ####################################
    ####     COMBINE BOTH TYPES     ####
    ####################################
    Small round WMHs and regular WMH together before thresholding
    +
    PVS from white matter and internal capsule
    '''

    # Image calculator : WM + IC DVRS
    DVRS = Node(ImageMaths(), name="DVRS")
    DVRS.inputs.op_string = "-max"
    DVRS.inputs.out_file = "DVRS_map.nii.gz"
    wf.connect(extract_WM_pvs, 'lesion_score', DVRS, "in_file")
    wf.connect(extract_IC_pvs, "lesion_score", DVRS, "in_file2")

    # Image calculator : WMH + round
    WMH = Node(ImageMaths(), name="WMH")
    WMH.inputs.op_string = "-max"
    WMH.inputs.out_file = "WMH_map.nii.gz"
    wf.connect(extract_WMH, 'lesion_score', WMH, "in_file")
    wf.connect(extract_round_WMH, "lesion_score", WMH, "in_file2")

    #===========================================================================
    # WMH2 = Node(ImageMaths(), name="WMH2")
    # WMH2.inputs.op_string = "-max"
    # WMH2.inputs.out_file = "WMH2_map.nii.gz"
    # wf.connect(extract_WMH2,'lesion_score',WMH2,"in_file")
    # wf.connect(extract_round_WMH2,"lesion_score", WMH2, "in_file2")
    #
    # WMH3 = Node(ImageMaths(), name="WMH3")
    # WMH3.inputs.op_string = "-max"
    # WMH3.inputs.out_file = "WMH3_map.nii.gz"
    # wf.connect(extract_WMH3,'lesion_score',WMH3,"in_file")
    # wf.connect(extract_round_WMH3,"lesion_score", WMH3, "in_file2")
    #===========================================================================

    # Image calculator : multiply by boundary partial volume
    WMH_mul = Node(ImageMaths(), name="WMH_mul")
    WMH_mul.inputs.op_string = "-mul"
    WMH_mul.inputs.out_file = "final_mask.nii.gz"
    wf.connect(WMH, "out_file", WMH_mul, "in_file")
    wf.connect(MGDM, "distance", WMH_mul, "in_file2")

    #===========================================================================
    # WMH2_mul = Node(ImageMaths(), name="WMH2_mul")
    # WMH2_mul.inputs.op_string = "-mul"
    # WMH2_mul.inputs.out_file = "final_mask.nii.gz"
    # wf.connect(WMH2,"out_file", WMH2_mul,"in_file")
    # wf.connect(MGDM,"distance", WMH2_mul, "in_file2")
    #
    # WMH3_mul = Node(ImageMaths(), name="WMH3_mul")
    # WMH3_mul.inputs.op_string = "-mul"
    # WMH3_mul.inputs.out_file = "final_mask.nii.gz"
    # wf.connect(WMH3,"out_file", WMH3_mul,"in_file")
    # wf.connect(MGDM,"distance", WMH3_mul, "in_file2")
    #===========================================================================
    '''
    ##########################################
    ####      SEGMENTATION THRESHOLD      ####
    ##########################################
    A threshold of 0.5 is very conservative, because the final lesion score is the product of two probabilities.
    This needs to be optimized to a value between 0.25 and 0.5 to balance false negatives 
    (dominant at 0.5) and false positives (dominant at low values).
    '''

    # Threshold binary mask :
    DVRS_mask = Node(Threshold(), name="DVRS_mask")
    DVRS_mask.inputs.thresh = 0.25
    DVRS_mask.inputs.direction = "below"
    wf.connect(DVRS, "out_file", DVRS_mask, "in_file")

    # Threshold binary mask : 025
    WMH1_025 = Node(Threshold(), name="WMH1_025")
    WMH1_025.inputs.thresh = 0.25
    WMH1_025.inputs.direction = "below"
    wf.connect(WMH_mul, "out_file", WMH1_025, "in_file")

    #===========================================================================
    # WMH2_025 = Node(Threshold(), name="WMH2_025")
    # WMH2_025.inputs.thresh = 0.25
    # WMH2_025.inputs.direction = "below"
    # wf.connect(WMH2_mul,"out_file", WMH2_025, "in_file")
    #
    # WMH3_025 = Node(Threshold(), name="WMH3_025")
    # WMH3_025.inputs.thresh = 0.25
    # WMH3_025.inputs.direction = "below"
    # wf.connect(WMH3_mul,"out_file", WMH3_025, "in_file")
    #===========================================================================

    # Threshold binary mask : 050
    WMH1_050 = Node(Threshold(), name="WMH1_050")
    WMH1_050.inputs.thresh = 0.50
    WMH1_050.inputs.direction = "below"
    wf.connect(WMH_mul, "out_file", WMH1_050, "in_file")

    #===========================================================================
    # WMH2_050 = Node(Threshold(), name="WMH2_050")
    # WMH2_050.inputs.thresh = 0.50
    # WMH2_050.inputs.direction = "below"
    # wf.connect(WMH2_mul,"out_file", WMH2_050, "in_file")
    #
    # WMH3_050 = Node(Threshold(), name="WMH3_050")
    # WMH3_050.inputs.thresh = 0.50
    # WMH3_050.inputs.direction = "below"
    # wf.connect(WMH3_mul,"out_file", WMH3_050, "in_file")
    #===========================================================================

    # Threshold binary mask : 075
    WMH1_075 = Node(Threshold(), name="WMH1_075")
    WMH1_075.inputs.thresh = 0.75
    WMH1_075.inputs.direction = "below"
    wf.connect(WMH_mul, "out_file", WMH1_075, "in_file")

    #===========================================================================
    # WMH2_075 = Node(Threshold(), name="WMH2_075")
    # WMH2_075.inputs.thresh = 0.75
    # WMH2_075.inputs.direction = "below"
    # wf.connect(WMH2_mul,"out_file", WMH2_075, "in_file")
    #
    # WMH3_075 = Node(Threshold(), name="WMH3_075")
    # WMH3_075.inputs.thresh = 0.75
    # WMH3_075.inputs.direction = "below"
    # wf.connect(WMH3_mul,"out_file", WMH3_075, "in_file")
    #===========================================================================

    ## Outputs

    DVRS_Output = Node(IdentityInterface(fields=[
        'mask', 'region', 'lesion_size', 'lesion_proba', 'boundary', 'label',
        'score'
    ]),
                       name='DVRS_Output')
    wf.connect(DVRS_mask, 'out_file', DVRS_Output, 'mask')

    WMH_output = Node(IdentityInterface(fields=[
        'mask1025', 'mask1050', 'mask1075', 'mask2025', 'mask2050', 'mask2075',
        'mask3025', 'mask3050', 'mask3075'
    ]),
                      name='WMH_output')
    wf.connect(WMH1_025, 'out_file', WMH_output, 'mask1025')
    #wf.connect(WMH2_025,'out_file',WMH_output,'mask2025')
    #wf.connect(WMH3_025,'out_file',WMH_output,'mask3025')
    wf.connect(WMH1_050, 'out_file', WMH_output, 'mask1050')
    #wf.connect(WMH2_050,'out_file',WMH_output,'mask2050')
    #wf.connect(WMH3_050,'out_file',WMH_output,'mask3050')
    wf.connect(WMH1_075, 'out_file', WMH_output, 'mask1075')
    #wf.connect(WMH2_075,'out_file',WMH_output,'mask2070')
    #wf.connect(WMH3_075,'out_file',WMH_output,'mask3075')

    return wf
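
A hedged usage sketch for the factory above (hypothetical paths and subject IDs). The DataGrabber template expects <input_dir>/<subject_id>/anat/*_T1w.nii.gz and *_FLAIR.nii.gz:

wf = Lesion_extractor(
    wf_name='lesions',
    base_dir='/tmp/lesion_work',       # hypothetical working directory
    input_dir='/data/dataset',         # hypothetical: <subject>/anat/*_T1w.nii.gz, *_FLAIR.nii.gz
    subjects=['sub-01', 'sub-02'],     # hypothetical subject IDs
    atlas='/path/to/brain-atlas-quant-3.0.8.txt',  # hypothetical atlas location
)
# wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})
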
Example #10
# FSL randomise for higher level analysis
highermodel = Node(Randomise(tfce=True,
                             raw_stats_imgs=True,
                             design_mat=group_mat,
                             tcon=group_con),
                   name='highermodel')

## Cluster results

# make binary masks of sig clusters
binarize = Node(Binarize(min=0.95, max=1.0),
                name='binarize',
                iterfield='in_file')

# mask T-map before clustering
mask_tmaps = Node(ApplyMask(), name='mask_tmaps')

# clusterize and extract cluster stats/peaks
clusterize = Node(Cluster(threshold=2.3,
                          out_index_file='outindex.nii',
                          out_localmax_txt_file='localmax.txt'),
                  name='clusterize')

# make pictures if time

# In[ ]:

sbc2_workflow2 = Workflow(name='sbc2_workflow2')
sbc2_workflow2.connect([(infosource2, datagrabber, [('roi', 'roi')]),
                        (datagrabber, merge, [('roi', 'in_files')]),
                        (merge, highermodel, [('merged_file', 'in_file')]),
                        # ... (remaining connections truncated in the original)
                        ])
Example #11
    def _run_interface(self, runtime):

        # Loading required packages
        from BrainTypes_additional_interfaces import DipyDenoise
        from nipype.interfaces.diffusion_toolkit.dti import DTIRecon
        import nipype.interfaces.fsl as fsl
        import nipype.pipeline.engine as pe
        import nipype.interfaces.io as nio
        import nipype.interfaces.utility as util
        from nipype.algorithms.misc import Gunzip
        from nipype.interfaces.fsl.maths import ApplyMask
        import os

        # ==============================================================
        # Processing of diffusion-weighted data
        # Extract b0 image
        fslroi = pe.Node(interface=fsl.ExtractROI(), name='extract_b0')
        fslroi.inputs.in_file = self.inputs.dwi
        fslroi.inputs.t_min = 0
        fslroi.inputs.t_size = 1

        # Create a brain mask
        bet = pe.Node(interface=fsl.BET(frac=0.3,
                                        robust=False,
                                        mask=True,
                                        no_output=False),
                      name='bet')

        # Eddy-current and motion correction
        eddy = pe.Node(interface=fsl.epi.Eddy(args='-v'), name='eddy')
        eddy.inputs.in_acqp = self.inputs.acqparams
        eddy.inputs.in_bvec = self.inputs.bvecs
        eddy.inputs.in_bval = self.inputs.bvals
        eddy.inputs.in_file = self.inputs.dwi
        eddy.inputs.in_index = self.inputs.index_file

        # Denoising
        dwi_denoise = pe.Node(interface=DipyDenoise(), name='dwi_denoise')
        dwi_denoise.inputs.in_file = self.inputs.dwi

        # Fitting the diffusion tensor model
        dtifit = pe.Node(interface=DTIRecon(), name='dtifit')
        dtifit.inputs.out_prefix = self.inputs.subject_id
        dtifit.inputs.bvals = self.inputs.bvals
        dtifit.inputs.bvecs = self.inputs.bvecs

        # Applying the mask
        FA_applymask = pe.Node(interface=ApplyMask(), name='FA_applymask')

        # Renaming the outputs
        b0_rename = pe.Node(interface=util.Rename(keep_ext=True),
                            name='b0_rename')
        b0_rename.inputs.format_string = self.inputs.subject_id + '_b0'

        colourFA_rename = pe.Node(interface=util.Rename(keep_ext=True),
                                  name='colourFA_rename')
        colourFA_rename.inputs.format_string = self.inputs.subject_id + '_colourFA'

        FA_rename = pe.Node(interface=util.Rename(keep_ext=True),
                            name='FA_rename')
        FA_rename.inputs.format_string = self.inputs.subject_id + '_FA'

        dwi_rename = pe.Node(interface=util.Rename(keep_ext=True),
                             name='dwi_rename')
        dwi_rename.inputs.format_string = self.inputs.subject_id + '_dwi'

        mask_rename = pe.Node(interface=util.Rename(keep_ext=True),
                              name='mask_rename')
        mask_rename.inputs.format_string = self.inputs.subject_id + '_mask'

        # Collect everything
        datasink = pe.Node(nio.DataSink(), name='sinker')
        datasink.inputs.parameterization = False
        datasink.inputs.base_directory = self.inputs.out_directory + '/_subject_id_' + self.inputs.subject_id

        # ==============================================================
        # Setting up the workflow
        dwi_preproc = pe.Workflow(name='dwi_preproc')

        # Diffusion data
        # Preprocessing
        dwi_preproc.connect(fslroi, 'roi_file', bet, 'in_file')
        dwi_preproc.connect(bet, 'mask_file', eddy, 'in_mask')
        dwi_preproc.connect(eddy, 'out_corrected', dwi_denoise, 'in_file')

        # Calculate diffusion measures
        dwi_preproc.connect(dwi_denoise, 'out_file', dtifit, 'DWI')

        # Applying the mask to the scalar images
        dwi_preproc.connect(dtifit, 'FA', FA_applymask, 'in_file')
        dwi_preproc.connect(bet, 'mask_file', FA_applymask, 'mask_file')

        # Renaming
        dwi_preproc.connect(fslroi, 'roi_file', b0_rename, 'in_file')
        dwi_preproc.connect(dwi_denoise, 'out_file', dwi_rename, 'in_file')
        dwi_preproc.connect(FA_applymask, 'out_file', FA_rename, 'in_file')
        dwi_preproc.connect(dtifit, 'FA_color', colourFA_rename, 'in_file')
        dwi_preproc.connect(bet, 'mask_file', mask_rename, 'in_file')

        # Moving the results to the datasink
        dwi_preproc.connect(b0_rename, 'out_file', datasink,
                            'preprocessed.@b0')
        dwi_preproc.connect(colourFA_rename, 'out_file', datasink,
                            'preprocessed.@colourFA')
        dwi_preproc.connect(dwi_rename, 'out_file', datasink,
                            'preprocessed.@dwi')
        dwi_preproc.connect(FA_rename, 'out_file', datasink,
                            'preprocessed.@FA')
        dwi_preproc.connect(mask_rename, 'out_file', datasink,
                            'preprocessed.@mask')

        # ==============================================================
        # Running the workflow
        dwi_preproc.base_dir = os.path.abspath(self.inputs.out_directory +
                                               '_subject_id_' +
                                               self.inputs.subject_id)
        dwi_preproc.write_graph()
        dwi_preproc.run()

        return runtime
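
# The enclosing interface class is not shown in this example; the class name
# DWIPreproc below is an assumption made for illustration, and the input
# fields mirror the ``self.inputs.*`` attributes used in _run_interface above.
preproc = DWIPreproc()
preproc.inputs.dwi = 'sub-01_dwi.nii.gz'
preproc.inputs.bvals = 'sub-01.bval'
preproc.inputs.bvecs = 'sub-01.bvec'
preproc.inputs.acqparams = 'acqparams.txt'
preproc.inputs.index_file = 'index.txt'
preproc.inputs.subject_id = 'sub-01'
preproc.inputs.out_directory = '/path/to/derivatives'
preproc.run()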
Example #12
# Registration- using FLIRT
# The BOLD image is 'in_file', the anat is 'reference', the output is 'out_file'
coreg1 = Node(FLIRT(), name='coreg1')
coreg2 = Node(FLIRT(apply_xfm=True), name='coreg2')

# make binary mask
# structural is the 'in_file', output is 'binary_file'
binarize_struct = Node(Binarize(dilate=mask_dilation,
                                erode=mask_erosion,
                                min=1),
                       name='binarize_struct')

# apply the binary mask to the functional data
# functional is 'in_file', binary mask is 'mask_file', output is 'out_file'
mask_func = Node(ApplyMask(), name='mask_func')

# Artifact detection for scrubbing/motion assessment
art = Node(
    ArtifactDetect(
        mask_type='file',
        parameter_source='FSL',
        norm_threshold=0.5,  # mutually exclusive with rotation and translation thresholds
        zintensity_threshold=3,
        use_differences=[True, False]),
    name='art')
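
# None of the registration/masking nodes above are connected in this excerpt.
# A hypothetical wiring is sketched below: the Workflow class and the upstream
# motion-correction outputs are assumed, and the field names follow nipype's
# FLIRT, Binarize, ApplyMask and ArtifactDetect interfaces.
sketch_wf = Workflow(name='func_masking_sketch')
sketch_wf.connect([
    # reuse the affine estimated by coreg1 to resample the BOLD onto the anat
    (coreg1, coreg2, [('out_matrix_file', 'in_matrix_file')]),
    # apply the dilated/eroded structural mask to the resampled functional data
    (coreg2, mask_func, [('out_file', 'in_file')]),
    (binarize_struct, mask_func, [('binary_file', 'mask_file')]),
    # artifact detection on the masked series (realignment_parameters from a
    # motion-correction node would also need to be connected)
    (mask_func, art, [('out_file', 'realigned_files')]),
    (binarize_struct, art, [('binary_file', 'mask_file')]),
])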


def converthex_xform(orig_xform):
    from numpy import genfromtxt, savetxt
Example #13
def longitudinal_registration(sub_id,
                              datasource,
                              sessions,
                              reference,
                              result_dir,
                              nipype_cache,
                              bet_workflow=None):
    """
    This is a workflow to register multi-modal MR images (T2, T1KM, FLAIR) to their
    reference T1 image in a multiple time-point cohort. In particular, for each
    subject, this workflow registers the MR images of each time-point (tp) to the
    corresponding T1, then registers all the T1 images to a reference T1 (the one
    closest in time to the radiotherapy session), and finally registers the
    reference T1 to the BPLCT. At the end, all the MR images are saved both in T1
    space (for each tp) and in CT space.
    """
    reg2T1 = nipype.MapNode(interface=AntsRegSyn(),
                            iterfield=['input_file'],
                            name='reg2T1')
    reg2T1.inputs.transformation = 's'
    reg2T1.inputs.num_dimensions = 3
    reg2T1.inputs.num_threads = 6

    if reference:
        regT12CT = nipype.MapNode(interface=AntsRegSyn(),
                                  iterfield=['input_file'],
                                  name='regT12CT')
        regT12CT.inputs.transformation = 'r'
        regT12CT.inputs.num_dimensions = 3
        regT12CT.inputs.num_threads = 4

    reg_nodes = []
    for i in range(3):
        reg = nipype.MapNode(interface=AntsRegSyn(),
                             iterfield=['input_file', 'ref_file'],
                             name='ants_reg{}'.format(i))
        reg.inputs.transformation = 'r'
        reg.inputs.num_dimensions = 3
        reg.inputs.num_threads = 4
        reg.inputs.interpolation = 'BSpline'
        reg_nodes.append(reg)

    apply_mask_nodes = []
    for i in range(3):
        masking = nipype.MapNode(interface=ApplyMask(),
                                 iterfield=['in_file', 'mask_file'],
                                 name='masking{}'.format(i))
        apply_mask_nodes.append(masking)

    apply_ts_nodes = []
    for i in range(3):
        apply_ts = nipype.MapNode(interface=ApplyTransforms(),
                                  iterfield=['input_image', 'transforms'],
                                  name='apply_ts{}'.format(i))
        apply_ts_nodes.append(apply_ts)
    # Apply ts nodes for T1_ref normalization
    apply_ts_nodes1 = []
    for i in range(3):
        apply_ts = nipype.MapNode(interface=ApplyTransforms(),
                                  iterfield=['input_image', 'transforms'],
                                  name='apply_ts1{}'.format(i))
        apply_ts_nodes1.append(apply_ts)

    split_ds_nodes = []
    for i in range(4):
        split_ds = nipype.Node(interface=Split(), name='split_ds{}'.format(i))
        split_ds.inputs.splits = [1] * len(sessions)
        split_ds_nodes.append(split_ds)

    apply_ts_t1 = nipype.MapNode(interface=ApplyTransforms(),
                                 iterfield=['input_image', 'transforms'],
                                 name='apply_ts_t1')
    merge_nodes = []
    if reference:
        iterfields = ['in1', 'in2', 'in3', 'in4']
        iterfields_t1 = ['in1', 'in2', 'in3']
        if_0 = 2
    else:
        iterfields = ['in1', 'in2', 'in3']
        iterfields_t1 = ['in1', 'in2']
        if_0 = 1

    for i in range(3):
        merge = nipype.MapNode(interface=Merge(len(iterfields)),
                               iterfield=iterfields,
                               name='merge{}'.format(i))
        merge.inputs.ravel_inputs = True
        merge_nodes.append(merge)
    # Merging transforms for normalization to T1_ref
    merge_nodes1 = []
    for i in range(3):
        merge = nipype.MapNode(interface=Merge(3),
                               iterfield=['in1', 'in2', 'in3'],
                               name='merge1{}'.format(i))
        merge.inputs.ravel_inputs = True
        merge_nodes1.append(merge)

    merge_ts_t1 = nipype.MapNode(interface=Merge(len(iterfields_t1)),
                                 iterfield=iterfields_t1,
                                 name='merge_t1')
    merge_ts_t1.inputs.ravel_inputs = True

    # Create a fake merge of the transformations from t1_0 to CT in order to
    # have the same number of matrices as inputs in the MapNode
    fake_merge = nipype.Node(interface=Merge(len(sessions)), name='fake_merge')

    datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                           "datasink")

    substitutions = [('subid', sub_id)]
    for i, session in enumerate(sessions):
        substitutions += [('session{}'.format(i), session)]
        substitutions += [('_masking0{}/antsregWarped_masked.nii.gz'.format(i),
                           session + '/' + 'CT1_preproc.nii.gz')]
        substitutions += [('_reg2T1{}/antsreg0GenericAffine.mat'.format(i),
                           session + '/' + 'reg2T1_ref.mat')]
        substitutions += [('_reg2T1{}/antsreg1Warp.nii.gz'.format(i),
                           session + '/' + 'reg2T1_ref_warp.nii.gz')]
        substitutions += [('_reg2T1{}/antsregWarped.nii.gz'.format(i),
                           session + '/' + 'T1_reg2T1_ref.nii.gz')]
        substitutions += [('_regT12CT{}/antsreg0GenericAffine.mat'.format(i),
                           '/regT1_ref2CT.mat')]
        substitutions += [('_masking1{}/antsregWarped_masked.nii.gz'.format(i),
                           session + '/' + 'T2_preproc.nii.gz')]
        substitutions += [('_masking2{}/antsregWarped_masked.nii.gz'.format(i),
                           session + '/' + 'FLAIR_preproc.nii.gz')]
        substitutions += [('_apply_ts0{}/CT1_trans.nii.gz'.format(i),
                           session + '/' + 'CT1_reg2CT.nii.gz')]
        substitutions += [('_apply_ts1{}/T2_trans.nii.gz'.format(i),
                           session + '/' + 'T2_reg2CT.nii.gz')]
        substitutions += [('_apply_ts2{}/FLAIR_trans.nii.gz'.format(i),
                           session + '/' + 'FLAIR_reg2CT.nii.gz')]
        substitutions += [('_apply_ts_t1{}/T1_trans.nii.gz'.format(i),
                           session + '/' + 'T1_reg2CT.nii.gz')]
        substitutions += [('_apply_ts10{}/CT1_trans.nii.gz'.format(i),
                           session + '/' + 'CT1_reg2T1_ref.nii.gz')]
        substitutions += [('_apply_ts11{}/T2_trans.nii.gz'.format(i),
                           session + '/' + 'T2_reg2T1_ref.nii.gz')]
        substitutions += [('_apply_ts12{}/FLAIR_trans.nii.gz'.format(i),
                           session + '/' + 'FLAIR_reg2T1_ref.nii.gz')]

    datasink.inputs.substitutions = substitutions
    # Create Workflow
    workflow = nipype.Workflow('registration_workflow', base_dir=nipype_cache)

    for i, reg in enumerate(reg_nodes):
        workflow.connect(datasource, SEQUENCES[i + 1], reg, 'input_file')
        workflow.connect(datasource, SEQUENCES[0], reg, 'ref_file')
    # bring every MR in CT space
    for i, node in enumerate(apply_ts_nodes):
        workflow.connect(datasource, SEQUENCES[i + 1], node, 'input_image')
        if reference:
            workflow.connect(datasource, 'reference', node, 'reference_image')
        else:
            workflow.connect(datasource, 't1_0', node, 'reference_image')
        workflow.connect(merge_nodes[i], 'out', node, 'transforms')
        workflow.connect(node, 'output_image', datasink,
                         'results.subid.@{}_reg2CT'.format(SEQUENCES[i + 1]))
    # bring every MR in T1_ref space
    for i, node in enumerate(apply_ts_nodes1):
        workflow.connect(datasource, SEQUENCES[i + 1], node, 'input_image')
        workflow.connect(datasource, 't1_0', node, 'reference_image')
        workflow.connect(merge_nodes1[i], 'out', node, 'transforms')
        workflow.connect(
            node, 'output_image', datasink,
            'results.subid.@{}_reg2T1_ref'.format(SEQUENCES[i + 1]))

    for i, node in enumerate(merge_nodes):
        workflow.connect(reg_nodes[i], 'regmat', node, 'in{}'.format(if_0 + 2))
        workflow.connect(reg2T1, 'regmat', node, 'in{}'.format(if_0 + 1))
        workflow.connect(reg2T1, 'warp_file', node, 'in{}'.format(if_0))
        if reference:
            workflow.connect(fake_merge, 'out', node, 'in1')

    for i, node in enumerate(merge_nodes1):
        workflow.connect(reg_nodes[i], 'regmat', node, 'in3')
        workflow.connect(reg2T1, 'regmat', node, 'in2')
        workflow.connect(reg2T1, 'warp_file', node, 'in1')

    for i, mask in enumerate(apply_mask_nodes):
        workflow.connect(reg_nodes[i], 'reg_file', mask, 'in_file')
        if bet_workflow is not None:
            workflow.connect(bet_workflow, 'bet.out_mask', mask, 'mask_file')
        else:
            workflow.connect(datasource, 't1_mask', mask, 'mask_file')
        workflow.connect(mask, 'out_file', datasink,
                         'results.subid.@{}_preproc'.format(SEQUENCES[i + 1]))
    if bet_workflow is not None:
        workflow.connect(bet_workflow, 'bet.out_file', reg2T1, 'input_file')
        workflow.connect(bet_workflow, 't1_0_bet.out_file', reg2T1, 'ref_file')
    else:
        workflow.connect(datasource, 't1_bet', reg2T1, 'input_file')
        workflow.connect(datasource, 't1_0_bet', reg2T1, 'ref_file')

    if reference:
        for i, sess in enumerate(sessions):
            workflow.connect(regT12CT, 'regmat', fake_merge,
                             'in{}'.format(i + 1))
            workflow.connect(regT12CT, 'regmat', datasink,
                             'results.subid.{0}.@regT12CT_mat'.format(sess))
        workflow.connect(datasource, 'reference', regT12CT, 'ref_file')
        workflow.connect(datasource, 't1_0', regT12CT, 'input_file')
        workflow.connect(fake_merge, 'out', merge_ts_t1, 'in1')
        workflow.connect(datasource, 'reference', apply_ts_t1,
                         'reference_image')
    else:
        workflow.connect(datasource, 't1_0', apply_ts_t1, 'reference_image')

    workflow.connect(datasource, 't1', apply_ts_t1, 'input_image')

    workflow.connect(merge_ts_t1, 'out', apply_ts_t1, 'transforms')
    workflow.connect(reg2T1, 'regmat', merge_ts_t1, 'in{}'.format(if_0 + 1))
    workflow.connect(reg2T1, 'warp_file', merge_ts_t1, 'in{}'.format(if_0))

    workflow.connect(reg2T1, 'warp_file', datasink,
                     'results.subid.@reg2CT_warp')
    workflow.connect(reg2T1, 'regmat', datasink, 'results.subid.@reg2CT_mat')
    workflow.connect(reg2T1, 'reg_file', datasink, 'results.subid.@T12T1_ref')
    workflow.connect(apply_ts_t1, 'output_image', datasink,
                     'results.subid.@T1_reg2CT')

    if bet_workflow is not None:
        workflow = datasink_base(datasink, datasource, workflow, sessions,
                                 reference)
    else:
        workflow = datasink_base(datasink,
                                 datasource,
                                 workflow,
                                 sessions,
                                 reference,
                                 extra_nodes=['t1_bet'])

    return workflow
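
# Hypothetical invocation of the workflow factory above. The datasource node
# (exposing t1, t1_0, t1_bet, t1_0_bet, t1_mask, reference and the SEQUENCES
# fields), the session labels and the paths are placeholders, not values from
# the original project.
reg_wf = longitudinal_registration(
    sub_id='sub-001',
    datasource=datasource,
    sessions=['tp00', 'tp01', 'tp02'],
    reference=True,             # a planning CT (BPLCT) is available
    result_dir='/path/to/results',
    nipype_cache='/path/to/nipype_cache',
    bet_workflow=None,          # fall back to the pre-computed t1_bet/t1_mask inputs
)
reg_wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})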
Example #14
def single_tp_registration(sub_id,
                           datasource,
                           session,
                           reference,
                           result_dir,
                           nipype_cache,
                           bet_workflow=None):
    """
    This is a workflow to register multi-modal MR images (T2, T1KM, FLAIR) to their
    reference T1 image in a single time-point cohort. In particular, for each
    subject, this workflow registers the MR images of the provided time-point (tp)
    to the corresponding T1, then registers the T1 image to the BPLCT (if present).
    At the end, all the MR images are saved both in T1 space and in CT space.
    """
    session = session[0]
    if reference:
        regT12CT = nipype.MapNode(interface=AntsRegSyn(),
                                  iterfield=['input_file'],
                                  name='regT12CT')
        regT12CT.inputs.transformation = 'r'
        regT12CT.inputs.num_dimensions = 3
        regT12CT.inputs.num_threads = 4

    reg_nodes = []
    for i in range(3):
        reg = nipype.MapNode(interface=AntsRegSyn(),
                             iterfield=['input_file', 'ref_file'],
                             name='ants_reg{}'.format(i))
        reg.inputs.transformation = 'r'
        reg.inputs.num_dimensions = 3
        reg.inputs.num_threads = 4
        reg.inputs.interpolation = 'BSpline'
        reg_nodes.append(reg)

    apply_mask_nodes = []
    for i in range(3):
        masking = nipype.MapNode(interface=ApplyMask(),
                                 iterfield=['in_file', 'mask_file'],
                                 name='masking{}'.format(i))
        apply_mask_nodes.append(masking)

    if reference:
        apply_ts_nodes = []
        for i in range(3):
            apply_ts = nipype.MapNode(interface=ApplyTransforms(),
                                      iterfield=['input_image', 'transforms'],
                                      name='apply_ts{}'.format(i))
            apply_ts_nodes.append(apply_ts)

        apply_ts_t1 = nipype.MapNode(interface=ApplyTransforms(),
                                     iterfield=['input_image', 'transforms'],
                                     name='apply_ts_t1')

        merge_nodes = []
        for i in range(3):
            merge = nipype.MapNode(interface=Merge(2),
                                   iterfield=['in1', 'in2'],
                                   name='merge{}'.format(i))
            merge.inputs.ravel_inputs = True
            merge_nodes.append(merge)

    datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                           "datasink")

    substitutions = [('subid', sub_id)]
    substitutions += [('session', session)]
    substitutions += [('_regT12CT0/antsreg0GenericAffine.mat',
                       '/reg2T1_ref.mat')]
    substitutions += [('_masking00/antsregWarped_masked.nii.gz',
                       session + '/' + 'CT1_preproc.nii.gz')]
    substitutions += [('_regT12CT/antsreg0GenericAffine.mat',
                       '/regT1_ref2CT.mat')]
    substitutions += [('_masking10/antsregWarped_masked.nii.gz',
                       session + '/' + 'T2_preproc.nii.gz')]
    substitutions += [('_masking20/antsregWarped_masked.nii.gz',
                       session + '/' + 'FLAIR_preproc.nii.gz')]
    substitutions += [('_apply_ts00/antsregWarped_masked_trans.nii.gz',
                       session + '/' + 'CT1_reg2CT.nii.gz')]
    substitutions += [('_apply_ts10/antsregWarped_masked_trans.nii.gz',
                       session + '/' + 'T2_reg2CT.nii.gz')]
    substitutions += [('_apply_ts20/antsregWarped_masked_trans.nii.gz',
                       session + '/' + 'FLAIR_reg2CT.nii.gz')]
    substitutions += [('_apply_ts_t10/T1_preproc_trans.nii.gz',
                       session + '/' + 'T1_reg2CT.nii.gz')]

    datasink.inputs.substitutions = substitutions
    # Create Workflow
    workflow = nipype.Workflow('registration_workflow', base_dir=nipype_cache)

    for i, reg in enumerate(reg_nodes):
        workflow.connect(datasource, SEQUENCES[i + 1], reg, 'input_file')
        workflow.connect(datasource, SEQUENCES[0], reg, 'ref_file')
    # bring every MR in CT space
    if reference:
        for i, node in enumerate(merge_nodes):
            workflow.connect(reg_nodes[i], 'regmat', node, 'in2')
            workflow.connect(regT12CT, 'regmat', node, 'in1')
        for i, node in enumerate(apply_ts_nodes):
            workflow.connect(apply_mask_nodes[i], 'out_file', node,
                             'input_image')
            workflow.connect(datasource, 'reference', node, 'reference_image')
            workflow.connect(regT12CT, 'regmat', node, 'transforms')
            workflow.connect(
                node, 'output_image', datasink,
                'results.subid.@{}_reg2CT'.format(SEQUENCES[i + 1]))

        workflow.connect(regT12CT, 'regmat', datasink,
                         'results.subid.{0}.@regT12CT_mat'.format(session))
        workflow.connect(datasource, 'reference', regT12CT, 'ref_file')
        workflow.connect(datasource, 't1', regT12CT, 'input_file')

        if bet_workflow is not None:
            workflow.connect(bet_workflow, 'bet.out_file', apply_ts_t1,
                             'input_image')
        else:
            workflow.connect(datasource, 't1_bet', apply_ts_t1, 'input_image')
        workflow.connect(datasource, 'reference', apply_ts_t1,
                         'reference_image')
        workflow.connect(apply_ts_t1, 'output_image', datasink,
                         'results.subid.@T1_reg2CT')
        workflow.connect(regT12CT, 'regmat', apply_ts_t1, 'transforms')

    for i, mask in enumerate(apply_mask_nodes):
        workflow.connect(reg_nodes[i], 'reg_file', mask, 'in_file')
        if bet_workflow is not None:
            workflow.connect(bet_workflow, 'bet.out_mask', mask, 'mask_file')
        else:
            workflow.connect(datasource, 't1_mask', mask, 'mask_file')
        workflow.connect(mask, 'out_file', datasink,
                         'results.subid.@{}_preproc'.format(SEQUENCES[i + 1]))

    if bet_workflow is not None:
        workflow = datasink_base(datasink,
                                 datasource,
                                 workflow, [session],
                                 reference,
                                 t10=False)
    else:
        workflow = datasink_base(datasink,
                                 datasource,
                                 workflow, [session],
                                 reference,
                                 extra_nodes=['t1_bet'],
                                 t10=False)

    return workflow
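
# Hypothetical call of the single time-point variant, here without a planning
# CT (reference=False) and reusing a previously built BET sub-workflow; all
# names and paths are placeholders.
reg_wf = single_tp_registration(
    sub_id='sub-002',
    datasource=datasource,
    session=['tp00'],           # the function unpacks a one-element list
    reference=False,
    result_dir='/path/to/results',
    nipype_cache='/path/to/nipype_cache',
    bet_workflow=bet_workflow,
)
reg_wf.run()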
Example #15
def init_brain_extraction_wf(
    name="brain_extraction_wf",
    in_template="OASIS30ANTs",
    template_spec=None,
    use_float=True,
    normalization_quality="precise",
    omp_nthreads=None,
    mem_gb=3.0,
    bids_suffix="T1w",
    atropos_refine=True,
    atropos_use_random_seed=True,
    atropos_model=None,
    use_laplacian=True,
    bspline_fitting_distance=200,
):
    """
    Build a workflow for atlas-based brain extraction on anatomical MRI data.

    A Nipype implementation of the official ANTs' ``antsBrainExtraction.sh``
    workflow (only for 3D images).

    The official workflow is built as follows (and this implementation
    follows the same organization):

      1. Step 1 performs several clerical tasks (adding padding, calculating
         the Laplacian of inputs, affine initialization) and the core
         spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: smart binarization of the brain mask
      4. Superstep 6: apply ATROPOS and massage its outputs
      5. Superstep 7: use results from 4 to refine the brain mask

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.anat.ants import init_brain_extraction_wf
            wf = init_brain_extraction_wf()

    Parameters
    ----------
    in_template : str
        Name of the skull-stripping template ('OASIS30ANTs', 'NKI', or
        path).
        The brain template from which regions will be projected
        Anatomical template created using e.g. LPBA40 data set with
        ``buildtemplateparallel.sh`` in ANTs.
        The workflow will automatically search for a brain probability
        mask created using e.g. LPBA40 data set which have brain masks
        defined, and warped to anatomical template and
        averaged resulting in a probability image.
    use_float : bool
        Whether single precision should be used
    normalization_quality : str
        Use more precise or faster registration parameters
        (default: ``precise``, other possible values: ``testing``)
    omp_nthreads : int
        Maximum number of threads an individual process may use
    mem_gb : float
        Estimated peak memory consumption of the most hungry nodes
        in the workflow
    bids_suffix : str
        Sequence type of the first input image. For a list of acceptable values
        see https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
    atropos_refine : bool
        Enables or disables the whole ATROPOS sub-workflow
    atropos_use_random_seed : bool
        Whether ATROPOS should generate a random seed based on the
        system's clock
    atropos_model : tuple or None
        Allows to specify a particular segmentation model, overwriting
        the defaults based on ``bids_suffix``
    use_laplacian : bool
        Enables or disables alignment of the Laplacian as an additional
        criterion for image registration quality (default: True)
    bspline_fitting_distance : float
        The size of the b-spline mesh grid elements, in mm (default: 200)
    name : str, optional
        Workflow name (default: ``brain_extraction_wf``)

    Inputs
    ------
    in_files : list
        List of input anatomical images to be brain-extracted,
        typically T1-weighted.
        If a list of anatomical images is provided, subsequently
        specified images are used during the segmentation process.
        However, only the first image is used in the registration
        of priors.
        Our suggestion would be to specify the T1w as the first image.
    in_mask : list, optional
        Mask used for registration to limit the metric
        computation to a specific region.

    Outputs
    -------
    out_file : str
        Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
    out_mask : str
        Calculated brain mask
    bias_corrected : str
        The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
        correction, before skull-stripping.
    bias_image : str
        The :abbr:`INU (intensity non-uniformity)` field estimated for each
        input in ``in_files``
    out_segm : str
        Output segmentation by ATROPOS
    out_tpms : str
        Output :abbr:`TPMs (tissue probability maps)` by ATROPOS

    """
    from templateflow.api import get as get_template

    wf = pe.Workflow(name)

    template_spec = template_spec or {}

    # suffix passed via spec takes precedence
    template_spec["suffix"] = template_spec.get("suffix", bids_suffix)

    tpl_target_path, common_spec = get_template_specs(
        in_template, template_spec=template_spec)

    # Get probabilistic brain mask if available
    tpl_mask_path = get_template(
        in_template, label="brain", suffix="probseg", **common_spec
    ) or get_template(in_template, desc="brain", suffix="mask", **common_spec)

    if omp_nthreads is None or omp_nthreads < 1:
        omp_nthreads = cpu_count()

    inputnode = pe.Node(niu.IdentityInterface(fields=["in_files", "in_mask"]),
                        name="inputnode")

    # Try to find a registration mask, set if available
    tpl_regmask_path = get_template(in_template,
                                    desc="BrainCerebellumExtraction",
                                    suffix="mask",
                                    **common_spec)
    if tpl_regmask_path:
        inputnode.inputs.in_mask = str(tpl_regmask_path)

    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "out_file",
            "out_mask",
            "bias_corrected",
            "bias_image",
            "out_segm",
            "out_tpms",
        ]),
        name="outputnode",
    )

    copy_xform = pe.Node(
        CopyXForm(
            fields=["out_file", "out_mask", "bias_corrected", "bias_image"]),
        name="copy_xform",
        run_without_submitting=True,
    )

    trunc = pe.MapNode(
        ImageMath(operation="TruncateImageIntensity", op2="0.01 0.999 256"),
        name="truncate_images",
        iterfield=["op1"],
    )
    inu_n4 = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * 4,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="inu_n4",
        iterfield=["input_image"],
    )

    res_tmpl = pe.Node(
        ResampleImageBySpacing(out_spacing=(4, 4, 4), apply_smoothing=True),
        name="res_tmpl",
    )
    res_tmpl.inputs.input_image = tpl_target_path
    res_target = pe.Node(
        ResampleImageBySpacing(out_spacing=(4, 4, 4), apply_smoothing=True),
        name="res_target",
    )

    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="1.5 1"),
                       name="lap_tmpl")
    lap_tmpl.inputs.op1 = tpl_target_path
    lap_target = pe.Node(ImageMath(operation="Laplacian", op2="1.5 1"),
                         name="lap_target")
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
    mrg_tmpl.inputs.in1 = tpl_target_path
    mrg_target = pe.Node(niu.Merge(2), name="mrg_target")

    # Initialize transforms with antsAI
    init_aff = pe.Node(
        AI(
            metric=("Mattes", 32, "Regular", 0.25),
            transform=("Affine", 0.1),
            search_factor=(15, 0.1),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            verbose=True,
        ),
        name="init_aff",
        n_procs=omp_nthreads,
    )

    # Tolerate missing ANTs at construction time
    _ants_version = Registration().version
    if _ants_version and parseversion(_ants_version) >= Version("2.3.0"):
        init_aff.inputs.search_grid = (40, (0, 40, 40))

    # Set up spatial normalization
    settings_file = ("antsBrainExtraction_%s.json" if use_laplacian else
                     "antsBrainExtractionNoLaplacian_%s.json")
    norm = pe.Node(
        Registration(from_file=pkgr_fn("niworkflows.data", settings_file %
                                       normalization_quality)),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float
    fixed_mask_trait = "fixed_image_mask"
    if _ants_version and parseversion(_ants_version) >= Version("2.2.0"):
        fixed_mask_trait += "s"

    map_brainmask = pe.Node(
        ApplyTransforms(interpolation="Gaussian", float=True),
        name="map_brainmask",
        mem_gb=1,
    )
    map_brainmask.inputs.input_image = str(tpl_mask_path)

    thr_brainmask = pe.Node(
        ThresholdImage(dimension=3,
                       th_low=0.5,
                       th_high=1.0,
                       inside_value=1,
                       outside_value=0),
        name="thr_brainmask",
    )

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation="MD", op2="2"),
                            name="dil_brainmask")
    # Get largest connected component
    get_brainmask = pe.Node(ImageMath(operation="GetLargestComponent"),
                            name="get_brainmask")

    # Refine INU correction
    inu_n4_final = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="inu_n4_final",
        iterfield=["input_image"],
    )
    if _ants_version and parseversion(_ants_version) >= Version("2.1.0"):
        inu_n4_final.inputs.rescale_intensities = True
    else:
        warn(
            """\
Found ANTs version %s, which is too old. Please consider upgrading to 2.1.0 or \
greater so that the --rescale-intensities option is available with \
N4BiasFieldCorrection.""" % _ants_version,
            DeprecationWarning,
        )

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(),
                            iterfield=["in_file"],
                            name="apply_mask")

    # fmt: off
    wf.connect([
        (inputnode, trunc, [("in_files", "op1")]),
        (inputnode, copy_xform, [(("in_files", _pop), "hdr_file")]),
        (inputnode, inu_n4_final, [("in_files", "input_image")]),
        (inputnode, init_aff, [("in_mask", "fixed_image_mask")]),
        (inputnode, norm, [("in_mask", fixed_mask_trait)]),
        (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
        (trunc, inu_n4, [("output_image", "input_image")]),
        (inu_n4, res_target, [(("output_image", _pop), "input_image")]),
        (res_tmpl, init_aff, [("output_image", "fixed_image")]),
        (res_target, init_aff, [("output_image", "moving_image")]),
        (init_aff, norm, [("output_transform", "initial_moving_transform")]),
        (norm, map_brainmask, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (map_brainmask, thr_brainmask, [("output_image", "input_image")]),
        (thr_brainmask, dil_brainmask, [("output_image", "op1")]),
        (dil_brainmask, get_brainmask, [("output_image", "op1")]),
        (inu_n4_final, apply_mask, [("output_image", "in_file")]),
        (get_brainmask, apply_mask, [("output_image", "mask_file")]),
        (get_brainmask, copy_xform, [("output_image", "out_mask")]),
        (apply_mask, copy_xform, [("out_file", "out_file")]),
        (inu_n4_final, copy_xform, [
            ("output_image", "bias_corrected"),
            ("bias_image", "bias_image"),
        ]),
        (copy_xform, outputnode, [
            ("out_file", "out_file"),
            ("out_mask", "out_mask"),
            ("bias_corrected", "bias_corrected"),
            ("bias_image", "bias_image"),
        ]),
    ])
    # fmt: on

    if use_laplacian:
        lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="1.5 1"),
                           name="lap_tmpl")
        lap_tmpl.inputs.op1 = tpl_target_path
        lap_target = pe.Node(ImageMath(operation="Laplacian", op2="1.5 1"),
                             name="lap_target")
        mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
        mrg_tmpl.inputs.in1 = tpl_target_path
        mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
        # fmt: off
        wf.connect([
            (inu_n4, lap_target, [(("output_image", _pop), "op1")]),
            (lap_tmpl, mrg_tmpl, [("output_image", "in2")]),
            (inu_n4, mrg_target, [("output_image", "in1")]),
            (lap_target, mrg_target, [("output_image", "in2")]),
            (mrg_tmpl, norm, [("out", "fixed_image")]),
            (mrg_target, norm, [("out", "moving_image")]),
        ])
        # fmt: on

    else:
        norm.inputs.fixed_image = tpl_target_path
        # fmt: off
        wf.connect([
            (inu_n4, norm, [(("output_image", _pop), "moving_image")]),
        ])
        # fmt: on

    if atropos_refine:
        atropos_model = atropos_model or list(
            ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
        )
        sel_wm = pe.Node(
            niu.Select(index=atropos_model[-1] - 1),
            name="sel_wm",
            run_without_submitting=True,
        )

        # fmt: off
        wf.disconnect([
            (get_brainmask, apply_mask, [("output_image", "mask_file")]),
            (copy_xform, outputnode, [("out_mask", "out_mask")]),
        ])
        wf.connect([
            (inu_n4, atropos_wf, [("output_image", "inputnode.in_files")]),
            (thr_brainmask, atropos_wf, [("output_image", "inputnode.in_mask")
                                         ]),
            (get_brainmask, atropos_wf, [
                ("output_image", "inputnode.in_mask_dilated"),
            ]),
            (atropos_wf, sel_wm, [("outputnode.out_tpms", "inlist")]),
            (sel_wm, inu_n4_final, [("out", "weight_image")]),
            (atropos_wf, apply_mask, [("outputnode.out_mask", "mask_file")]),
            (atropos_wf, outputnode, [
                ("outputnode.out_mask", "out_mask"),
                ("outputnode.out_segm", "out_segm"),
                ("outputnode.out_tpms", "out_tpms"),
            ]),
        ])
        # fmt: on
    return wf
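
# Minimal usage sketch for the workflow factory above: it requires ANTs on the
# PATH and fetches the OASIS30ANTs template through templateflow on first use.
# The input path and working directory are placeholders.
brain_wf = init_brain_extraction_wf(omp_nthreads=4, mem_gb=3.0)
brain_wf.inputs.inputnode.in_files = ['/path/to/sub-01_T1w.nii.gz']
brain_wf.base_dir = '/path/to/work'
brain_wf.run()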
Example #16
def extract_tissue_c123(c1, c2, c3):
    first_tissue = c1
    string_list = [c2, c3]
    return (first_tissue, string_list)


pre_merge = Node(Function(input_names=['c1', 'c2', 'c3'],
                          output_names=['first_tissue', 'string_list'],
                          function=extract_tissue_c123),
                 name='Pre_Merge_Tissues')

merge_tissues = Node(MultiImageMaths(), name="Merge_C1_C2_C3")
merge_tissues.inputs.op_string = "-add %s -add %s -thr 0.05 -bin"

fill_mask = Node(UnaryMaths(), name="FillHoles_Mask")
fill_mask.inputs.operation = "fillh"

apply_mask_t1 = Node(ApplyMask(), name="ApplyMask_T1")
apply_mask_flair = Node(ApplyMask(), name="ApplyMask_FLAIR")
apply_mask_swi = Node(ApplyMask(), name="ApplyMask_SWI")
apply_mask_bct1 = Node(ApplyMask(), name="ApplyMask_BiasCorrect_T1")

### SNR
# Tissue 1-3 mask construction and HeadMask construction.
con_tissue_mask_1 = Node(Threshold(), name="Tissue1_Mask")
con_tissue_mask_1.inputs.thresh = 0.1
con_tissue_mask_1.inputs.args = "-bin"
con_tissue_mask_2 = Node(Threshold(), name="Tissue2_Mask")
con_tissue_mask_2.inputs.thresh = 0.1
con_tissue_mask_2.inputs.args = "-bin"
con_tissue_mask_3 = Node(Threshold(), name="Tissue3_Mask")
con_tissue_mask_3.inputs.thresh = 0.1
con_tissue_mask_3.inputs.args = "-bin"
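
# A possible wiring of the masking chain above. The Workflow class and the
# upstream source of the c1/c2/c3 tissue probability maps are not part of this
# example and are assumed here for illustration; field names follow nipype's
# MultiImageMaths, UnaryMaths and ApplyMask interfaces.
mask_wf = Workflow(name='tissue_mask_sketch')
mask_wf.connect([
    (pre_merge, merge_tissues, [('first_tissue', 'in_file'),
                                ('string_list', 'operand_files')]),
    (merge_tissues, fill_mask, [('out_file', 'in_file')]),
    (fill_mask, apply_mask_t1, [('out_file', 'mask_file')]),
    (fill_mask, apply_mask_flair, [('out_file', 'mask_file')]),
    (fill_mask, apply_mask_swi, [('out_file', 'mask_file')]),
    (fill_mask, apply_mask_bct1, [('out_file', 'mask_file')]),
])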
Example #17
File: ants.py Project: gkiar/C-PAC
def init_brain_extraction_wf(tpl_target_path,
                             tpl_mask_path,
                             tpl_regmask_path,
                             name='brain_extraction_wf',
                             template_spec=None,
                             use_float=True,
                             normalization_quality='precise',
                             omp_nthreads=None,
                             mem_gb=3.0,
                             bids_suffix='T1w',
                             atropos_refine=True,
                             atropos_use_random_seed=True,
                             atropos_model=None,
                             use_laplacian=True,
                             bspline_fitting_distance=200):
    """
    A Nipype implementation of the official ANTs' ``antsBrainExtraction.sh``
    workflow (only for 3D images).
    The official workflow is built as follows (and this implementation
    follows the same organization):
      1. Step 1 performs several clerical tasks (adding padding, calculating
         the Laplacian of inputs, affine initialization) and the core
         spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: smart binarization of the brain mask
      4. Superstep 6: apply ATROPOS and massage its outputs
      5. Superstep 7: use results from 4 to refine the brain mask
    .. workflow::
        :graph2use: orig
        :simple_form: yes
        from niworkflows.anat import init_brain_extraction_wf
        wf = init_brain_extraction_wf()
    **Parameters**
        in_template : str
            Name of the skull-stripping template ('OASIS30ANTs', 'NKI', or
            path).
            The brain template from which regions will be projected
            Anatomical template created using e.g. LPBA40 data set with
            ``buildtemplateparallel.sh`` in ANTs.
            The workflow will automatically search for a brain probability
            mask created using e.g. LPBA40 data set which have brain masks
            defined, and warped to anatomical template and
            averaged resulting in a probability image.
        use_float : bool
            Whether single precision should be used
        normalization_quality : str
            Use more precise or faster registration parameters
            (default: ``precise``, other possible values: ``testing``)
        omp_nthreads : int
            Maximum number of threads an individual process may use
        mem_gb : float
            Estimated peak memory consumption of the most hungry nodes
            in the workflow
        bids_suffix : str
            Sequence type of the first input image. For a list of acceptable values
            see https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
        atropos_refine : bool
            Enables or disables the whole ATROPOS sub-workflow
        atropos_use_random_seed : bool
            Whether ATROPOS should generate a random seed based on the
            system's clock
        atropos_model : tuple or None
            Allows to specify a particular segmentation model, overwriting
            the defaults based on ``bids_suffix``
        use_laplacian : bool
            Enables or disables alignment of the Laplacian as an additional
            criterion for image registration quality (default: True)
        bspline_fitting_distance : float
            The size of the b-spline mesh grid elements, in mm (default: 200)
        name : str, optional
            Workflow name (default: ``brain_extraction_wf``)
    **Inputs**
        in_files
            List of input anatomical images to be brain-extracted,
            typically T1-weighted.
            If a list of anatomical images is provided, subsequently
            specified images are used during the segmentation process.
            However, only the first image is used in the registration
            of priors.
            Our suggestion would be to specify the T1w as the first image.
        in_mask
            (optional) Mask used for registration to limit the metric
            computation to a specific region.
    **Outputs**
        out_file
            Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
        out_mask
            Calculated brain mask
        bias_corrected
            The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
            correction, before skull-stripping.
        bias_image
            The :abbr:`INU (intensity non-uniformity)` field estimated for each
            input in ``in_files``
        out_segm
            Output segmentation by ATROPOS
        out_tpms
            Output :abbr:`TPMs (tissue probability maps)` by ATROPOS
    """
    # from templateflow.api import get as get_template
    wf = pe.Workflow(name)

    template_spec = template_spec or {}

    # suffix passed via spec takes precedence
    template_spec['suffix'] = template_spec.get('suffix', bids_suffix)

    # # Get probabilistic brain mask if available
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']),
                        name='inputnode')

    # # Try to find a registration mask, set if available
    if tpl_regmask_path:
        inputnode.inputs.in_mask = str(tpl_regmask_path)

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'out_file', 'out_mask', 'bias_corrected', 'bias_image', 'out_segm',
        'out_tpms'
    ]),
                         name='outputnode')

    copy_xform = pe.Node(CopyXForm(
        fields=['out_file', 'out_mask', 'bias_corrected', 'bias_image']),
                         name='copy_xform',
                         run_without_submitting=True,
                         mem_gb=2.5)

    trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity',
                                 op2='0.01 0.999 256'),
                       name='truncate_images',
                       iterfield=['op1'])
    inu_n4 = pe.MapNode(N4BiasFieldCorrection(
        dimension=3,
        save_bias=False,
        copy_header=True,
        n_iterations=[50] * 4,
        convergence_threshold=1e-7,
        shrink_factor=4,
        bspline_fitting_distance=bspline_fitting_distance),
                        n_procs=omp_nthreads,
                        name='inu_n4',
                        iterfield=['input_image'])

    res_tmpl = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                              apply_smoothing=True),
                       name='res_tmpl')
    res_tmpl.inputs.input_image = tpl_target_path
    res_target = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                                apply_smoothing=True),
                         name='res_target')

    lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                       name='lap_tmpl')
    lap_tmpl.inputs.op1 = tpl_target_path
    lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                         name='lap_target')
    mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
    mrg_tmpl.inputs.in1 = tpl_target_path
    mrg_target = pe.Node(niu.Merge(2), name='mrg_target')

    # Initialize transforms with antsAI
    init_aff = pe.Node(AI(metric=('Mattes', 32, 'Regular', 0.25),
                          transform=('Affine', 0.1),
                          search_factor=(15, 0.1),
                          principal_axes=False,
                          convergence=(10, 1e-6, 10),
                          verbose=True),
                       name='init_aff',
                       n_procs=omp_nthreads)

    # Tolerate missing ANTs at construction time
    _ants_version = Registration().version
    if _ants_version and parseversion(_ants_version) >= Version('2.3.0'):
        init_aff.inputs.search_grid = (40, (0, 40, 40))

    # Set up spatial normalization
    settings_file = 'antsBrainExtraction_%s.json' if use_laplacian \
        else 'antsBrainExtractionNoLaplacian_%s.json'
    norm = pe.Node(Registration(
        from_file=pkgr_fn('CPAC.anat_preproc', 'data/' +
                          settings_file % normalization_quality)),
                   name='norm',
                   n_procs=omp_nthreads,
                   mem_gb=mem_gb)
    norm.inputs.float = use_float
    fixed_mask_trait = 'fixed_image_mask'
    if _ants_version and parseversion(_ants_version) >= Version('2.2.0'):
        fixed_mask_trait += 's'

    map_brainmask = pe.Node(ApplyTransforms(interpolation='Gaussian',
                                            float=True),
                            name='map_brainmask',
                            mem_gb=1)
    map_brainmask.inputs.input_image = str(tpl_mask_path)

    thr_brainmask = pe.Node(ThresholdImage(dimension=3,
                                           th_low=0.5,
                                           th_high=1.0,
                                           inside_value=1,
                                           outside_value=0),
                            name='thr_brainmask')

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation='MD', op2='2'),
                            name='dil_brainmask')
    # Get largest connected component
    get_brainmask = pe.Node(ImageMath(operation='GetLargestComponent'),
                            name='get_brainmask')

    # Refine INU correction
    inu_n4_final = pe.MapNode(N4BiasFieldCorrection(
        dimension=3,
        save_bias=True,
        copy_header=True,
        n_iterations=[50] * 5,
        convergence_threshold=1e-7,
        shrink_factor=4,
        bspline_fitting_distance=bspline_fitting_distance),
                              n_procs=omp_nthreads,
                              name='inu_n4_final',
                              iterfield=['input_image'])

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(),
                            iterfield=['in_file'],
                            name='apply_mask')

    wf.connect([
        (inputnode, trunc, [('in_files', 'op1')]),
        (inputnode, copy_xform, [(('in_files', _pop), 'hdr_file')]),
        (inputnode, inu_n4_final, [('in_files', 'input_image')]),
        (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]),
        (inputnode, norm, [('in_mask', fixed_mask_trait)]),
        (inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]),
        (trunc, inu_n4, [('output_image', 'input_image')]),
        (inu_n4, res_target, [(('output_image', _pop), 'input_image')]),
        (res_tmpl, init_aff, [('output_image', 'fixed_image')]),
        (res_target, init_aff, [('output_image', 'moving_image')]),
        (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
        (norm, map_brainmask, [('reverse_transforms', 'transforms'),
                               ('reverse_invert_flags',
                                'invert_transform_flags')]),
        (map_brainmask, thr_brainmask, [('output_image', 'input_image')]),
        (thr_brainmask, dil_brainmask, [('output_image', 'op1')]),
        (dil_brainmask, get_brainmask, [('output_image', 'op1')]),
        (inu_n4_final, apply_mask, [('output_image', 'in_file')]),
        (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        (get_brainmask, copy_xform, [('output_image', 'out_mask')]),
        (apply_mask, copy_xform, [('out_file', 'out_file')]),
        (inu_n4_final, copy_xform, [('output_image', 'bias_corrected'),
                                    ('bias_image', 'bias_image')]),
        (copy_xform, outputnode, [('out_file', 'out_file'),
                                  ('out_mask', 'out_mask'),
                                  ('bias_corrected', 'bias_corrected'),
                                  ('bias_image', 'bias_image')]),
    ])

    if use_laplacian:
        lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                           name='lap_tmpl')
        lap_tmpl.inputs.op1 = tpl_target_path
        lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                             name='lap_target')
        mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
        mrg_tmpl.inputs.in1 = tpl_target_path
        mrg_target = pe.Node(niu.Merge(2), name='mrg_target')
        wf.connect([
            (inu_n4, lap_target, [(('output_image', _pop), 'op1')]),
            (lap_tmpl, mrg_tmpl, [('output_image', 'in2')]),
            (inu_n4, mrg_target, [('output_image', 'in1')]),
            (lap_target, mrg_target, [('output_image', 'in2')]),
            (mrg_tmpl, norm, [('out', 'fixed_image')]),
            (mrg_target, norm, [('out', 'moving_image')]),
        ])
    else:
        norm.inputs.fixed_image = tpl_target_path
        wf.connect([
            (inu_n4, norm, [(('output_image', _pop), 'moving_image')]),
        ])

    if atropos_refine:
        atropos_model = atropos_model or list(
            ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
        )
        sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1),
                         name='sel_wm',
                         run_without_submitting=True)

        wf.disconnect([
            (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
            (copy_xform, outputnode, [('out_mask', 'out_mask')]),
        ])
        wf.connect([
            (inu_n4, atropos_wf, [('output_image', 'inputnode.in_files')]),
            (thr_brainmask, atropos_wf, [('output_image', 'inputnode.in_mask')
                                         ]),
            (get_brainmask, atropos_wf, [('output_image',
                                          'inputnode.in_mask_dilated')]),
            (atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
            (sel_wm, inu_n4_final, [('out', 'weight_image')]),
            (atropos_wf, apply_mask, [('outputnode.out_mask', 'mask_file')]),
            (atropos_wf, outputnode, [('outputnode.out_mask', 'out_mask'),
                                      ('outputnode.out_segm', 'out_segm'),
                                      ('outputnode.out_tpms', 'out_tpms')]),
        ])
    return wf
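
# Hypothetical call of this C-PAC variant, which takes the template files as
# explicit arguments instead of resolving them through templateflow; the paths
# shown are placeholders.
brain_wf = init_brain_extraction_wf(
    tpl_target_path='/templates/OASIS30ANTs/T_template0.nii.gz',
    tpl_mask_path='/templates/OASIS30ANTs/T_template0_probmask.nii.gz',
    tpl_regmask_path=None,
    omp_nthreads=4,
)
brain_wf.inputs.inputnode.in_files = ['/path/to/sub-01_T1w.nii.gz']
brain_wf.run()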
Example #18
        regT12CT.inputs.num_dimensions = 3
        regT12CT.inputs.num_threads = 4

        reg_nodes = []
        for i in range(3):
            reg = nipype.MapNode(interface=AntsRegSyn(), iterfield=['input_file', 'ref_file'],
                                 name='ants_reg{}'.format(i))
            reg.inputs.transformation = 'r'
            reg.inputs.num_dimensions = 3
            reg.inputs.num_threads = 4
            reg.inputs.interpolation = 'BSpline'
            reg_nodes.append(reg)

        apply_mask_nodes = []
        for i in range(3):
            masking = nipype.MapNode(interface=ApplyMask(), iterfield=['in_file', 'mask_file'],
                                     name='masking{}'.format(i))
            apply_mask_nodes.append(masking)

        apply_ts_nodes = []
        for i in range(3):
            apply_ts = nipype.MapNode(interface=ApplyTransforms(),
                                      iterfield=['input_image', 'transforms'],
                                      name='apply_ts{}'.format(i))
            apply_ts_nodes.append(apply_ts)

        merge_nodes = []
        for i in range(3):
            merge = nipype.MapNode(interface=Merge(4),
                                     iterfield=['in1', 'in2', 'in3', 'in4'],
                                     name='merge{}'.format(i))