Example #1
    def create(self):
        trainT1s = Node(interface=Select(), name='trainT1s')
        trainT2s = Node(interface=Select(), name='trainT2s')
        trainLabels = Node(interface=Select(), name='trainLabels')
        testT1s = Node(interface=Select(), name='testT1s')

        #intensityImages = Node(interface=Merge(2), name='intensityImages')

        jointFusion = Node(interface=JointFusion(), name='jointFusion')
        jointFusion.inputs.num_threads = -1
        jointFusion.inputs.dimension = 3
        jointFusion.inputs.modalities = 1  #TODO: verify 2 for T1/T2
        jointFusion.inputs.method = "Joint[0.1,2]"  # this does not work
        jointFusion.inputs.output_label_image = 'fusion_neuro2012_20.nii.gz'

        outputs = Node(
            interface=IdentityInterface(fields=['output_label_image']),
            run_without_submitting=True,
            name='outputspec')

        self.connect([  # Don't worry about T2s now per Regina
            # (trainT1s, intensityImages, [('out', 'in1')]),
            # (trainT2s, intensityImages, [('out', 'in2')]),
            (testT1s, jointFusion, [('out', 'target_image')]),
            (trainT1s, jointFusion, [('out', 'warped_intensity_images')]),
            (trainLabels, jointFusion, [('out', 'warped_label_images')]),
            (jointFusion, outputs, [('output_label_image',
                                     'output_label_image')]),
        ])
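
For reference, a minimal standalone sketch of the `Select` behavior the nodes above rely on (assumes nipype is installed; the file names are made up):

from nipype.interfaces.utility import Select

# Select picks entries out of `inlist` by zero-based `index`;
# a single selected element is returned bare, not as a one-item list.
sel = Select()
sel.inputs.inlist = ['subj01_T1.nii.gz', 'subj02_T1.nii.gz', 'subj03_T1.nii.gz']
sel.inputs.index = [1]
res = sel.run()
assert res.outputs.out == 'subj02_T1.nii.gz'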
Example #2
def workflow(filename, outputType, name):
    seedworkflow = pipe.Workflow(name=name)
    labels, seeds = getAtlasPoints(filename)  # Create seed points

    seedsIdentity = pipe.Node(interface=IdentityInterface(fields=['index']), name='seedsIdentity')
    seedsIdentity.iterables = ('index', range(len(labels)))

    selectSeed = pipe.Node(interface=Select(), name='selectSeed')
    selectSeed.inputs.inlist = seeds

    selectLabel = pipe.Node(interface=Select(), name='selectLabel')
    selectLabel.inputs.inlist = labels

    points = pipe.Node(interface=Function(function=createSphereExpression,
                                          input_names=['coordinates', 'radius'],
                                          output_names=['expression']),
                       name='createSphereExpression')

    spheres = pipe.Node(interface=Calc(letters=['a']), name='afni3Dcalc_seeds')
    spheres.inputs.outputtype = outputType
    spheres.inputs.args = '-nscale'

    seedworkflow.connect([(seedsIdentity, selectSeed, [('index', 'index')]),
                      (seedsIdentity, selectLabel, [('index', 'index')]),
                      (selectSeed, points, [('out', 'coordinates')]),
                      (points, spheres, [('expression', 'expr')])])
    return seedworkflow
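
A small runnable sketch of the fan-out pattern above: the `index` iterable expands the graph into one branch per seed, and each branch's `Select` pulls out a single item (the labels and coordinates here are hypothetical):

from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface, Select

labels = ['L_amygdala', 'R_amygdala']                # hypothetical labels
seeds = [(-24.0, -2.0, -20.0), (26.0, -2.0, -20.0)]  # hypothetical coordinates

wf = Workflow(name='fanout_demo')
idx = Node(IdentityInterface(fields=['index']), name='idx')
idx.iterables = ('index', range(len(labels)))        # one branch per seed
pick = Node(Select(inlist=seeds), name='pick')
wf.connect(idx, 'index', pick, 'index')
# wf.run() would execute two parallel branches, one per index value.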
Example #3
def test_Select_outputs():
    output_map = dict(out=dict(),
    )
    outputs = Select.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
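
This nose-style generator test only walks trait metadata; the same check can be written directly, e.g.:

from nipype.interfaces.utility import Select

outputs = Select.output_spec()
assert 'out' in outputs.traits()  # Select exposes a single 'out' output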
Example #4
    def create(self):
        sampleT1s = Node(interface=Select(), name='sampleT1s')
        sampleT2s = Node(interface=Select(), name='sampleT2s')
        sampleLabels = Node(interface=Select(), name='sampleLabels')

        testT1s = Node(interface=Select(), name='testT1s')
        testT2s = Node(interface=Select(), name='testT2s')
        testLabels = Node(interface=Select(), name='testLabels')

        intensityImages = Node(interface=Merge(2), name='intensityImages')

        jointFusion = Node(interface=JointFusion(), name='jointFusion')
        jointFusion.inputs.dimension = 3
        jointFusion.inputs.modalities = 1  #TODO: verify 2 for T1/T2
        jointFusion.inputs.method = 'Joint[0.1, 2]'
        jointFusion.inputs.output_label_image = 'fusion_neuro2012_20.nii.gz'

        outputs = Node(
            interface=IdentityInterface(fields=['output_label_image']),
            run_without_submitting=True,
            name='outputspec')

        self.connect([  # Don't worry about T2s now per Regina
            # (sampleT1s, intensityImages, [('out', 'in1')]),
            # (sampleT2s, intensityImages, [('out', 'in2')]),
            # (intensityImages, jointFusion, [('out', 'warped_intensity_images')]),
            (sampleT1s, jointFusion, [('out', 'warped_intensity_images')]),
            #END: per Regina
            (sampleLabels, jointFusion, [('out', 'warped_label_images')]),
            (jointFusion, outputs, [('output_label_image',
                                     'output_label_image')]),
        ])
Example #5
def test_Select_inputs():
    input_map = dict(ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    index=dict(mandatory=True,
    ),
    inlist=dict(mandatory=True,
    ),
    )
    inputs = Select.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
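
The `mandatory=True` metadata checked here is enforced at run time; a quick sketch of what that means in practice:

from nipype.interfaces.utility import Select

sel = Select(inlist=['a', 'b', 'c'])
try:
    sel.run()  # 'index' is mandatory and unset, so this raises
except ValueError as err:
    print('missing mandatory input:', err)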
Example #6
    def create(self):
        """
        This function...

        :param self:
        :return:
        """
        trainT1s = Node(interface=Select(), name="trainT1s")
        trainT2s = Node(interface=Select(), name="trainT2s")
        trainLabels = Node(interface=Select(), name="trainLabels")
        testT1s = Node(interface=Select(), name="testT1s")

        # intensityImages = Node(interface=Merge(2), name='intensityImages')

        jointFusion = Node(interface=JointFusion(), name="jointFusion")
        jointFusion.inputs.num_threads = -1
        jointFusion.inputs.dimension = 3
        jointFusion.inputs.modalities = 1  # TODO: verify 2 for T1/T2
        jointFusion.inputs.method = "Joint[0.1,2]"  # this does not work
        jointFusion.inputs.output_label_image = "fusion_neuro2012_20.nii.gz"

        outputs = Node(
            interface=IdentityInterface(fields=["output_label_image"]),
            run_without_submitting=True,
            name="outputspec",
        )

        self.connect(
            [  # Don't worry about T2s now per Regina
                # (trainT1s, intensityImages, [('out', 'in1')]),
                # (trainT2s, intensityImages, [('out', 'in2')]),
                (testT1s, jointFusion, [("out", "target_image")]),
                (trainT1s, jointFusion, [("out", "warped_intensity_images")]),
                (trainLabels, jointFusion, [("out", "warped_label_images")]),
                (jointFusion, outputs, [("output_label_image", "output_label_image")]),
            ]
        )
Example #7
    def qsm_pipeline(self, **name_maps):
        """
        Process dual echo data for QSM (TE=[7.38, 22.14])

        NB: Default values come from the STI-Suite
        """
        pipeline = self.new_pipeline(
            name='qsm_pipeline',
            name_maps=name_maps,
            desc="Resolve QSM from t2star coils",
            citations=[sti_cites, fsl_cite, matlab_cite])

        erosion = pipeline.add(
            'mask_erosion',
            fsl.ErodeImage(kernel_shape='sphere',
                           kernel_size=self.parameter('qsm_erosion_size'),
                           output_type='NIFTI'),
            inputs={'in_file': ('brain_mask', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=15,
            mem_gb=12)

        # If we have multiple echoes, we can combine the phase images from
        # each channel into a single image. Otherwise, for single-echo
        # sequences, we need to perform QSM on each coil separately and then
        # combine afterwards.
        if self.branch('qsm_dual_echo'):
            # Combine channels to produce phase and magnitude images
            channel_combine = pipeline.add(
                'channel_combine',
                HIPCombineChannels(),
                inputs={
                    'magnitudes_dir': ('mag_channels', multi_nifti_gz_format),
                    'phases_dir': ('phase_channels', multi_nifti_gz_format)
                })

            # Unwrap phase using Laplacian unwrapping
            unwrap = pipeline.add(
                'unwrap',
                UnwrapPhase(padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (channel_combine, 'phase')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Remove background noise
            vsharp = pipeline.add(
                "vsharp",
                VSharp(mask_manip="imerode({}>0, ball(5))"),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (unwrap, 'out_file'),
                    'mask': (erosion, 'out_file')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Run QSM iLSQR
            pipeline.add('qsmrecon',
                         QSMiLSQR(mask_manip="{}>0",
                                  padsize=self.parameter('qsm_padding')),
                         inputs={
                             'voxelsize': ('voxel_sizes', float),
                             'te': ('echo_times', float),
                             'B0': ('main_field_strength', float),
                             'H': ('main_field_orient', float),
                             'in_file': (vsharp, 'out_file'),
                             'mask': (vsharp, 'new_mask')
                         },
                         outputs={'qsm': ('qsm', nifti_format)},
                         requirements=[matlab_req.v('r2017a'),
                                       sti_req.v(2.2)])

        else:
            # Dilate the eroded mask
            dialate = pipeline.add(
                'dialate',
                DialateMask(dialation=self.parameter('qsm_mask_dialation')),
                inputs={'in_file': (erosion, 'out_file')},
                requirements=[matlab_req.v('r2017a')])

            # List the phase files for the separate channels
            list_phases = pipeline.add(
                'list_phases',
                ListDir(sort_key=coil_sort_key,
                        filter=CoilEchoFilter(self.parameter('qsm_echo'))),
                inputs={
                    'directory': ('phase_channels', multi_nifti_gz_format)
                })

            # List the magnitude files for the separate channels
            list_mags = pipeline.add(
                'list_mags',
                ListDir(sort_key=coil_sort_key,
                        filter=CoilEchoFilter(self.parameter('qsm_echo'))),
                inputs={'directory': ('mag_channels', multi_nifti_gz_format)})

            # Generate coil specific masks
            mask_coils = pipeline.add(
                'mask_coils',
                MaskCoils(dialation=self.parameter('qsm_mask_dialation')),
                inputs={
                    'masks': (list_mags, 'files'),
                    'whole_brain_mask': (dialate, 'out_file')
                },
                requirements=[matlab_req.v('r2017a')])

            # Unwrap phase
            unwrap = pipeline.add(
                'unwrap',
                BatchUnwrapPhase(padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (list_phases, 'files')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Background phase removal
            vsharp = pipeline.add(
                "vsharp",
                BatchVSharp(mask_manip='{}>0'),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'mask': (mask_coils, 'out_files'),
                    'in_file': (unwrap, 'out_file')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            first_echo_time = pipeline.add(
                'first_echo',
                Select(index=0),
                inputs={'inlist': ('echo_times', float)})

            # Perform channel-wise QSM
            coil_qsm = pipeline.add(
                'coil_qsmrecon',
                BatchQSMiLSQR(mask_manip="{}>0",
                              padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'B0': ('main_field_strength', float),
                    'H': ('main_field_orient', float),
                    'in_file': (vsharp, 'out_file'),
                    'mask': (vsharp, 'new_mask'),
                    'te': (first_echo_time, 'out')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)],
                wall_time=45)  # FIXME: Should be dependent on number of coils

            # Combine channel QSM by taking the median coil value
            pipeline.add('combine_qsm',
                         MedianInMasks(),
                         inputs={
                             'channels': (coil_qsm, 'out_file'),
                             'channel_masks': (vsharp, 'new_mask'),
                             'whole_brain_mask': (dialate, 'out_file')
                         },
                         outputs={'qsm': ('out_file', nifti_format)},
                         requirements=[matlab_req.v('r2017a')])
        return pipeline
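
As an aside, the `first_echo` node above is plain `Select` scalar-picking, independent of the MATLAB machinery; a standalone sketch using the echo times from the docstring:

from nipype.interfaces.utility import Select

first = Select(inlist=[7.38, 22.14], index=0)
assert first.run().outputs.out == 7.38  # a single index yields a bare value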
Example #8
File: pvc.py Project: zuxfoucault/pypes
def petpvc_workflow(wf_name="petpvc"):
    """ Run the PET pre-processing workflow against the gunzip_pet.in_file files.
    It coregisters the reference_file and tissues to PET space, then applies PVC and grey matter normalization.

    It does:
    - SPM12 Coregister T1 and tissues to PET
    - PVC the PET image in PET space

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pvc_input.in_file: traits.File
        The raw NIFTI_GZ PET image file

    pvc_input.reference_file: traits.File
        The anatomical image in its native space. For registration reference.

    pvc_input.tissues: list of traits.File
        List of tissues files from the New Segment process. At least the first
        3 tissues must be present.

    Nipype Outputs
    --------------
    pvc_output.coreg_ref: existing file
        The coregistered reference_file image in PET space.

    pvc_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files

    pvc_output.pvc_out: existing file
        The output of the PETPVC process.

    pvc_output.petpvc_mask: existing file
        The mask built for the PETPVC.

    pvc_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pvc_output.gm_norm: existing file
        The output of the grey matter intensity normalization process.
        This is the last step in the PET signal correction.

    Returns
    -------
    wf: nipype Workflow
    """
    # fixed parameters of the NUK mMR
    psf_fwhm = (4.3, 4.3, 4.3)

    # specify input and output fields
    in_fields = [
        "in_file",
        "reference_file",
        "tissues",
    ]

    out_fields = [
        "coreg_ref",
        "coreg_others",
        "pvc_out",
        "petpvc_mask",
        "brain_mask",
        "gm_norm",
    ]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields,
                                             mandatory_inputs=True),
                           name="pvc_input")

    flat_list = pe.Node(Function(input_names=['list_of_lists'],
                                 output_names=['out'],
                                 function=flatten_list),
                        name='flatten_tissue_list')

    # coreg pet
    gunzip_pet = setup_node(Gunzip(), name="gunzip_pet")
    coreg_pet = setup_node(spm_coregister(cost_function="mi"),
                           name="coreg_pet")

    tissues_sel = setup_node(Select(index=[0, 1, 2]), name="tissues")
    select_gm = setup_node(Select(index=[0]), name="select_gm")
    pvc = setup_node(petpvc_cmd(fwhm_mm=psf_fwhm, pvc_method='RBV'),
                     name="pvc")

    # output
    pvc_output = setup_node(IdentityInterface(fields=out_fields),
                            name="pvc_output")

    # workflow to create the mask
    mask_wf = petpvc_mask(wf_name="petpvc_mask")

    # workflow for intensity normalization
    norm_wf = intensity_norm(wf_name="intensity_norm_gm")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    wf.connect([
        # inputs
        (pet_input, gunzip_pet, [("in_file", "in_file")]),
        (pet_input, tissues_sel, [("tissues", "inlist")]),
    ])

    # check how to perform the registration, to decide how to build the pipeline
    anat2pet = get_config_setting('registration.anat2pet', False)
    if anat2pet:
        wf.connect([
            # inputs
            (pet_input, coreg_pet, [("reference_file", "source")]),

            # unzip to coregister the reference file (anatomical image) to PET space.
            (gunzip_pet, coreg_pet, [("out_file", "target")]),
            (tissues_sel, flat_list, [("out", "list_of_lists")]),
            (flat_list, coreg_pet, [("out", "apply_to_files")]),

            # the list of tissues to the mask wf and the GM for PET intensity normalization
            (coreg_pet, select_gm, [("coregistered_files", "inlist")]),
            (coreg_pet, mask_wf, [("coregistered_files",
                                   "pvcmask_input.tissues")]),

            # the PET in native space to PVC correction
            (gunzip_pet, pvc, [("out_file", "in_file")]),

            # the merged file with 4 tissues to PVC correction
            (mask_wf, pvc, [("pvcmask_output.petpvc_mask", "mask_file")]),

            # normalize the PVC-corrected PET voxel values by the mean GM PET voxel value
            (pvc, norm_wf, [("out_file", "intnorm_input.source")]),
            (select_gm, norm_wf, [("out", "intnorm_input.mask")]),

            # output
            (coreg_pet, pvc_output, [("coregistered_source", "coreg_ref")]),
            (coreg_pet, pvc_output, [("coregistered_files", "coreg_others")]),
            (pvc, pvc_output, [("out_file", "pvc_out")]),
            (mask_wf, pvc_output, [("pvcmask_output.brain_mask", "brain_mask")
                                   ]),
            (mask_wf, pvc_output, [("pvcmask_output.petpvc_mask",
                                    "petpvc_mask")]),
            (norm_wf, pvc_output, [("intnorm_output.out_file", "gm_norm")]),
        ])
    else:  # PET to ANAT
        wf.connect([
            # inputs
            (pet_input, coreg_pet, [("reference_file", "target")]),

            # unzip PET image and set as a source to register it to anatomical space.
            (gunzip_pet, coreg_pet, [("out_file", "source")]),
            (tissues_sel, flat_list, [("out", "list_of_lists")]),
            (flat_list, coreg_pet, [("out", "apply_to_files")]),

            # the list of tissues to the mask wf and the GM for PET intensity normalization
            (tissues_sel, select_gm, [("out", "inlist")]),
            (flat_list, mask_wf, [("out", "pvcmask_input.tissues")]),

            # the PET in ANAT space to PVC correction
            (coreg_pet, pvc, [("coregistered_source", "in_file")]),

            # the merged file with 4 tissues to PVC correction
            (mask_wf, pvc, [("pvcmask_output.petpvc_mask", "mask_file")]),

            # normalize the PVC-corrected PET voxel values by the mean GM PET voxel value
            (pvc, norm_wf, [("out_file", "intnorm_input.source")]),
            (select_gm, norm_wf, [("out", "intnorm_input.mask")]),

            # output
            # TODO: coreg_ref should have a different name in this case
            (coreg_pet, pvc_output, [("coregistered_source", "coreg_ref")]),
            (coreg_pet, pvc_output, [("coregistered_files", "coreg_others")]),
            (pvc, pvc_output, [("out_file", "pvc_out")]),
            (mask_wf, pvc_output, [("pvcmask_output.brain_mask", "brain_mask")
                                   ]),
            (mask_wf, pvc_output, [("pvcmask_output.petpvc_mask",
                                    "petpvc_mask")]),
            (norm_wf, pvc_output, [("intnorm_output.out_file", "gm_norm")]),
        ])

    return wf
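
A standalone sketch of the `tissues_sel` node above: with a multi-element `index`, `Select` returns a list, which is how only the first three NewSegment tissue classes are kept (the file names are made up):

from nipype.interfaces.utility import Select

tissues = ['c1anat.nii', 'c2anat.nii', 'c3anat.nii', 'c4anat.nii', 'c5anat.nii']
sel = Select(inlist=tissues, index=[0, 1, 2])
assert sel.run().outputs.out == ['c1anat.nii', 'c2anat.nii', 'c3anat.nii']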
Example #9
def fmri_cleanup_wf(wf_name="fmri_cleanup"):
    """ Run the resting-state fMRI pre-processing workflow against the rest files in `data_dir`.

    Tasks:
    - Trim first 6 volumes of the rs-fMRI file.
    - Slice Timing correction.
    - Motion and nuisance correction.
    - Calculate brain mask in fMRI space.
    - Bandpass frequency filtering for resting-state fMRI.
    - Smoothing.
    - Tissue maps co-registration to fMRI space.

    Parameters
    ----------
    wf_name: str

    Nipype Inputs
    -------------
    rest_input.in_file: traits.File
        The resting-state fMRI file.

    rest_input.anat: traits.File
        Path to the high-contrast anatomical image.

    rest_input.tissues: list of traits.File
        Paths to the tissue segmentations in anatomical space.
        Expected to have this order: GM, WM and CSF.

    rest_input.highpass_freq: traits.Float
        Band-pass time-series filter lower-bound cutoff in Hz.

    rest_input.lowpass_freq: traits.Float
        Band-pass time-series filter upper-bound cutoff in Hz.

    Nipype Outputs
    --------------
    rest_output.smooth: traits.File
        The isotropically smoothed time filtered nuisance corrected image.

    rest_output.nuis_corrected: traits.File
        The nuisance corrected fMRI file.

    rest_output.motion_params: traits.File
        The affine transformation file.

    rest_output.time_filtered: traits.File
        The bandpass time filtered fMRI file.

    rest_output.epi_brain_mask: traits.File
        An estimated brain mask from mean EPI volume.

    rest_output.tissues_brain_mask: traits.File
        A brain mask calculated from the addition of coregistered
        GM, WM and CSF segmentation volumes from the anatomical
        segmentation.

    rest_output.tissues: list of traits.File
        The tissues segmentation volume in fMRI space.
        Expected to have this order: GM, WM and CSF.

    rest_output.anat: traits.File
        The T1w image in fMRI space.

    rest_output.avg_epi: traits.File
        The average EPI image in fMRI space after slice-time and motion correction.

    rest_output.motion_regressors: traits.File

    rest_output.compcor_regressors: traits.File

    rest_output.art_displacement_files
        One image file containing the voxel-displacement timeseries.

    rest_output.art_intensity_files
        One file containing the global intensity values determined from the brainmask.

    rest_output.art_norm_files
        One file containing the composite norm.

    rest_output.art_outlier_files
         One file containing a list of 0-based indices corresponding to outlier volumes.

    rest_output.art_plot_files
        One image file containing the detected outliers.

    rest_output.art_statistic_files
        One file containing information about the different types of artifacts and, if design info is provided,
        details of stimulus-correlated motion and a listing of artifacts by event type.

    Returns
    -------
    wf: nipype Workflow
    """
    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # specify input and output fields
    in_fields = [
        "in_file",
        "anat",
        "atlas_anat",
        "coreg_target",
        "tissues",
        "lowpass_freq",
        "highpass_freq",
    ]

    out_fields = [
        "motion_corrected",
        "motion_params",
        "tissues",
        "anat",
        "avg_epi",
        "time_filtered",
        "smooth",
        "tsnr_file",
        "epi_brain_mask",
        "tissues_brain_mask",
        "motion_regressors",
        "compcor_regressors",
        "gsr_regressors",
        "nuis_corrected",
        "art_displacement_files",
        "art_intensity_files",
        "art_norm_files",
        "art_outlier_files",
        "art_plot_files",
        "art_statistic_files",
    ]

    # input identities
    rest_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                            name="rest_input")

    # rs-fMRI preprocessing nodes
    trim = setup_node(Trim(), name="trim")

    stc_wf = auto_spm_slicetime()
    realign = setup_node(nipy_motion_correction(), name='realign')

    # average
    average = setup_node(
        Function(
            function=mean_img,
            input_names=["in_file"],
            output_names=["out_file"],
            imports=['from neuro_pypes.interfaces.nilearn import ni2file']
        ),
        name='average_epi'
    )

    mean_gunzip = setup_node(Gunzip(), name="mean_gunzip")

    # co-registration nodes
    coreg = setup_node(spm_coregister(cost_function="mi"), name="coreg_fmri")
    brain_sel = setup_node(Select(index=[0, 1, 2]), name="brain_sel")

    # brain mask made with EPI
    epi_mask = setup_node(ComputeMask(), name='epi_mask')

    # brain mask made with the merge of the tissue segmentations
    tissue_mask = setup_node(fsl.MultiImageMaths(), name='tissue_mask')
    tissue_mask.inputs.op_string = "-add %s -add %s -abs -kernel gauss 4 -dilM -ero -kernel gauss 1 -dilM -bin"
    tissue_mask.inputs.out_file = "tissue_brain_mask.nii.gz"

    # select tissues
    gm_select = setup_node(Select(index=[0]), name="gm_sel")
    wmcsf_select = setup_node(Select(index=[1, 2]), name="wmcsf_sel")

    # noise filter
    noise_wf = rest_noise_filter_wf()
    wm_select = setup_node(Select(index=[1]), name="wm_sel")
    csf_select = setup_node(Select(index=[2]), name="csf_sel")

    # bandpass filtering
    bandpass = setup_node(
        Function(
            input_names=['files', 'lowpass_freq', 'highpass_freq', 'tr'],
            output_names=['out_files'],
            function=bandpass_filter
        ),
        name='bandpass'
    )

    # smooth
    smooth = setup_node(
        Function(
            function=smooth_img,
            input_names=["in_file", "fwhm"],
            output_names=["out_file"],
            imports=['from neuro_pypes.interfaces.nilearn import ni2file']
        ),
        name="smooth"
    )
    smooth.inputs.fwhm = get_config_setting('fmri_smooth.fwhm', default=8)
    smooth.inputs.out_file = "smooth_std_{}.nii.gz".format(wf_name)

    # output identities
    rest_output = setup_node(IdentityInterface(fields=out_fields), name="rest_output")

    # Connect the nodes
    wf.connect([
        # trim
        (rest_input, trim, [("in_file", "in_file")]),

        # slice time correction
        (trim, stc_wf, [("out_file", "stc_input.in_file")]),

        # motion correction
        (stc_wf, realign, [("stc_output.timecorrected_files", "in_file")]),

        # coregistration target
        (realign, average, [("out_file", "in_file")]),
        (average, mean_gunzip, [("out_file", "in_file")]),
        (mean_gunzip, coreg, [("out_file", "target")]),

        # epi brain mask
        (average, epi_mask, [("out_file", "mean_volume")]),

        # coregistration
        (rest_input, coreg, [("anat", "source")]),
        (rest_input, brain_sel, [("tissues", "inlist")]),
        (brain_sel, coreg, [(("out", flatten_list), "apply_to_files")]),

        # tissue brain mask
        (coreg, gm_select, [("coregistered_files", "inlist")]),
        (coreg, wmcsf_select, [("coregistered_files", "inlist")]),
        (gm_select, tissue_mask, [(("out", flatten_list), "in_file")]),
        (wmcsf_select, tissue_mask, [(("out", flatten_list), "operand_files")]),

        # nuisance correction
        (coreg, wm_select, [("coregistered_files", "inlist",)]),
        (coreg, csf_select, [("coregistered_files", "inlist",)]),
        (realign, noise_wf, [("out_file", "rest_noise_input.in_file",)]),
        (tissue_mask, noise_wf, [("out_file", "rest_noise_input.brain_mask")]),
        (wm_select, noise_wf, [(("out", flatten_list), "rest_noise_input.wm_mask")]),
        (csf_select, noise_wf, [(("out", flatten_list), "rest_noise_input.csf_mask")]),

        (realign, noise_wf, [("par_file", "rest_noise_input.motion_params",)]),

        # temporal filtering
        (noise_wf, bandpass, [("rest_noise_output.nuis_corrected", "files")]),
        # (realign,     bandpass,    [("out_file", "files")]),
        (stc_wf, bandpass, [("stc_output.time_repetition", "tr")]),
        (rest_input, bandpass, [
            ("lowpass_freq", "lowpass_freq"),
            ("highpass_freq", "highpass_freq"),
        ]),
        (bandpass, smooth, [("out_files", "in_file")]),

        # output
        (epi_mask, rest_output, [("brain_mask", "epi_brain_mask")]),
        (tissue_mask, rest_output, [("out_file", "tissues_brain_mask")]),
        (realign, rest_output, [
            ("out_file", "motion_corrected"),
            ("par_file", "motion_params"),
        ]),
        (coreg, rest_output, [
            ("coregistered_files", "tissues"),
            ("coregistered_source", "anat"),
        ]),
        (noise_wf, rest_output, [
            ("rest_noise_output.motion_regressors", "motion_regressors"),
            ("rest_noise_output.compcor_regressors", "compcor_regressors"),
            ("rest_noise_output.gsr_regressors", "gsr_regressors"),
            ("rest_noise_output.nuis_corrected", "nuis_corrected"),
            ("rest_noise_output.tsnr_file", "tsnr_file"),
            ("rest_noise_output.art_displacement_files", "art_displacement_files"),
            ("rest_noise_output.art_intensity_files", "art_intensity_files"),
            ("rest_noise_output.art_norm_files", "art_norm_files"),
            ("rest_noise_output.art_outlier_files", "art_outlier_files"),
            ("rest_noise_output.art_plot_files", "art_plot_files"),
            ("rest_noise_output.art_statistic_files", "art_statistic_files"),
        ]),
        (average, rest_output, [("out_file", "avg_epi")]),
        (bandpass, rest_output, [("out_files", "time_filtered")]),
        (smooth, rest_output, [("out_file", "smooth")]),
    ])

    return wf
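
The connections above use `flatten_list` as an inline connection function (the `(("out", flatten_list), ...)` tuples). The helper itself is not shown here; a plausible sketch, assuming it only needs to remove one level of nesting:

def flatten_list(list_of_lists):
    # Flatten one level of nesting; pass already-flat lists through unchanged.
    if list_of_lists and isinstance(list_of_lists[0], (list, tuple)):
        return [item for sublist in list_of_lists for item in sublist]
    return list_of_lists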
Example #10
def create_stage(stage_nr, workflow, inputs, inputs_nr_slices, slice_name):
    """
    Don't use this directly, see build_workflow() instead.

    Create an interpolation stage. Mutates the 'workflow' argument.
    """

    # Selectors into 'inputs'
    select_inputs = {}
    for i in range(inputs_nr_slices):
        fi = Function(input_names=['x', 'i'],
                      output_names=['out_file'],
                      function=select_function)

        select_inputs[i] = pe.Node(interface=fi,
                                   name='select_inputs_%s_%d_%d' % (
                                       slice_name,
                                       stage_nr,
                                       i,
                                   ))
        select_inputs[i].inputs.i = i
        workflow.connect(inputs, 'out_files', select_inputs[i], 'x')

    # Interpolations.
    interp_nodes = []
    for i in range(inputs_nr_slices - 1):
        interp_node = pe.Node(interface=InterpolateBetweenSlices(),
                              name='interp_%s_%d_%08d' % (
                                  slice_name,
                                  stage_nr,
                                  i,
                              ))

        select_node = pe.Node(interface=Select(index=[i, i + 1]),
                              name='select_%s_%d_%d_%d' % (
                                  slice_name,
                                  stage_nr,
                                  i,
                                  i + 1,
                              ))
        workflow.connect(inputs, 'out_files', select_node, 'inlist')
        workflow.connect(select_node, 'out', interp_node, 'slices')
        interp_node.inputs.level = stage_nr

        interp_nodes.append(interp_node)

    # Rename slices.
    renamers = []
    k = 0
    rename = pe.Node(interface=Rename(),
                     name='rename_%s_%d_%08d' % (
                         slice_name,
                         stage_nr,
                         k,
                     ))
    rename.inputs.format_string = 'slice_%08d.npz' % k
    workflow.connect(select_inputs[0], 'out_file', rename, 'in_file')
    renamers.append(rename)
    k += 1

    for i in range(len(interp_nodes)):
        rename = pe.Node(interface=Rename(),
                         name='rename_%s_%d_%08d' % (
                             slice_name,
                             stage_nr,
                             k,
                         ))
        rename.inputs.format_string = 'slice_%08d.npz' % k
        workflow.connect(interp_nodes[i], 'interpolated_slice', rename,
                         'in_file')
        renamers.append(rename)
        k += 1

        rename = pe.Node(interface=Rename(),
                         name='rename_%s_%d_%08d' % (
                             slice_name,
                             stage_nr,
                             k,
                         ))
        rename.inputs.format_string = 'slice_%08d.npz' % k
        workflow.connect(select_inputs[i + 1], 'out_file', rename, 'in_file')
        renamers.append(rename)
        k += 1

    # Could skip this unless we want to see intermediate steps.
    datasink = pe.Node(nio.DataSink(),
                       name='sinker_%s_%d' % (
                           slice_name,
                           stage_nr,
                       ))
    for (i, rename) in enumerate(renamers):
        workflow.connect(rename, 'out_file', datasink, 'slices.@%d' % i)

    # If we want to do another stage, use the out_file outputs of the renamers.
    # We need a single node with an output 'out_files' consisting of each of the files.
    merge_renamed_files = pe.Node(interface=Merge(len(renamers)),
                                  name='merge_renamed_files_%s_%d' % (
                                      slice_name,
                                      stage_nr,
                                  ))
    for (i, rename) in enumerate(renamers):
        workflow.connect(rename, 'out_file', merge_renamed_files,
                         'in%d' % (i + 1))

    # Now rename the output 'out' to 'out_files' so we can pass it in to a recursive
    # call to this function.
    out_to_out_files = pe.Node(interface=Function(input_names=['x'],
                                                  output_names=['out_files'],
                                                  function=identity_function),
                               name='out_to_out_files_%s_%d' % (
                                   slice_name,
                                   stage_nr,
                               ))
    workflow.connect(merge_renamed_files, 'out', out_to_out_files, 'x')

    return out_to_out_files
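
`select_function` and `identity_function` are referenced but not shown. Plausible one-liners consistent with how they are wired here (the returned names must match the declared `output_names`):

def select_function(x, i):
    # Pick the i-th file out of the incoming list ('out_file' output).
    return x[i]

def identity_function(x):
    # Pure passthrough; exists only to rename 'out' to 'out_files'.
    return x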
Example #11
# Transform the composite transform to a warp field to use it with CreateJacobian.
# This calculates the warp field of both the affine and non-affine transformations.
# If you did that, then you would have to add the intracranial volume as a covariate.
# That's why I am replacing it:
# calc_warp_field = Node(ants.ApplyTransforms(), name = 'Calc_Warp_Field')
# calc_warp_field.inputs.reference_image = study_based_template
# calc_warp_field.inputs.dimension = 3
# calc_warp_field.inputs.print_out_composite_warp_file = True
# calc_warp_field.inputs.output_image = 'Warp_Field.nii.gz'
#-----------------------------------------------------------------------------------------------------
# In[1]
# Here we just get the warp field.
# Notice I changed reg_sub_to_temp.inputs.write_composite_transform to False,
# so it outputs the warp field separate from the affine transform.

get_warp_field = Node(Select(), name='get_warp_field')
get_warp_field.inputs.index = [1]

#-----------------------------------------------------------------------------------------------------
# In[1]:
# Create Jacobian determinant image
jacobian = Node(ants.CreateJacobianDeterminantImage(),
                name='Calculate_Jacobian_Determinant')
jacobian.inputs.imageDimension = 3
jacobian.inputs.outputImage = 'Jacobian.nii.gz'

#-----------------------------------------------------------------------------------------------------
# In[1]:
# Tissue segmentation
atropos = Node(ants.Atropos(), name='Atropos')
def ANTs_cortical_thickness(subject_list, directory):

    #==============================================================
    # Loading required packages
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    import own_nipype
    from nipype.interfaces.ants.segmentation import antsCorticalThickness
    from nipype.interfaces.ants import ApplyTransforms
    from nipype.interfaces.ants import MultiplyImages
    from nipype.interfaces.utility import Function
    from nipype.interfaces.ants.visualization import ConvertScalarImageToRGB
    from nipype.interfaces.ants.visualization import CreateTiledMosaic
    from nipype.interfaces.utility import Select
    from own_nipype import GM_DENSITY
    from nipype import SelectFiles
    import os

    #====================================
    # Defining the nodes for the workflow

    # Getting the subject ID
    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id']),
        name='infosource')
    infosource.iterables = ('subject_id', subject_list)

    # Getting the relevant diffusion-weighted data
    templates = dict(
        T1=
        '/imaging/jb07/CALM/CALM_BIDS/{subject_id}/anat/{subject_id}_T1w.nii.gz'
    )

    selectfiles = pe.Node(SelectFiles(templates), name="selectfiles")
    selectfiles.inputs.base_directory = os.path.abspath(directory)

    # Rigid alignment with the template space
    T1_rigid_quickSyN = pe.Node(interface=own_nipype.ants_QuickSyN(
        image_dimensions=3, transform_type='r'),
                                name='T1_rigid_quickSyN')
    T1_rigid_quickSyN.inputs.fixed_image = '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/T_template0.nii.gz'

    # Cortical thickness calculation
    corticalthickness = pe.Node(interface=antsCorticalThickness(),
                                name='corticalthickness')
    corticalthickness.inputs.brain_probability_mask = '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/T_template0_BrainCerebellumProbabilityMask.nii.gz'
    corticalthickness.inputs.brain_template = '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/T_template0.nii.gz'
    corticalthickness.inputs.segmentation_priors = [
        '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/Priors2/priors1.nii.gz',
        '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/Priors2/priors2.nii.gz',
        '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/Priors2/priors3.nii.gz',
        '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/Priors2/priors4.nii.gz',
        '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/Priors2/priors5.nii.gz',
        '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/Priors2/priors6.nii.gz'
    ]
    corticalthickness.inputs.extraction_registration_mask = '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/T_template0_BrainCerebellumExtractionMask.nii.gz'
    corticalthickness.inputs.t1_registration_template = '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/T_template0_BrainCerebellum.nii.gz'

    # Creating visualisations for quality control
    converter = pe.Node(interface=ConvertScalarImageToRGB(), name='converter')
    converter.inputs.dimension = 3
    converter.inputs.colormap = 'cool'
    converter.inputs.minimum_input = 0
    converter.inputs.maximum_input = 5

    mosaic_slicer = pe.Node(interface=CreateTiledMosaic(),
                            name='mosaic_slicer')
    mosaic_slicer.inputs.pad_or_crop = 'mask'
    mosaic_slicer.inputs.slices = '[4 ,mask , mask]'
    mosaic_slicer.inputs.direction = 1
    mosaic_slicer.inputs.alpha_value = 0.5

    # Getting GM density images
    gm_density = pe.Node(interface=GM_DENSITY(), name='gm_density')
    sl = pe.Node(interface=Select(index=1), name='sl')

    # Applying transformation
    at = pe.Node(interface=ApplyTransforms(), name='at')
    at.inputs.dimension = 3
    at.inputs.reference_image = '/imaging/jb07/Atlases/OASIS/OASIS-30_Atropos_template/T_template0_BrainCerebellum.nii.gz'
    at.inputs.interpolation = 'Linear'
    at.inputs.default_value = 0
    at.inputs.invert_transform_flags = False

    # Multiplying the normalized image with Jacobian
    multiply_images = pe.Node(interface=MultiplyImages(dimension=3),
                              name='multiply_images')

    # Naming the output of multiply_image
    def generate_filename(subject_id):
        return subject_id + '_multiplied.nii.gz'

    generate_filename = pe.Node(interface=Function(
        input_names=["subject_id"],
        output_names=["out_filename"],
        function=generate_filename),
                                name='generate_filename')

    #====================================
    # Setting up the workflow
    antsthickness = pe.Workflow(name='antsthickness')

    antsthickness.connect(infosource, 'subject_id', selectfiles, 'subject_id')
    antsthickness.connect(selectfiles, 'T1', T1_rigid_quickSyN, 'moving_image')
    antsthickness.connect(infosource, 'subject_id', T1_rigid_quickSyN,
                          'output_prefix')
    antsthickness.connect(T1_rigid_quickSyN, 'warped_image', corticalthickness,
                          'anatomical_image')
    antsthickness.connect(infosource, 'subject_id', corticalthickness,
                          'out_prefix')
    antsthickness.connect(corticalthickness, 'CorticalThickness', converter,
                          'input_image')
    antsthickness.connect(converter, 'output_image', mosaic_slicer,
                          'rgb_image')
    antsthickness.connect(corticalthickness, 'BrainSegmentationN4',
                          mosaic_slicer, 'input_image')
    antsthickness.connect(corticalthickness, 'BrainExtractionMask',
                          mosaic_slicer, 'mask_image')

    antsthickness.connect(corticalthickness, 'BrainSegmentationN4', gm_density,
                          'in_file')
    antsthickness.connect(corticalthickness, 'BrainSegmentationPosteriors', sl,
                          'inlist')
    antsthickness.connect(sl, 'out', gm_density, 'mask_file')
    antsthickness.connect(corticalthickness, 'SubjectToTemplate1Warp', at,
                          'transforms')
    antsthickness.connect(gm_density, 'out_file', at, 'input_image')
    antsthickness.connect(corticalthickness, 'SubjectToTemplateLogJacobian',
                          multiply_images, 'second_input')
    antsthickness.connect(corticalthickness,
                          'CorticalThicknessNormedToTemplate', multiply_images,
                          'first_input')
    antsthickness.connect(infosource, 'subject_id', generate_filename,
                          'subject_id')
    antsthickness.connect(generate_filename, 'out_filename', multiply_images,
                          'output_product_image')

    #====================================
    # Running the workflow
    antsthickness.base_dir = os.path.abspath(directory)
    antsthickness.write_graph()
    antsthickness.run('PBSGraph')
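
Note the `sl` node: `Select(index=1)` pulls the second entry (zero-based) out of `BrainSegmentationPosteriors`. Assuming the usual antsCorticalThickness posterior ordering (CSF, GM, WM, ...), that is the grey-matter posterior feeding `gm_density`:

from nipype.interfaces.utility import Select

posteriors = ['Posteriors1.nii.gz', 'Posteriors2.nii.gz', 'Posteriors3.nii.gz']
assert Select(inlist=posteriors, index=1).run().outputs.out == 'Posteriors2.nii.gz'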
Example #13
def writeSeedFiles():
    CACHE_DIR = 'seeds_CACHE'
    RESULTS_DIR = 'seeds'
    REWRITE_DATASINKS = True
    nacAtlasFile = "/Shared/paulsen/Experiments/rsFMRI/rs-fMRI-pilot/ReferenceAtlas/template_t1.nii.gz"
    nacAtlasLabel = "/Shared/paulsen/Experiments/rsFMRI/rs-fMRI-pilot/ReferenceAtlas/template_nac_labels.nii.gz"
    nacResampleResolution = (2.0, 2.0, 2.0)
    downsampledNACfilename = 'downsampledNACatlas.nii.gz'

    preproc = pipe.Workflow(name=CACHE_DIR)
    preproc.base_dir = os.getcwd()

    labels, seeds = getAtlasPoints(
        '/Shared/paulsen/Experiments/rsFMRI/rs-fMRI-pilot/seeds.fcsv')

    seedsIdentity = pipe.Node(interface=IdentityInterface(fields=['index']),
                              name='seedsIdentity')
    seedsIdentity.iterables = ('index', range(len(labels)))

    selectSeed = pipe.Node(interface=Select(), name='selectSeed')
    selectSeed.inputs.inlist = seeds
    preproc.connect(seedsIdentity, 'index', selectSeed, 'index')

    selectLabel = pipe.Node(interface=Select(), name='selectLabel')
    selectLabel.inputs.inlist = labels
    preproc.connect(seedsIdentity, 'index', selectLabel, 'index')

    points = pipe.Node(interface=Function(
        function=createSphereExpression,
        input_names=['coordinates', 'radius'],
        output_names=['expression']),
                       name='createSphereExpression')
    preproc.connect(selectSeed, 'out', points, 'coordinates')

    downsampleAtlas = pipe.Node(interface=Function(
        function=resampleImage,
        input_names=['inputVolume', 'outputVolume', 'resolution'],
        output_names=['outputVolume']),
                                name="downsampleAtlas")
    downsampleAtlas.inputs.inputVolume = nacAtlasFile
    downsampleAtlas.inputs.outputVolume = downsampledNACfilename
    downsampleAtlas.inputs.resolution = [int(x) for x in nacResampleResolution]

    spheres = pipe.Node(interface=Calc(letters=['a']), name='afni3Dcalc_seeds')
    spheres.inputs.outputtype = 'NIFTI_GZ'
    preproc.connect(downsampleAtlas, 'outputVolume', spheres, 'in_file_a')
    spheres.inputs.args = '-nscale'

    preproc.connect(points, 'expression', spheres, 'expr')

    renameMasks = pipe.Node(interface=Rename(format_string='%(label)s_mask'),
                            name='renameMasksAtlas')
    renameMasks.inputs.keep_ext = True
    preproc.connect(selectLabel, 'out', renameMasks, 'label')
    preproc.connect(spheres, 'out_file', renameMasks, 'in_file')

    atlas_DataSink = pipe.Node(interface=DataSink(), name="atlas_DataSink")
    atlas_DataSink.inputs.base_directory = preproc.base_dir  # '/Shared/paulsen/Experiments/20130417_rsfMRI_Results'
    atlas_DataSink.inputs.container = RESULTS_DIR
    atlas_DataSink.inputs.parameterization = False
    atlas_DataSink.overwrite = REWRITE_DATASINKS
    preproc.connect(renameMasks, 'out_file', atlas_DataSink, 'Atlas')
    preproc.connect(downsampleAtlas, 'outputVolume', atlas_DataSink,
                    'Atlas.@resampled')
    preproc.run()
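
`getAtlasPoints` is not shown in these snippets; a hedged sketch that parses a Slicer .fcsv fiducials file into the `(labels, seeds)` pair used above (the column layout is an assumption about the Slicer Markups format):

def getAtlasPoints(filename):
    labels, seeds = [], []
    with open(filename) as fcsv:
        for line in fcsv:
            if line.startswith('#'):  # skip header/comment lines
                continue
            fields = line.strip().split(',')
            seeds.append(tuple(float(v) for v in fields[1:4]))  # x, y, z
            labels.append(fields[11])                           # label column
    return labels, seeds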
Example #14
def create_extended_susan_workflow(name='extended_susan', separate_masks=True):

    input_node = pe.Node(IdentityInterface(fields=['in_file',
                                                   'fwhm',
                                                   'EPI_session_space',
                                                   'output_directory',
                                                   'sub_id']), name='inputspec')

    output_node = pe.Node(interface=IdentityInterface(fields=['smoothed_files',
                                                              'mask',
                                                              'mean']), name='outputspec')

    datasink = pe.Node(DataSink(), name='sinker')
    datasink.inputs.parameterization = False

    # first link the workflow's output_directory into the datasink.

    esw = pe.Workflow(name=name)

    esw.connect(input_node, 'output_directory', datasink, 'base_directory')
    esw.connect(input_node, 'sub_id', datasink, 'container')

    meanfuncmask = pe.Node(interface=fsl.BET(mask=True,
                                             no_output=True,
                                             frac=0.3),
                           name='meanfuncmask')

    esw.connect(input_node, 'EPI_session_space', meanfuncmask, 'in_file')

    """
    Mask the functional runs with the extracted mask
    """

    maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
                                                   op_string='-mas'),
                          iterfield=['in_file'],
                          name='maskfunc')

    esw.connect(input_node, 'in_file', maskfunc, 'in_file')
    esw.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')

    """
    Determine the 2nd and 98th percentile intensities of each functional run
    """

    getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
                           iterfield=['in_file'],
                           name='getthreshold')
    esw.connect(maskfunc, 'out_file', getthresh, 'in_file')

    """
    Threshold the first run of the functional data at 10% of the 98th percentile
    """

    threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char',
                                                    suffix='_thresh'),
                           iterfield=['in_file', 'op_string'],
                           name='threshold')

    esw.connect(maskfunc, 'out_file', threshold, 'in_file')

    """
    Define a function to get 10% of the intensity
    """

    esw.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')

    """
    Determine the median value of the functional runs using the mask
    """

    medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
                           iterfield=['in_file', 'mask_file'],
                           name='medianval')
    esw.connect(input_node, 'in_file', medianval, 'in_file')
    esw.connect(threshold, 'out_file', medianval, 'mask_file')

    """
    Dilate the mask
    """

    dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil',
                                                     op_string='-dilF'),
                            iterfield=['in_file'],
                            name='dilatemask')
    esw.connect(threshold, 'out_file', dilatemask, 'in_file')
    esw.connect(dilatemask, 'out_file', output_node, 'mask')

    """
    Mask the motion corrected functional runs with the dilated mask
    """

    maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                    op_string='-mas'),
                           iterfield=['in_file', 'in_file2'],
                           name='maskfunc2')
    esw.connect(input_node, 'in_file', maskfunc2, 'in_file')
    esw.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')

    """
    Smooth each run using SUSAN with the brightness threshold set to 75%
    of the median value for each run and a mask constituting the mean
    functional
    """

    smooth = create_susan_smooth(separate_masks=separate_masks)

    esw.connect(input_node, 'fwhm', smooth, 'inputnode.fwhm')
    esw.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files')
    esw.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file')

    """
    Mask the smoothed data with the dilated mask
    """

    maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                    op_string='-mas'),
                           iterfield=['in_file', 'in_file2'],
                           name='maskfunc3')
    esw.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file')

    esw.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')

    concatnode = pe.Node(interface=Merge(2),
                         name='concat')
    esw.connect(maskfunc2, ('out_file', tolist), concatnode, 'in1')
    esw.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2')

    """
    The following nodes select smooth or unsmoothed data depending on the
    fwhm. This is because SUSAN defaults to smoothing the data with about the
    voxel size of the input data if the fwhm parameter is less than 1/3 of the
    voxel size.
    """
    selectnode = pe.Node(interface=Select(), name='select')

    esw.connect(concatnode, 'out', selectnode, 'inlist')

    esw.connect(input_node, ('fwhm', chooseindex), selectnode, 'index')
    esw.connect(selectnode, 'out', output_node, 'smoothed_files')

    """
    Scale each run so that its median value is set to 10000
    """

    meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'),
                           iterfield=['in_file', 'op_string'],
                           name='meanscale')
    esw.connect(selectnode, 'out', meanscale, 'in_file')

    """
    Define a function to get the scaling factor for intensity normalization
    """

    esw.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string')

    """
    Generate a mean functional image from the first run
    """

    meanfunc3 = pe.Node(interface=fsl.ImageMaths(op_string='-Tmean',
                                                 suffix='_mean'),
                        name='meanfunc3')

    esw.connect(meanscale, ('out_file', pickfirst), meanfunc3, 'in_file')
    esw.connect(meanfunc3, 'out_file', output_node, 'mean')

    # Datasink
    esw.connect(meanscale, 'out_file', datasink, 'filtering')
    esw.connect(selectnode, 'out', datasink, 'filtering.@smoothed')
    esw.connect(dilatemask, 'out_file', datasink, 'filtering.@mask')

    return esw
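
The inline connection helpers referenced above (`getthreshop`, `chooseindex`, `getmeanscale`, `tolist`, `pickfirst`) come from the surrounding module; sketches consistent with the classic nipype fMRI preprocessing examples:

def getthreshop(thresh):
    # 10% of each run's 98th percentile, as an fsl ImageMaths op_string.
    return ['-thr %.10f -Tmin -bin' % (0.1 * val[1]) for val in thresh]

def chooseindex(fwhm):
    # Pick the unsmoothed (0) or smoothed (1) branch from the concat node.
    return [0] if fwhm < 1 else [1]

def getmeanscale(medianvals):
    # Scaling factor that maps each run's median intensity to 10000.
    return ['-mul %.10f' % (10000.0 / val) for val in medianvals]

def tolist(x):
    return [x]

def pickfirst(files):
    return files[0] if isinstance(files, list) else files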
Example #15
"""
==============================
Input of Anatomical Images
==============================
"""
grab_anat = Node(BIDSDataGrabber(), name = 'grab_anat')
grab_anat.inputs.base_dir = os.path.join(project_dir, data_dir)

# Iterate through subjects; use subject_list[:2] instead to run only 2 subjects and keep memory usage low
grab_anat.iterables = ('subject', subject_list)

# Define file types to grab, and how output will be accessed by other nodes
grab_anat.inputs.output_query = {'T1w': dict(extension=['nii.gz'], suffix='T1w')}

#res = grab_anat.run()

# The Select utility interface
from nipype.interfaces.utility import Select
sel1 = Node(Select(), name='select1')
sel1.inputs.index = 0

wf.connect(grab_anat, 'T1w', sel1, 'inlist')

wf.run()

"""
============================================
Input of Functional Images
============================================
"""
grab_func = Node(BIDSDataGrabber(), name='grab_func')

grab_func.inputs.base_dir = os.path.join(project_dir, data_dir)
grab_func.inputs.output_query = {'bold': dict(extension=['nii.gz'], suffix='bold')}
Example #16
datasource.inputs.template = '*'
datasource.inputs.sort_filelist = True
datasource.inputs.field_template = dict(ct='%s/CT.nii.gz',
                                        rtstruct='%s/RTSTRUCT/*.dcm')
datasource.inputs.template_args = dict(ct=[['sub_id']],
                                       rtstruct=[['sub_id']])
datasource.inputs.raise_on_empty = False
datasource.inputs.sub_id = sub_list

voxelizer = nipype.MapNode(interface=Voxelizer(), iterfield=['reference', 'struct_file'],
                           name='voxelizer')
voxelizer.inputs.regular_expression = '.*PTV.*'
voxelizer.inputs.multi_structs = True
voxelizer.inputs.binarization = True

select = nipype.MapNode(interface=Select(), iterfield=['inlist'], name='select')
select.inputs.index = 0

features = nipype.MapNode(interface=CLGlobalFeatures(), iterfield=['in_file', 'mask'],
                           name='features_extraction')
features.inputs.first_order = True
features.inputs.cooccurence = True
features.inputs.run_length = True
features.inputs.int_vol_hist = True
features.inputs.local_intensity = True
features.inputs.volume = True
features.inputs.id = True
# features.inputs.ngld = True
features.inputs.ngtd = True
features.inputs.use_header = True
FslRoi.inputs.t_size = 1

#-----------------------------------------------------------------------------------------------------
# In[11]:

# mcflirt -in ${folder} -out ${folder}_mcf  -refvol example_func -plots -mats  -report;

McFlirt = Node(fsl.MCFLIRT(), name = 'McFlirt')
McFlirt.inputs.save_plots = True
McFlirt.inputs.save_mats = True
McFlirt.inputs.save_rms = True
McFlirt.inputs.output_type = 'NIFTI'

#-----------------------------------------------------------------------------------------------------
#Getting motion parameters from Mcflirt and plotting them
Get_Abs_Displacement = Node(Select(), name = 'Get_Absolute_Displacement')
Get_Abs_Displacement.inputs.index = [0]


Get_Rel_Displacement = Node(Select(), name = 'Get_Relative_Displacement')
Get_Rel_Displacement.inputs.index = [1]


def Plot_Motion(motion_par, abs_disp, rel_disp):

    import numpy as np
    import matplotlib.pyplot as plt 

    movement = np.loadtxt(motion_par)
    abs_disp = np.loadtxt(abs_disp)
    rel_disp = np.loadtxt(rel_disp)
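    # The original snippet is truncated here; a purely illustrative
    # completion (an assumption) would plot the three loaded arrays:
    fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8, 9))
    axes[0].plot(movement)
    axes[0].set_title('Motion parameters (MCFLIRT .par)')
    axes[1].plot(abs_disp)
    axes[1].set_title('Absolute displacement (mm)')
    axes[2].plot(rel_disp)
    axes[2].set_title('Relative displacement (mm)')
    fig.savefig('motion_plots.png')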
Example #18
def spm_anat_to_diff_coregistration(wf_name="spm_anat_to_diff_coregistration"):
    """ Co-register the anatomical image and other images in anatomical space to
    the average B0 image.

    This estimates an affine transform from anat to diff space, applies it to
    the brain mask and an atlas.

    Nipype Inputs
    -------------
    dti_co_input.avg_b0: traits.File
        path to the average B0 image from the diffusion MRI.
        This image should come from a motion and Eddy currents
        corrected diffusion image.

    dti_co_input.anat: traits.File
        path to the high-contrast anatomical image.

    dti_co_input.tissues: traits.File
        paths to the NewSegment c*.nii output files, in anatomical space

    dti_co_input.atlas_anat: traits.File
        Atlas in subject anatomical space.

    Nipype Outputs
    --------------
    dti_co_output.anat_diff: traits.File
        Anatomical image in diffusion space.

    dti_co_output.tissues_diff: traits.File
        Tissues images in diffusion space.

    dti_co_output.brain_mask_diff: traits.File
        Brain mask for diffusion image.

    dti_co_output.atlas_diff: traits.File
        Atlas image warped to diffusion space.
        If the `atlas_file` option is an existing file and `normalize_atlas` is True.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - spm_anat_preproc

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = ["avg_b0", "tissues", "anat"]
    out_fields = [
        "anat_diff",
        "tissues_diff",
        "brain_mask_diff",
    ]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields += ["atlas_anat"]
        out_fields += ["atlas_diff"]

    # input interface
    dti_input = pe.Node(IdentityInterface(fields=in_fields,
                                          mandatory_inputs=True),
                        name="dti_co_input")

    gunzip_b0 = pe.Node(Gunzip(), name="gunzip_b0")
    coreg_b0 = setup_node(spm_coregister(cost_function="mi"), name="coreg_b0")

    # co-registration
    brain_sel = pe.Node(Select(index=[0, 1, 2]), name="brain_sel")
    coreg_split = pe.Node(Split(splits=[1, 2], squeeze=True),
                          name="coreg_split")

    brain_merge = setup_node(MultiImageMaths(), name="brain_merge")
    brain_merge.inputs.op_string = "-add '%s' -add '%s' -abs -kernel gauss 4 -dilM -ero -kernel gauss 1 -dilM -bin"
    brain_merge.inputs.out_file = "brain_mask_diff.nii.gz"

    # output interface
    dti_output = pe.Node(IdentityInterface(fields=out_fields),
                         name="dti_co_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
        # co-registration
        (dti_input, coreg_b0, [("anat", "source")]),
        (dti_input, brain_sel, [("tissues", "inlist")]),
        (brain_sel, coreg_b0, [(("out", flatten_list), "apply_to_files")]),
        (dti_input, gunzip_b0, [("avg_b0", "in_file")]),
        (gunzip_b0, coreg_b0, [("out_file", "target")]),
        (coreg_b0, coreg_split, [("coregistered_files", "inlist")]),
        (coreg_split, brain_merge, [("out1", "in_file")]),
        (coreg_split, brain_merge, [("out2", "operand_files")]),

        # output
        (coreg_b0, dti_output, [("coregistered_source", "anat_diff")]),
        (coreg_b0, dti_output, [("coregistered_files", "tissues_diff")]),
        (brain_merge, dti_output, [("out_file", "brain_mask_diff")]),
    ])

    # add more nodes if atlas registration is to be performed
    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"),
                                 name="coreg_atlas")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
            (dti_input, coreg_atlas, [
                ("anat", "source"),
                ("atlas_anat", "apply_to_files"),
            ]),
            (gunzip_b0, coreg_atlas, [("out_file", "target")]),
            (coreg_atlas, dti_output, [("coregistered_files", "atlas_diff")]),
        ])

    return wf
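
Finally, a standalone sketch of the `coreg_split` node above: `Split(splits=[1, 2], squeeze=True)` unpacks the coregistered file list so the single-element first split comes out as a bare path:

from nipype.interfaces.utility import Split

sp = Split(inlist=['c1.nii', 'c2.nii', 'c3.nii'], splits=[1, 2], squeeze=True)
res = sp.run()
assert res.outputs.out1 == 'c1.nii'          # squeezed single element
assert res.outputs.out2 == ['c2.nii', 'c3.nii']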