Esempio n. 1
0
    def global_tracking_pipeline(self, **name_maps):
        """
        Builds a pipeline that runs whole-brain probabilistic tractography
        over the white-matter ODF image, seeded from a DWI-derived mask.
        """
        pipeline = self.new_pipeline(
            name='global_tracking',
            desc="Extract b0 image from a DWI study",
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Whole-brain mask used to seed the tractography
        mask_node = pipeline.add(
            'mask',
            DWI2Mask(),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Track over the WM fibre-orientation distribution image
        tract_node = pipeline.add(
            'tracking',
            Tractography(
                select=self.parameter('num_global_tracks'),
                cutoff=self.parameter('global_tracks_cutoff')),
            inputs={
                'in_file': ('wm_odf', mrtrix_image_format),
                'seed_image': (mask_node, 'out_file')},
            outputs={
                'global_tracks': ('out_file', mrtrix_track_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Enable anatomically-constrained tractography when a 5TT image
        # has been provided to the study
        if self.provided('anat_5tt'):
            pipeline.connect_input('anat_5tt', tract_node, 'act_file',
                                   mrtrix_image_format)

        return pipeline
Esempio n. 2
0
    def extract_magnitude_pipeline(self, **name_maps):
        """Extracts the first b==0 volume from the DWI series."""
        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts the first b==0 volume from the series",
            citations=[],
            name_maps=name_maps)

        # Pull all b==0 volumes out of the raw series
        b0_extract = pipeline.add(
            'dwiextract',
            ExtractDWIorB0(
                bzero=True,
                out_ext='.nii.gz'),
            inputs={
                'fslgrad': self.fsl_grads(pipeline, coregistered=False),
                'in_file': ('series', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Keep only the first volume along the 4th (volume) axis
        pipeline.add(
            "extract_first_vol",
            MRConvert(
                coord=(3, 0)),
            inputs={
                'in_file': (b0_extract, 'out_file')},
            outputs={
                'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
Esempio n. 3
0
    def brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using MRtrix's 'dwi2mask' command

        Parameters
        ----------
        mask_tool: Str
            Can be either 'bet' or 'dwi2mask' depending on which mask tool you
            want to use
        """
        if self.branch('bet_method', 'mrtrix'):
            pipeline = self.new_pipeline(
                'brain_extraction',
                desc="Generate brain mask from b0 images",
                citations=[mrtrix_cite],
                name_maps=name_maps)

            # Use the coregistered series if a coregistration reference was
            # supplied, otherwise the preprocessed series
            series = ('series_coreg' if self.provided('coreg_ref')
                      else 'series_preproc')

            # Whole-brain mask derived directly from the DWI series
            mask_node = pipeline.add(
                'dwi2mask',
                BrainMask(
                    out_file='brain_mask.nii.gz'),
                inputs={
                    'grad_fsl': self.fsl_grads(pipeline, coregistered=False),
                    'in_file': (series, nifti_gz_format)},
                outputs={
                    'brain_mask': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Pair the magnitude image with the mask for the multiply op
            operands = pipeline.add(
                'merge_operands',
                Merge(2),
                inputs={
                    'in1': ('mag_preproc', nifti_gz_format),
                    'in2': (mask_node, 'out_file')})

            # Apply the mask via voxel-wise multiplication
            pipeline.add(
                'apply_mask',
                MRCalc(
                    operation='multiply'),
                inputs={
                    'operands': (operands, 'out')},
                outputs={
                    'brain': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
        else:
            pipeline = super().brain_extraction_pipeline(**name_maps)
        return pipeline
Esempio n. 4
0
File: dmri.py Progetto: amrka/banana
    def extract_b0_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Extracts the b0 images from a DWI study and takes their mean
        """
        pipeline = self.new_pipeline(
            name='extract_b0',
            desc="Extract b0 image from a DWI study",
            references=[mrtrix_cite],
            name_maps=name_maps)
        # Bundles bvecs/bvals into the (bvecs, bvals) tuple expected by
        # the MRtrix interfaces
        grad_merge = pipeline.add("fsl_grads", MergeTuple(2))
        # Pulls the b==0 volumes out of the bias-corrected series
        b0_extract = pipeline.add(
            'extract_b0s', ExtractDWIorB0(),
            requirements=[mrtrix_req.v('3.0rc3')])
        b0_extract.inputs.bzero = True
        b0_extract.inputs.quiet = True
        # FIXME: Need a registration step before the mean
        # Averages the extracted b0 volumes along the volume axis
        averager = pipeline.add(
            "mean",
            MRMath(),
            requirements=[mrtrix_req.v('3.0rc3')])
        averager.inputs.operation = 'mean'
        averager.inputs.axis = 3
        averager.inputs.quiet = True
        # Converts the averaged image to NIfTI for the study output
        converter = pipeline.add(
            "output_conversion", MRConvert(),
            requirements=[mrtrix_req.v('3.0rc3')])
        converter.inputs.quiet = True
        converter.inputs.out_ext = '.nii.gz'
        # Wire study inputs
        pipeline.connect_input('grad_dirs', grad_merge, 'in1')
        pipeline.connect_input('bvalues', grad_merge, 'in2')
        pipeline.connect_input('bias_correct', b0_extract, 'in_file')
        # Wire internal connections
        pipeline.connect(grad_merge, 'out', b0_extract, 'grad_fsl')
        pipeline.connect(b0_extract, 'out_file', averager, 'in_files')
        pipeline.connect(averager, 'out_file', converter, 'in_file')
        # Wire study output
        pipeline.connect_output('b0', converter, 'out_file')
        return pipeline
Esempio n. 5
0
    def average_response_pipeline(self, **name_maps):
        """
        Averages the estimate response function over all subjects in the
        project
        """
        pipeline = self.new_pipeline(
            name='average_response',
            desc=("Averages the fibre response function over the project"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Join the WM response functions across subjects
        subj_join = pipeline.add(
            'join_subjects',
            utility.IdentityInterface(['responses']),
            inputs={'responses': ('wm_response', text_format)},
            outputs={},
            joinsource=self.SUBJECT_ID,
            joinfield=['responses'])

        # Flatten the per-visit lists into one list of response files
        visit_join = pipeline.add(
            'join_visits',
            Chain(['responses']),
            inputs={'responses': (subj_join, 'responses')},
            joinsource=self.VISIT_ID,
            joinfield=['responses'])

        # Average all collected response functions into a single one
        pipeline.add(
            'avg_response',
            AverageResponse(),
            inputs={'in_files': (visit_join, 'responses')},
            outputs={'avg_response': ('out_file', text_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
Esempio n. 6
0
    def connectome_pipeline(self, **name_maps):
        """Generates a connectome matrix from the whole-brain tracks."""
        pipeline = self.new_pipeline(
            name='connectome',
            desc=("Generate a connectome from whole brain connectivity"),
            citations=[],
            name_maps=name_maps)

        # Locate the aparc+aseg parcellation inside the recon-all directory
        parc_path = pipeline.add(
            'aseg_path',
            AppendPath(
                sub_paths=['mri', 'aparc+aseg.mgz']),
            inputs={
                'base_path': ('anat_fs_recon_all', directory_format)})

        # Build the connectome from the global tractogram and parcellation
        pipeline.add(
            'connectome',
            mrtrix3.BuildConnectome(),
            inputs={
                'in_parc': (parc_path, 'out_path'),
                'in_file': ('global_tracks', mrtrix_track_format)},
            outputs={
                'connectome': ('out_file', csv_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
Esempio n. 7
0
    def tensor_metrics_pipeline(self, **name_maps):
        """
        Fits the apparrent diffusion tensor (DT) to each voxel of the image
        """
        pipeline = self.new_pipeline(
            name='fa',
            desc=("Calculates the FA and ADC from a tensor image"),
            citations=[],
            name_maps=name_maps)

        # Compute FA and ADC maps from the fitted tensor, within the mask
        pipeline.add(
            'metrics',
            TensorMetrics(
                out_adc='adc.nii.gz',
                out_fa='fa.nii.gz'),
            inputs={
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format),
                'in_file': ('tensor', nifti_gz_format)},
            outputs={
                'adc': ('out_adc', nifti_gz_format),
                'fa': ('out_fa', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
Esempio n. 8
0
    def tensor_pipeline(self, **name_maps):
        """
        Fits the apparrent diffusion tensor (DT) to each voxel of the image
        """
        pipeline = self.new_pipeline(
            name='tensor',
            desc=("Estimates the apparent diffusion tensor in each "
                  "voxel"),
            citations=[],
            name_maps=name_maps)

        # Fit the diffusion tensor within the brain mask
        pipeline.add(
            'dwi2tensor',
            FitTensor(
                out_file='dti.nii.gz'),
            inputs={
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)},
            outputs={
                'tensor': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
Esempio n. 9
0
File: dmri.py Progetto: amrka/banana
    def tensor_metrics_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Fits the apparrent diffusion tensor (DT) to each voxel of the image
        """
        pipeline = self.new_pipeline(
            name='fa',
            desc=("Calculates the FA and ADC from a tensor image"),
            references=[],
            name_maps=name_maps)
        # Tensor-metric computation node (FA + ADC maps)
        metric_node = pipeline.add(
            'metrics',
            TensorMetrics(),
            requirements=[mrtrix_req.v('3.0rc3')])
        metric_node.inputs.out_adc = 'adc.nii.gz'
        metric_node.inputs.out_fa = 'fa.nii.gz'
        # Wire study inputs
        pipeline.connect_input('tensor', metric_node, 'in_file')
        pipeline.connect_input('brain_mask', metric_node, 'in_mask')
        # Wire study outputs
        pipeline.connect_output('fa', metric_node, 'out_fa')
        pipeline.connect_output('adc', metric_node, 'out_adc')
        return pipeline
Esempio n. 10
0
    def extract_b0_pipeline(self, **name_maps):
        """
        Extracts the b0 images from a DWI study and takes their mean
        """
        pipeline = self.new_pipeline(
            name='extract_b0',
            desc="Extract b0 image from a DWI study",
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Pull the b==0 volumes out of the preprocessed series
        b0_extract = pipeline.add(
            'extract_b0s',
            ExtractDWIorB0(
                bzero=True,
                quiet=True),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'fslgrad': self.fsl_grads(pipeline)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # FIXME: Need a registration step before the mean
        # Average the b0 volumes along the 4th (volume) axis
        averager = pipeline.add(
            "mean",
            MRMath(
                operation='mean',
                axis=3,
                quiet=True),
            inputs={
                'in_files': (b0_extract, 'out_file')},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Convert the averaged image to NIfTI for the study output
        pipeline.add(
            "output_conversion",
            MRConvert(
                quiet=True,
                out_ext='.nii.gz'),
            inputs={
                'in_file': (averager, 'out_file')},
            outputs={
                'b0': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
Esempio n. 11
0
    def prepare_pipeline(self, **name_maps):
        """
        Performs basic preprocessing, such as swapping dimensions into
        standard orientation and resampling (if required)

        Parameters
        -------
        new_dims : tuple(str)[3]
            A 3-tuple with the new orientation of the image (see FSL
            swap dim)
        resolution : list(float)[3] | None
            New resolution of the image. If None no resampling is
            performed
        """
        pipeline = self.new_pipeline(
            name='prepare_pipeline',
            name_maps=name_maps,
            desc=("Dimensions swapping to ensure that all the images "
                  "have the same orientations."),
            citations=[fsl_cite])

        reorient = self.branch('reorient_to_std')
        resolution = self.parameter('resampled_resolution')

        if reorient or resolution is not None:
            swap = None
            if reorient:
                # Reorient the magnitude image to FSL standard orientation
                swap = pipeline.add(
                    'fslreorient2std',
                    fsl.utils.Reorient2Std(
                        output_type='NIFTI_GZ'),
                    inputs={
                        'in_file': ('magnitude', nifti_gz_format)},
                    requirements=[fsl_req.v('5.0.9')])

            if resolution is not None:
                # BUGFIX: 'swap' was previously referenced unconditionally,
                # raising NameError when resampling was requested without
                # reorientation. Resample the reoriented image if one
                # exists, otherwise the raw magnitude image.
                resample = pipeline.add(
                    "resample",
                    MRResize(
                        voxel=resolution),
                    inputs=(
                        {'in_file': (swap, 'out_file')} if swap is not None
                        else {'in_file': ('magnitude', nifti_gz_format)}),
                    requirements=[mrtrix_req.v('3.0rc3')])
                pipeline.connect_output('mag_preproc', resample, 'out_file',
                                        nifti_gz_format)
            else:
                # Reorientation only; output the reoriented image directly
                pipeline.connect_output('mag_preproc', swap, 'out_file',
                                        nifti_gz_format)
        else:
            # Don't actually do any processing just copy magnitude image to
            # preproc
            pipeline.add(
                'identity',
                IdentityInterface(
                    ['file']),
                inputs={
                    'file': ('magnitude', nifti_gz_format)},
                outputs={
                    'mag_preproc': ('file', nifti_gz_format)})

        return pipeline
Esempio n. 12
0
class MrtrixConverter(Converter):
    """Converter that delegates image-format translation to MRtrix's
    'mrconvert' command."""

    # Names of the traits on the MRConvert interface used for wiring
    input = 'in_file'
    output = 'out_file'
    requirements = [mrtrix_req.v(3)]

    @property
    def interface(self):
        # A fresh MRConvert configured for the requested output format
        target_ext = self.output_format.extension
        return MRConvert(quiet=True, out_ext=target_ext)
Esempio n. 13
0
File: dmri.py Progetto: amrka/banana
    def response_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Estimates the fibre orientation distribution (FOD) using constrained
        spherical deconvolution

        Parameters
        ----------
        response_algorithm : str
            Algorithm used to estimate the response
        """
        pipeline = self.new_pipeline(
            name='response',
            desc=("Estimates the fibre response function"),
            references=[mrtrix_cite],
            name_maps=name_maps)
        # Response-function estimation node
        resp_node = pipeline.add(
            'response',
            ResponseSD(),
            requirements=[mrtrix_req.v('3.0rc3')])
        resp_node.inputs.algorithm = self.parameter('response_algorithm')
        # Bundles bvecs/bvals into the tuple expected by MRtrix
        grad_merge = pipeline.add(
            "fsl_grads",
            MergeTuple(2))
        pipeline.connect(grad_merge, 'out', resp_node, 'grad_fsl')
        # Wire study inputs
        pipeline.connect_input('grad_dirs', grad_merge, 'in1')
        pipeline.connect_input('bvalues', grad_merge, 'in2')
        pipeline.connect_input('bias_correct', resp_node, 'in_file')
        pipeline.connect_input('brain_mask', resp_node, 'in_mask')
        # Wire study outputs
        pipeline.connect_output('wm_response', resp_node, 'wm_file')
        if self.multi_tissue:
            # Multi-tissue algorithms also emit GM and CSF responses
            resp_node.inputs.gm_file = 'gm.txt'
            resp_node.inputs.csf_file = 'csf.txt'
            pipeline.connect_output('gm_response', resp_node, 'gm_file')
            pipeline.connect_output('csf_response', resp_node, 'csf_file')
        return pipeline
Esempio n. 14
0
File: dmri.py Progetto: amrka/banana
    def fod_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Estimates the fibre orientation distribution (FOD) using constrained
        spherical deconvolution

        Parameters
        ----------
        """
        pipeline = self.new_pipeline(
            name='fod',
            desc=("Estimates the fibre orientation distribution in each"
                  " voxel"),
            references=[mrtrix_cite],
            name_maps=name_maps)
        # Multi-shell multi-tissue CSD additionally needs GM/CSF responses
        if self.branch('fod_algorithm', 'msmt_csd'):
            pipeline.add_input(FilesetSpec('gm_response', text_format))
            pipeline.add_input(FilesetSpec('csf_response', text_format))
        # FOD estimation node
        fod_node = pipeline.add(
            'dwi2fod',
            EstimateFOD(),
            requirements=[mrtrix_req.v('3.0rc3')])
        fod_node.inputs.algorithm = self.parameter('fod_algorithm')
        # Bundles bvecs/bvals into the tuple expected by MRtrix
        grad_merge = pipeline.add("fsl_grads", MergeTuple(2))
        pipeline.connect(grad_merge, 'out', fod_node, 'grad_fsl')
        # Wire study inputs
        pipeline.connect_input('grad_dirs', grad_merge, 'in1')
        pipeline.connect_input('bvalues', grad_merge, 'in2')
        pipeline.connect_input('bias_correct', fod_node, 'in_file')
        pipeline.connect_input('wm_response', fod_node, 'wm_txt')
        pipeline.connect_input('brain_mask', fod_node, 'mask_file')
        # Wire study outputs
        pipeline.connect_output('wm_odf', fod_node, 'wm_odf')
        if self.multi_tissue:
            # Multi-tissue variants also produce GM and CSF ODFs
            pipeline.connect_input('gm_response', fod_node, 'gm_txt')
            pipeline.connect_input('csf_response', fod_node, 'csf_txt')
            fod_node.inputs.gm_odf = 'gm.mif'
            fod_node.inputs.csf_odf = 'csf.mif'
            pipeline.connect_output('gm_odf', fod_node, 'gm_odf')
            pipeline.connect_output('csf_odf', fod_node, 'csf_odf')
        return pipeline
Esempio n. 15
0
    def test_subtract(self):
        """Runs mrcalc 'subtract' on two fixture images and checks that
        the output file is produced."""
        work_dir = tempfile.mkdtemp()
        result_path = os.path.join(work_dir, 'out_file.mif')
        # Build an mrcalc node: threes.mif - ones.mif -> result_path
        node = Node(MRCalc(), name='mrcalc',
                    requirements=[mrtrix_req.v('3.0rc3')])
        node.inputs.operation = 'subtract'
        node.inputs.operands = [
            os.path.join(self.session_dir, 'threes.mif'),
            os.path.join(self.session_dir, 'ones.mif')]
        node.inputs.out_file = result_path
        node.run()
        self.assertTrue(os.path.exists(result_path))
Esempio n. 16
0
    def extract_magnitude_pipeline(self, **name_maps):
        """Extracts a single magnitude volume from the series."""
        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts a single magnitude volume from a series",
            citations=[],
            name_maps=name_maps)

        # Take the first volume along the 4th axis as the magnitude image
        pipeline.add(
            "extract_first_vol",
            MRConvert(
                coord=(3, 0)),
            inputs={
                'in_file': ('series', nifti_gz_format)},
            outputs={
                'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
Esempio n. 17
0
    def fod_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Estimates the fibre orientation distribution (FOD) using constrained
        spherical deconvolution

        Parameters
        ----------
        """
        pipeline = self.new_pipeline(
            name='fod',
            desc=("Estimates the fibre orientation distribution in each"
                  " voxel"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Multi-shell multi-tissue CSD also requires GM/CSF responses
        if self.branch('fod_algorithm', 'msmt_csd'):
            pipeline.add_input(FilesetSpec('gm_response', text_format))
            pipeline.add_input(FilesetSpec('csf_response', text_format))

        # Create fod fit node
        dwi2fod = pipeline.add(
            'dwi2fod',
            EstimateFOD(algorithm=self.parameter('fod_algorithm')),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'wm_txt': ('wm_response', text_format),
                'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)
            },
            outputs={'wm_odf': ('wm_odf', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        if self.multi_tissue:
            # BUGFIX: trailing commas previously made these values the
            # tuples ('gm.mif',) / ('csf.mif',) instead of filename
            # strings; the stray trailing commas after the connect calls
            # have also been removed.
            dwi2fod.inputs.gm_odf = 'gm.mif'
            dwi2fod.inputs.csf_odf = 'csf.mif'
            pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
                                   text_format)
            pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
                                   text_format)
            pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
                                    nifti_gz_format)
            pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
                                    nifti_gz_format)
        return pipeline
Esempio n. 18
0
    def nifti2dcm_conversion_pipeline(self, **name_maps):
        """
        Converts the reference-aligned umaps from NIfTI back to DICOM,
        one frame per umap, in parallel.
        """
        pipeline = self.new_pipeline(
            name='conversion_to_dicom',
            desc=("Conversing aligned umap from nifti to dicom format - "
                  "parallel implementation"),
            citations=(),
            name_maps=name_maps)

        # List the aligned umap NIfTI files
        list_niftis = pipeline.add(
            'list_niftis',
            ListDir(),
            inputs={'directory': ('umaps_align2ref', directory_format)})

        # Reorient the umaps to match the original DICOM umap
        reorient_niftis = pipeline.add(
            'reorient_niftis',
            ReorientUmap(),
            inputs={
                'niftis': (list_niftis, 'files'),
                'umap': ('umap', dicom_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # List the reference DICOM files in acquisition order
        list_dicoms = pipeline.add(
            'list_dicoms',
            ListDir(sort_key=dicom_fname_sort_key),
            inputs={'directory': ('umap', dicom_format)})

        # Convert each reoriented NIfTI into a DICOM frame.
        # BUGFIX: the (reorient_niftis, 'reoriented_umaps') connection
        # feeds this node's 'in_file' *input*; it was previously listed
        # in the 'outputs' dict, which mis-wired the pipeline.
        nii2dicom = pipeline.add(
            'nii2dicom',
            Nii2Dicom(
                # extension='Frame',  #  nii2dicom parameter
            ),
            inputs={
                'reference_dicom': (list_dicoms, 'files'),
                'in_file': (reorient_niftis, 'reoriented_umaps')},
            iterfield=['in_file'],
            wall_time=20)

        # Collect all converted frames into a single output directory
        pipeline.add(
            'copy2dir',
            CopyToDir(extension='Frame'),
            inputs={'in_files': (nii2dicom, 'out_file')},
            outputs={'umap_aligned_dicoms': ('out_dir', directory_format)})

        return pipeline
Esempio n. 19
0
File: dmri.py Progetto: amrka/banana
    def brain_extraction_pipeline(self, **name_maps):  # @UnusedVariable @IgnorePep8
        """
        Generates a whole brain mask using MRtrix's 'dwi2mask' command

        Parameters
        ----------
        mask_tool: Str
            Can be either 'bet' or 'dwi2mask' depending on which mask tool you
            want to use
        """
        if self.branch('brain_extract_method', 'mrtrix'):
            pipeline = self.new_pipeline(
                'brain_extraction',
                desc="Generate brain mask from b0 images",
                references=[mrtrix_cite],
                name_maps=name_maps)

            # Gradient merge node
            grad_fsl = pipeline.add(
                "grad_fsl",
                MergeTuple(2),
                inputs={
                    'in1': ('grad_dirs', fsl_bvecs_format),
                    'in2': ('bvalues', fsl_bvals_format)})

            # Create mask node.
            # BUGFIX: the outputs mapping was inverted
            # ({'out_file': ('brain_mask', ...)}); the convention is
            # {<study-spec>: (<node-port>, <format>)}.
            pipeline.add(
                'dwi2mask',
                BrainMask(
                    out_file='brain_mask.nii.gz'),
                inputs={
                    'in_file': ('preproc', nifti_gz_format)},
                connect={
                    'grad_fsl': (grad_fsl, 'out')},
                outputs={
                    'brain_mask': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

        else:
            pipeline = super(DmriStudy, self).brain_extraction_pipeline(
                **name_maps)
        return pipeline
Esempio n. 20
0
File: dmri.py Progetto: amrka/banana
    def bias_correct_pipeline(self, **name_maps):  # @UnusedVariable @IgnorePep8
        """
        Corrects B1 field inhomogeneities
        """
        bias_method = self.parameter('bias_correct_method')
        pipeline = self.new_pipeline(
            name='bias_correct',
            desc="Corrects for B1 field inhomogeneity",
            references=[fast_cite,
                        (n4_cite if bias_method == 'ants' else fsl_cite)],
            name_maps=name_maps)
        # Bias-correction node; the backend requirement depends on the
        # configured method (ANTs or FSL)
        corrector = pipeline.add(
            "bias_correct", DWIBiasCorrect(),
            requirements=(
                [mrtrix_req.v('3.0rc3')] +
                [ants_req.v('2.0')
                 if bias_method == 'ants' else fsl_req.v('5.0.9')]))
        corrector.inputs.method = bias_method
        # Bundles bvecs/bvals into the tuple expected by MRtrix
        grad_merge = pipeline.add(
            "fsl_grads",
            MergeTuple(2))
        pipeline.connect(grad_merge, 'out', corrector, 'grad_fsl')
        # Wire study inputs
        pipeline.connect_input('grad_dirs', grad_merge, 'in1')
        pipeline.connect_input('bvalues', grad_merge, 'in2')
        pipeline.connect_input('preproc', corrector, 'in_file')
        pipeline.connect_input('brain_mask', corrector, 'mask')
        # Wire study output
        pipeline.connect_output('bias_correct', corrector, 'out_file')
        return pipeline
Esempio n. 21
0
    def response_pipeline(self, **name_maps):
        """
        Estimates the fibre orientation distribution (FOD) using constrained
        spherical deconvolution

        Parameters
        ----------
        response_algorithm : str
            Algorithm used to estimate the response
        """
        pipeline = self.new_pipeline(
            name='response',
            desc=("Estimates the fibre response function"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Create fod fit node
        response = pipeline.add(
            'response',
            ResponseSD(
                algorithm=self.parameter('response_algorithm')),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'wm_response': ('wm_file', text_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Connect to outputs
        if self.multi_tissue:
            # BUGFIX: trailing commas previously made these values the
            # tuples ('gm.txt',) / ('csf.txt',) instead of filename strings.
            response.inputs.gm_file = 'gm.txt'
            response.inputs.csf_file = 'csf.txt'
            pipeline.connect_output('gm_response', response, 'gm_file',
                                    text_format)
            pipeline.connect_output('csf_response', response, 'csf_file',
                                    text_format)

        return pipeline
Esempio n. 22
0
    def pet_data_preparation_pipeline(self, **kwargs):
        """
        Prepares a reconstructed PET data directory for motion correction.
        """
        pipeline = self.new_pipeline(
            name='pet_data_preparation',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will prepare the data for the motion "
                  "correction"),
            citations=[],
            **kwargs)

        # BUGFIX: the keyword was misspelt 'ouputs', so the prepared
        # directory was never registered as a pipeline output.
        pipeline.add(
            'prepare_pet',
            PreparePetDir(
                image_orientation_check=self.parameter(
                    'image_orientation_check')),
            inputs={'pet_dir': ('pet_recon_dir', directory_format)},
            outputs={
                'pet_recon_dir_prepared':
                ('pet_dir_prepared', directory_format)},
            requirements=[mrtrix_req.v('3.0rc3'),
                          fsl_req.v('5.0.9')])

        return pipeline
Esempio n. 23
0
    def gen_5tt_pipeline(self, **name_maps):
        """Generates the 5TT image needed for ACT from a recon-all dir."""
        pipeline = self.new_pipeline(
            name='gen5tt',
            name_maps=name_maps,
            desc=("Generate 5-tissue-type image used for Anatomically-"
                  "Constrained Tractography (ACT)"))

        # Locate the aseg parcellation inside the recon-all directory
        parc_path = pipeline.add(
            'aseg_path',
            AppendPath(sub_paths=['mri', 'aseg.mgz']),
            inputs={'base_path': ('fs_recon_all', directory_format)})

        # Derive the 5TT image using MRtrix's FreeSurfer algorithm
        pipeline.add(
            'gen5tt',
            mrtrix3.Generate5tt(out_file='5tt.mif', algorithm='freesurfer'),
            inputs={'in_file': (parc_path, 'out_path')},
            outputs={'five_tissue_type': ('out_file', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3'),
                          freesurfer_req.v('6.0')])

        return pipeline
Esempio n. 24
0
    def motion_correction_pipeline(self, **name_maps):
        """
        Construct the PET motion-correction pipeline.

        Combines the prepared PET data with transformation matrices from the
        MR-based motion-detection pipeline to produce either a dynamic
        (frame-by-frame) or a static motion-corrected PET image, depending on
        the 'dynamic_pet_mc' branch. Optionally aligns the result to a
        structural image (when a 'struct2align' input is provided) and/or
        registers it to MNI space (parameter 'PET2MNI_reg').

        Parameters
        ----------
        **name_maps
            Name-mapping keyword arguments forwarded to ``new_pipeline``.

        Returns
        -------
        pipeline
            The constructed motion-correction pipeline.
        """
        # Structural alignment is enabled only when a 'struct2align' image
        # was provided as a study input
        if 'struct2align' in self.input_names:
            StructAlignment = True
        else:
            StructAlignment = False

        pipeline = self.new_pipeline(
            name='pet_mc',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will generate a motion corrected PET"
                  "image using information extracted from the MR-based "
                  "motion detection pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Validate the prepared PET data against the reference brain and
        # collect the motion matrices (and, for static MC, the frame
        # correction factors)
        check_pet = pipeline.add(
            'check_pet_data',
            CheckPetMCInputs(),
            inputs={
                'pet_data': ('pet_data_prepared', directory_format),
                'reference': ('ref_brain', nifti_gz_format)
            },
            requirements=[fsl_req.v('5.0.9'),
                          mrtrix_req.v('3.0rc3')])
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_input('fixed_binning_mats', check_pet,
                                   'motion_mats')
        else:
            pipeline.connect_input('average_mats', check_pet, 'motion_mats')
            pipeline.connect_input('correction_factors', check_pet,
                                   'corr_factors')

        if StructAlignment:
            # Rigid (6 DOF) registration of the structural image onto the
            # PET reference brain using normalised mutual information
            struct_reg = pipeline.add('ref2structural_reg',
                                      FLIRT(dof=6,
                                            cost_func='normmi',
                                            cost='normmi',
                                            output_type='NIFTI_GZ'),
                                      inputs={
                                          'reference':
                                          ('ref_brain', nifti_gz_format),
                                          'in_file':
                                          ('struct2align', nifti_gz_format)
                                      },
                                      requirements=[fsl_req.v('5.0.9')])

        # Per-frame motion correction (iterated over frames in the dynamic
        # branch via 'iterfield')
        if self.branch('dynamic_pet_mc'):
            pet_mc = pipeline.add('pet_mc',
                                  PetImageMotionCorrection(),
                                  inputs={
                                      'pet_image': (check_pet, 'pet_images'),
                                      'motion_mat': (check_pet, 'motion_mats'),
                                      'pet2ref_mat': (check_pet, 'pet2ref_mat')
                                  },
                                  requirements=[fsl_req.v('5.0.9')],
                                  iterfield=['pet_image', 'motion_mat'])
        else:
            # NOTE(review): 'pet_image' and 'motion_mat' appear in iterfield
            # here but, unlike the dynamic branch above, are never connected
            # as inputs -- confirm whether these connections are missing
            pet_mc = pipeline.add(
                'pet_mc',
                PetImageMotionCorrection(),
                inputs={'corr_factor': (check_pet, 'corr_factors')},
                requirements=[fsl_req.v('5.0.9')],
                iterfield=['corr_factor', 'pet_image', 'motion_mat'])

        if StructAlignment:
            pipeline.connect(struct_reg, 'out_matrix_file', pet_mc,
                             'structural2ref_regmat')
            pipeline.connect_input('struct2align', pet_mc, 'structural_image')
        # Whether to additionally register the corrected PET to MNI space
        if self.parameter('PET2MNI_reg'):
            mni_reg = True
        else:
            mni_reg = False

        if self.branch('dynamic_pet_mc'):
            # Stack the per-frame corrected / uncorrected volumes into 4D
            # time series
            merge_mc = pipeline.add(
                'merge_pet_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_mc_image')},
                requirements=[fsl_req.v('5.0.9')])

            merge_no_mc = pipeline.add(
                'merge_pet_no_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_no_mc_image')},
                requirements=[fsl_req.v('5.0.9')])
        else:
            # Collapse the corrected frames into a single static PET image
            static_mc = pipeline.add('static_mc_generation',
                                     StaticPETImageGeneration(),
                                     inputs={
                                         'pet_mc_images':
                                         (pet_mc, 'pet_mc_image'),
                                         'pet_no_mc_images':
                                         (pet_mc, 'pet_no_mc_image')
                                     },
                                     requirements=[fsl_req.v('5.0.9')])

        # Collect the three final artefacts (displacement plot + MC and
        # no-MC images) for copying into the results directory
        merge_outputs = pipeline.add(
            'merge_outputs',
            Merge(3),
            inputs={'in1': ('mean_displacement_plot', png_format)})

        if not StructAlignment:
            # Crop the PET field of view to the parameterised bounding box
            cropping = pipeline.add(
                'pet_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', cropping,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_mc', cropping, 'pet_image')

            # Same cropping applied to the uncorrected image for comparison
            cropping_no_mc = pipeline.add(
                'pet_no_mc_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_no_mc, 'merged_file', cropping_no_mc,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_no_mc', cropping_no_mc,
                                 'pet_image')

            if mni_reg:
                if self.branch('dynamic_pet_mc'):
                    # Temporal mean of the dynamic series as the moving
                    # image for MNI registration
                    t_mean = pipeline.add(
                        'PET_temporal_mean',
                        ImageMaths(op_string='-Tmean'),
                        inputs={'in_file': (cropping, 'pet_cropped')},
                        requirements=[fsl_req.v('5.0.9')])

                # Non-linear (SyN) registration to the PET MNI template
                reg_tmean2MNI = pipeline.add(
                    'reg2MNI',
                    AntsRegSyn(num_dimensions=3,
                               transformation='s',
                               out_prefix='reg2MNI',
                               num_threads=4,
                               ref_file=self.parameter('PET_template_MNI')),
                    wall_time=25,
                    requirements=[ants_req.v('2')])

                if self.branch('dynamic_pet_mc'):
                    pipeline.connect(t_mean, 'out_file', reg_tmean2MNI,
                                     'input_file')

                    # Warp + affine combined so they can be applied in one
                    # ApplyTransforms call
                    merge_trans = pipeline.add('merge_transforms',
                                               Merge(2),
                                               inputs={
                                                   'in1': (reg_tmean2MNI,
                                                           'warp_file'),
                                                   'in2':
                                                   (reg_tmean2MNI, 'regmat')
                                               },
                                               wall_time=1)

                    apply_trans = pipeline.add(
                        'apply_trans',
                        ApplyTransforms(
                            reference_image=self.parameter('PET_template_MNI'),
                            interpolation='Linear',
                            input_image_type=3),
                        inputs={
                            'input_image': (cropping, 'pet_cropped'),
                            'transforms': (merge_trans, 'out')
                        },
                        wall_time=7,
                        mem_gb=24,
                        requirements=[ants_req.v('2')])
                    # NOTE(review): the trailing comma below turns this
                    # statement into a discarded 1-tuple; harmless at
                    # runtime, but likely a typo
                    pipeline.connect(apply_trans, 'output_image',
                                     merge_outputs, 'in2'),
                else:
                    pipeline.connect(cropping, 'pet_cropped', reg_tmean2MNI,
                                     'input_file')
                    pipeline.connect(reg_tmean2MNI, 'reg_file', merge_outputs,
                                     'in2')
            else:
                pipeline.connect(cropping, 'pet_cropped', merge_outputs, 'in2')
            pipeline.connect(cropping_no_mc, 'pet_cropped', merge_outputs,
                             'in3')
        else:
            # When aligned to a structural image, skip cropping/MNI and pass
            # the merged (or static) images straight through
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', merge_outputs, 'in2')
                pipeline.connect(merge_no_mc, 'merged_file', merge_outputs,
                                 'in3')
            else:
                pipeline.connect(static_mc, 'static_mc', merge_outputs, 'in2')
                pipeline.connect(static_mc, 'static_no_mc', merge_outputs,
                                 'in3')


#         mcflirt = pipeline.add('mcflirt', MCFLIRT())
#                 'in_file': (merge_mc_ps, 'merged_file'),
#                 cost='normmi',

        # Copy every collected output into a single results directory
        copy2dir = pipeline.add('copy2dir',
                                CopyToDir(),
                                inputs={'in_files': (merge_outputs, 'out')})
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_output('dynamic_motion_correction_results',
                                    copy2dir, 'out_dir')
        else:
            pipeline.connect_output('static_motion_correction_results',
                                    copy2dir, 'out_dir')
        return pipeline
Esempio n. 25
0
    def preprocess_pipeline(self, **name_maps):
        """
        Performs a series of FSL preprocessing steps, including Eddy and Topup

        Optionally denoises the series (MRtrix ``dwidenoise``), reorients it
        to the standard orientation, runs ``dwipreproc`` (with distortion
        correction when a 'reverse_phase' reference is provided), then
        brain-masks and bias-corrects the result.

        Parameters
        ----------
        phase_dir : str{AP|LR|IS}
            The phase encode direction
        **name_maps
            Name-mapping keyword arguments forwarded to ``new_pipeline``.

        Returns
        -------
        pipeline
            The constructed preprocessing pipeline.

        Raises
        ------
        BananaUsageError
            If the input series is not DICOM and no explicit gradient
            directions/b-values were provided.
        """

        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite,
                      distort_correct_cite, n4_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)

        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            citations=references)

        # Create nodes to gradients to FSL format
        if self.input('series').format == dicom_format:
            # Gradients can be extracted directly from the DICOM headers
            extract_grad = pipeline.add(
                "extract_grad",
                ExtractFSLGradients(),
                inputs={
                    'in_file': ('series', dicom_format)},
                outputs={
                    'grad_dirs': ('bvecs_file', fsl_bvecs_format),
                    'bvalues': ('bvals_file', fsl_bvals_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
                               'in2': (extract_grad, 'bvals_file')}
        elif self.provided('grad_dirs') and self.provided('bvalues'):
            # Otherwise bvecs/bvals must have been supplied explicitly
            grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
                               'in2': ('bvalues', fsl_bvals_format)}
        else:
            raise BananaUsageError(
                "Either input 'magnitude' image needs to be in DICOM format "
                "or gradient directions and b-values need to be explicitly "
                "provided to {}".format(self))

        # Gradient merge node
        grad_fsl = pipeline.add(
            "grad_fsl",
            MergeTuple(2),
            inputs=grad_fsl_inputs)

        # (node, port) pair reused by all downstream gradient consumers
        gradients = (grad_fsl, 'out')

        # Create node to reorient preproc out_file
        if self.branch('reorient2std'):
            reorient = pipeline.add(
                'fslreorient2std',
                fsl.utils.Reorient2Std(
                    output_type='NIFTI_GZ'),
                inputs={
                    'in_file': ('series', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.9')])
            reoriented = (reorient, 'out_file')
        else:
            reoriented = ('series', nifti_gz_format)

        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(),
                inputs={
                    'in_file': reoriented},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Calculate residual noise
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': reoriented,
                    'in2': (denoise, 'noise')})

            # residual = original - denoised-noise estimate, exposed as a
            # QC output
            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract'),
                inputs={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'noise_residual': ('out_file', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            denoised = (denoise, 'out_file')
        else:
            denoised = reoriented

        # Preproc kwargs
        preproc_kwargs = {}
        preproc_inputs = {'in_file': denoised,
                          'grad_fsl': gradients}

        # Distortion correction requires a reverse phase-encode acquisition
        if self.provided('reverse_phase'):

            if self.provided('magnitude', default_okay=False):
                dwi_reference = ('magnitude', mrtrix_image_format)
            else:
                # Extract b=0 volumes
                dwiextract = pipeline.add(
                    'dwiextract',
                    ExtractDWIorB0(
                        bzero=True,
                        out_ext='.nii.gz'),
                    inputs={
                        'in_file': denoised,
                        'fslgrad': gradients},
                    requirements=[mrtrix_req.v('3.0rc3')])

                # Get first b=0 from dwi b=0 volumes
                extract_first_b0 = pipeline.add(
                    "extract_first_vol",
                    MRConvert(
                        coord=(3, 0)),
                    inputs={
                        'in_file': (dwiextract, 'out_file')},
                    requirements=[mrtrix_req.v('3.0rc3')])

                dwi_reference = (extract_first_b0, 'out_file')

            # Concatenate extracted forward rpe with reverse rpe
            combined_images = pipeline.add(
                'combined_images',
                MRCat(),
                inputs={
                    'first_scan': dwi_reference,
                    'second_scan': ('reverse_phase', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Create node to assign the right PED to the diffusion
            prep_dwi = pipeline.add(
                'prepare_dwi',
                PrepareDWI(),
                inputs={
                    'pe_dir': ('ped', float),
                    'ped_polarity': ('pe_angle', float)})

            preproc_kwargs['rpe_pair'] = True

            distortion_correction = True
            preproc_inputs['se_epi'] = (combined_images, 'out_file')
        else:
            distortion_correction = False
            preproc_kwargs['rpe_none'] = True

        if self.parameter('preproc_pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')

        # Main eddy/topup preprocessing step
        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.nii.gz',
                # FIXME: Need to determine this programmatically
                # eddy_parameters = '--data_is_shelled '
                temp_dir='dwipreproc_tempdir',
                **preproc_kwargs),
            inputs=preproc_inputs,
            outputs={
                'eddy_par': ('eddy_parameters', eddy_par_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
            wall_time=60)

        if distortion_correction:
            pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')

        # Brain mask derived from the preprocessed series
        mask = pipeline.add(
            'dwi2mask',
            BrainMask(
                out_file='brainmask.nii.gz'),
            inputs={
                'in_file': (preproc, 'out_file'),
                'grad_fsl': gradients},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Create bias correct node
        pipeline.add(
            "bias_correct",
            DWIBiasCorrect(
                method='ants'),
            inputs={
                'grad_fsl': gradients,  # internal
                'in_file': (preproc, 'out_file'),
                'mask': (mask, 'out_file')},
            outputs={
                'series_preproc': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])

        return pipeline
Esempio n. 26
0
    def intensity_normalisation_pipeline(self, **name_maps):
        """
        Normalise DWI intensities across all sessions in the study.

        Joins every session's preprocessed image and brain mask into
        study-wide lists, runs MRtrix's ``dwiintensitynorm`` over them, and
        selects the normalised image belonging to the current session back
        out. The group-wise FA template and white-matter mask are exposed as
        per-study outputs.

        Parameters
        ----------
        **name_maps
            Name-mapping keyword arguments forwarded to ``new_pipeline``.

        Returns
        -------
        pipeline
            The constructed intensity-normalisation pipeline.

        Raises
        ------
        ArcanaMissingDataException
            If the study contains only a single session.
        """
        if self.num_sessions < 2:
            raise ArcanaMissingDataException(
                "Cannot normalise intensities of DWI images as study only "
                "contains a single session")
        elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
            # Too few sessions makes the group template unreliable; warn but
            # proceed
            logger.warning(
                "The number of sessions in the study ({}) is less than the "
                "recommended number for intensity normalisation ({}). The "
                "results may be unreliable".format(
                    self.num_sessions,
                    self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))

        pipeline = self.new_pipeline(
            name='intensity_normalization',
            # NOTE(review): desc appears copied from the bias-correction
            # pipeline -- confirm and update to describe normalisation
            desc="Corrects for B1 field inhomogeneity",
            # FIX: 'citations' previously received a software requirement
            # (mrtrix_req.v('3.0rc3')) instead of a citation object, unlike
            # every other pipeline in this module
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Convert the preprocessed NIfTI series to MRtrix (.mif) format
        mrconvert = pipeline.add(
            'mrconvert',
            MRConvert(
                out_ext='.mif'),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Pair subject and visit ids together, expanding so they can be
        # joined and chained together
        session_ids = pipeline.add(
            'session_ids',
            utility.IdentityInterface(
                ['subject_id', 'visit_id']),
            inputs={
                'subject_id': (Study.SUBJECT_ID, int),
                'visit_id': (Study.VISIT_ID, int)})

        # Set up join nodes
        join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
        join_over_subjects = pipeline.add(
            'join_over_subjects',
            utility.IdentityInterface(
                join_fields),
            inputs={
                'masks': (self.brain_mask_spec_name, nifti_gz_format),
                'dwis': (mrconvert, 'out_file'),
                'subject_ids': (session_ids, 'subject_id'),
                'visit_ids': (session_ids, 'visit_id')},
            joinsource=self.SUBJECT_ID,
            joinfield=join_fields)

        join_over_visits = pipeline.add(
            'join_over_visits',
            Chain(
                join_fields),
            inputs={
                'dwis': (join_over_subjects, 'dwis'),
                'masks': (join_over_subjects, 'masks'),
                'subject_ids': (join_over_subjects, 'subject_ids'),
                'visit_ids': (join_over_subjects, 'visit_ids')},
            joinsource=self.VISIT_ID,
            joinfield=join_fields)

        # Intensity normalization
        intensity_norm = pipeline.add(
            'dwiintensitynorm',
            DWIIntensityNorm(),
            inputs={
                'in_files': (join_over_visits, 'dwis'),
                'masks': (join_over_visits, 'masks')},
            outputs={
                'norm_intens_fa_template': ('fa_template',
                                            mrtrix_image_format),
                'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Set up expand nodes
        pipeline.add(
            'expand', SelectSession(),
            inputs={
                'subject_ids': (join_over_visits, 'subject_ids'),
                'visit_ids': (join_over_visits, 'visit_ids'),
                'inlist': (intensity_norm, 'out_files'),
                'subject_id': (Study.SUBJECT_ID, int),
                'visit_id': (Study.VISIT_ID, int)},
            outputs={
                'norm_intensity': ('item', mrtrix_image_format)})

        # Connect inputs
        return pipeline
Esempio n. 27
0
File: dmri.py Progetto: amrka/banana
    def preprocess_pipeline(self, **name_maps):  # @UnusedVariable @IgnorePep8
        """
        Performs a series of FSL preprocessing steps, including Eddy and Topup

        Older variant using the legacy pipeline API ('connect'/'references'
        keyword arguments). Optionally denoises the magnitude image, runs
        ``dwipreproc`` (with distortion correction when a reference or
        reverse-phase scan is provided), and reorients the result to the
        standard orientation.

        Parameters
        ----------
        phase_dir : str{AP|LR|IS}
            The phase encode direction
        **name_maps
            Name-mapping keyword arguments forwarded to ``new_pipeline``.

        Returns
        -------
        pipeline
            The constructed preprocessing pipeline.

        Raises
        ------
        ArcanaDesignError
            If the magnitude image is not DICOM and no explicit gradient
            directions/b-values were provided.
        """

        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite,
                      distort_correct_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)

        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            references=references)

        # Create nodes to gradients to FSL format
        if self.input('magnitude').format == dicom_format:
            # Gradients can be extracted directly from the DICOM headers
            extract_grad = pipeline.add(
                "extract_grad",
                ExtractFSLGradients(),
                inputs={
                    'in_file': ('magnitude', dicom_format)},
                outputs={
                    'bvecs_file': ('grad_dirs', fsl_bvecs_format),
                    'bvals_file': ('bvalues', fsl_bvals_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            grad_fsl_kwargs = {
                'connect': {'in1': (extract_grad, 'bvecs_file'),
                            'in2': (extract_grad, 'bvals_file')}}
        elif self.provided('grad_dirs') and self.provided('bvalues'):
            # Otherwise bvecs/bvals must have been supplied explicitly
            grad_fsl_kwargs = {
                'inputs': {'in1': ('grad_dirs', fsl_bvecs_format),
                           'in2': ('bvalues', fsl_bvals_format)}}
        else:
            raise ArcanaDesignError(
                "Either input 'magnitude' image needs to be in DICOM format "
                "or gradient directions and b-values need to be explicitly "
                "provided to {}".format(self))

        # Gradient merge node
        grad_fsl = pipeline.add(
            "grad_fsl",
            MergeTuple(2),
            **grad_fsl_kwargs)

        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(),
                inputs={
                    'in_file': ('magnitude', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Calculate residual noise
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': ('magnitude', nifti_gz_format)},
                connect={
                    'in2': (denoise, 'noise')})

            # residual = original - denoised-noise estimate, exposed as a
            # QC output
            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract'),
                connect={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'out_file': ('noise_residual', mrtrix_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

        # Preproc kwargs
        preproc_kwargs = {}

        # Distortion correction requires a dwi reference or a reverse
        # phase-encode acquisition
        if (self.provided('dwi_reference') or
                self.provided('reverse_phase')):
            # Extract b=0 volumes
            dwiextract = pipeline.add(
                'dwiextract',
                ExtractDWIorB0(
                    bzero=True,
                    out_ext='.nii.gz'),
                inputs={
                    'in_file': ('magnitude', dicom_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Get first b=0 from dwi b=0 volumes
            mrconvert = pipeline.add(
                "mrconvert",
                MRConvert(
                    coord=(3, 0)),
                connect={
                    'in_file': (dwiextract, 'out_file')},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Concatenate extracted forward rpe with reverse rpe
            mrcat = pipeline.add(
                'mrcat',
                MRCat(),
                inputs={
                    'second_scan': ((
                        'dwi_reference' if self.provided('dwi_reference')
                        else 'reverse_phase'), mrtrix_format)},
                connect={
                    'first_scan': (mrconvert, 'out_file')},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Create node to assign the right PED to the diffusion
            prep_dwi = pipeline.add(
                'prepare_dwi',
                PrepareDWI(),
                inputs={
                    'pe_dir': ('ped', float),
                    'ped_polarity': ('pe_angle', float)})

            preproc_kwargs['rpe_pair'] = True

            distortion_correction = True
            preproc_conns = {'connect': {'se_epi': (mrcat, 'out_file')}}
        else:
            distortion_correction = False
            preproc_kwargs['rpe_none'] = True
            preproc_conns = {}

        if self.parameter('preproc_pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')

        # Main eddy/topup preprocessing step
        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.nii.gz',
                # FIXME: Need to determine this programmatically
                # eddy_parameters = '--data_is_shelled '
                temp_dir='dwipreproc_tempdir',
                **preproc_kwargs),
            connect={
                'grad_fsl': (grad_fsl, 'out')},
            outputs={
                'eddy_parameters': ('eddy_par', eddy_par_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
            wall_time=60,
            **preproc_conns)
        # Feed either the denoised image or the raw magnitude into preproc
        if self.branch('preproc_denoise'):
            pipeline.connect(denoise, 'out_file', preproc, 'in_file')
        else:
            pipeline.connect_input('magnitude', preproc, 'in_file',
                                   nifti_gz_format)
        if distortion_correction:
            pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')

        # Create node to reorient preproc out_file
        pipeline.add(
            'fslreorient2std',
            fsl.utils.Reorient2Std(),
            connect={
                'in_file': (preproc, 'out_file')},
            outputs={
                'out_file': ('preproc', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        return pipeline
Esempio n. 28
0
File: dmri.py Progetto: amrka/banana
    def intensity_normalisation_pipeline(self, **name_maps):
        """
        Construct a pipeline that normalises DWI intensities across the
        whole study (legacy pipeline-API variant).

        Every session's bias-corrected image and brain mask are joined into
        study-wide lists, run through MRtrix's ``dwiintensitynorm``, and the
        normalised image matching the current session is selected back out.
        The group-wise FA template and white-matter mask are also exposed as
        outputs.
        """
        pipeline = self.new_pipeline(
            name='intensity_normalization',
            desc="Corrects for B1 field inhomogeneity",
            references=[mrtrix_req.v('3.0rc3')],
            name_maps=name_maps)

        # Merge bvecs/bvals into the tuple form expected by MRConvert
        grads = pipeline.add("grad_merge", MergeTuple(2))
        pipeline.connect_input('grad_dirs', grads, 'in1')
        pipeline.connect_input('bvalues', grads, 'in2')

        # Convert the bias-corrected NIfTI image to MRtrix (.mif) format
        to_mif = pipeline.add('mrconvert', MRConvert())
        to_mif.inputs.out_ext = '.mif'
        pipeline.connect_input('bias_correct', to_mif, 'in_file')
        pipeline.connect(grads, 'out', to_mif, 'grad_fsl')

        # Join every session's image and mask (plus its ids) into flat
        # study-wide lists
        chained = ['dwis', 'masks', 'subject_ids', 'visit_ids']
        subj_join = pipeline.add(
            'join_subjects',
            IdentityInterface(chained),
            joinsource=self.SUBJECT_ID,
            joinfield=chained)
        pipeline.connect_subject_id(subj_join, 'subject_ids')
        pipeline.connect_visit_id(subj_join, 'visit_ids')
        pipeline.connect_input('brain_mask', subj_join, 'masks')
        pipeline.connect(to_mif, 'out_file', subj_join, 'dwis')

        visit_join = pipeline.add(
            'join_visits',
            Chain(chained),
            joinsource=self.VISIT_ID,
            joinfield=chained)
        for field in chained:
            pipeline.connect(subj_join, field, visit_join, field)

        # Group-wise intensity normalisation over the joined lists
        norm = pipeline.add('dwiintensitynorm', DWIIntensityNorm())
        pipeline.connect(visit_join, 'dwis', norm, 'in_files')
        pipeline.connect(visit_join, 'masks', norm, 'masks')

        # Select the normalised image belonging to the current session
        session_select = pipeline.add('expand', SelectSession())
        pipeline.connect_subject_id(session_select, 'subject_id')
        pipeline.connect_visit_id(session_select, 'visit_id')
        pipeline.connect(visit_join, 'subject_ids', session_select,
                         'subject_ids')
        pipeline.connect(visit_join, 'visit_ids', session_select,
                         'visit_ids')
        pipeline.connect(norm, 'out_files', session_select, 'items')

        # Expose the per-session image and the per-study template/mask
        pipeline.connect_output('norm_intensity', session_select, 'item')
        pipeline.connect_output('norm_intens_fa_template', norm,
                                'fa_template')
        pipeline.connect_output('norm_intens_wm_mask', norm, 'wm_mask')
        return pipeline