Example #1
    def bet_T1(self, **name_maps):

        pipeline = self.new_pipeline(
            name='BET_T1',
            name_maps=name_maps,
            desc=("Brain extraction pipeline using FSL's BET"),
            citations=[fsl_cite])

        bias = pipeline.add('n4_bias_correction',
                            ants.N4BiasFieldCorrection(),
                            inputs={'input_image': ('t1', nifti_gz_format)},
                            requirements=[ants_req.v('1.9')],
                            wall_time=60,
                            mem_gb=12)

        pipeline.add('bet',
                     fsl.BET(frac=0.15,
                             reduce_bias=True,
                             output_type='NIFTI_GZ'),
                     inputs={'in_file': (bias, 'output_image')},
                     outputs={
                         'betted_T1': ('out_file', nifti_gz_format),
                         'betted_T1_mask': ('mask_file', nifti_gz_format)
                     },
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=8,
                     wall_time=45)

        return pipeline
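
Note: the pipeline.add calls above wire nipype interfaces into banana's pipeline machinery. For comparison, here is a minimal standalone nipype sketch of the same two-step chain (N4 bias correction, then BET); the file name 't1.nii.gz' is hypothetical and the requirements/resource bookkeeping is omitted.

from nipype.interfaces import ants, fsl

# N4 bias-field correction of the raw T1 (hypothetical local file)
n4 = ants.N4BiasFieldCorrection(dimension=3, input_image='t1.nii.gz')
n4_result = n4.run()

# Skull-strip the bias-corrected image; frac=0.15 is a conservative
# fractional intensity threshold and reduce_bias cleans up bias/neck effects
bet = fsl.BET(in_file=n4_result.outputs.output_image,
              frac=0.15, reduce_bias=True, mask=True,
              output_type='NIFTI_GZ')
bet_result = bet.run()
print(bet_result.outputs.out_file, bet_result.outputs.mask_file)
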
Example #2
    def timeseries_normalization_to_atlas_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='timeseries_normalization_to_atlas_pipeline',
            desc=("Apply ANTs transformation to the fmri filtered file to "
                  "normalize it to MNI 2mm."),
            citations=[fsl_cite],
            name_maps=name_maps)

        merge_trans = pipeline.add('merge_transforms',
                                   NiPypeMerge(3),
                                   inputs={
                                       'in1': ('coreg_to_tmpl_ants_warp',
                                               nifti_gz_format),
                                       'in2': ('coreg_to_tmpl_ants_mat',
                                               text_matrix_format),
                                       'in3':
                                       ('coreg_matrix', text_matrix_format)
                                   },
                                   wall_time=1)

        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(interpolation='Linear', input_image_type=3),
            inputs={
                'reference_image': ('template_brain', nifti_gz_format),
                'input_image': ('cleaned_file', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            outputs={'normalized_ts': ('output_image', nifti_gz_format)},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2')])

        return pipeline
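
The 'transforms' input above receives the merged list from NiPypeMerge. A minimal standalone sketch of the same antsApplyTransforms call is shown below, with hypothetical file names; antsApplyTransforms applies a stacked transform list in reverse order (the last entry is applied first), which is why the non-linear warp to template space comes first, matching the merge order above.

from nipype.interfaces.ants import ApplyTransforms

at = ApplyTransforms(
    input_image='filtered_func.nii.gz',            # cleaned fMRI time-series
    reference_image='MNI152_T1_2mm_brain.nii.gz',  # template defining the output grid
    transforms=['coreg_to_tmpl_warp.nii.gz',       # non-linear warp to template
                'coreg_to_tmpl_affine.mat',        # affine to template
                'coreg.mat'],                      # functional-to-structural affine
    interpolation='Linear',
    input_image_type=3)                            # treat the input as a time-series
result = at.run()
print(result.outputs.output_image)
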
Example #3
File: base.py Project: amrka/banana
    def _ants_linear_coreg_pipeline(self, **name_maps):
        """
        Registers an MR scan to a reference MR scan using ANTs' linear_reg
        command
        """

        pipeline = self.new_pipeline(
            name='linear_coreg',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image using ANTs")

        pipeline.add('ANTs_linear_Reg',
                     AntsRegSyn(num_dimensions=3,
                                transformation='r',
                                out_prefix='reg2hires'),
                     inputs={
                         'ref_file': ('coreg_ref', nifti_gz_format),
                         'input_file': ('preproc', nifti_gz_format)
                     },
                     outputs={
                         'reg_file': ('coreg', nifti_gz_format),
                         'regmat': ('coreg_matrix', text_matrix_format)
                     },
                     wall_time=10,
                     requirements=[ants_req.v('2.0')])

        return pipeline
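
AntsRegSyn appears to be a project wrapper around the antsRegistrationSyN family of scripts. A roughly comparable standalone step can be sketched with nipype's RegistrationSynQuick (an assumption: that class wraps the 'quick' variant, so its defaults differ from the full script); file names are hypothetical.

from nipype.interfaces.ants import RegistrationSynQuick

reg = RegistrationSynQuick(
    fixed_image='coreg_ref.nii.gz',     # hypothetical reference scan
    moving_image='preproc.nii.gz',      # hypothetical scan to register
    transform_type='r',                 # rigid-only, like transformation='r' above
    output_prefix='reg2hires')
result = reg.run()
print(result.outputs.warped_image, result.outputs.out_matrix)
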
Example #4
    def bet_T1(self, **name_maps):

        pipeline = self.new_pipeline(name='BET_T1',
                                     name_maps=name_maps,
                                     desc=("python implementation of BET"),
                                     references=[fsl_cite])

        bias = pipeline.add('n4_bias_correction',
                            ants.N4BiasFieldCorrection(),
                            inputs={'input_image': ('t1', nifti_gz_format)},
                            requirements=[ants_req.v('1.9')],
                            wall_time=60,
                            mem_gb=12)

        pipeline.add('bet',
                     fsl.BET(frac=0.15, reduce_bias=True),
                     connections={'in_file': (bias, 'output_image')},
                     outputs={
                         'out_file': ('betted_T1', nifti_gz_format),
                         'mask_file': ('betted_T1_mask', nifti_gz_format)
                     },
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=8,
                     wall_time=45)

        return pipeline
Example #5
    def cet_T1(self, **name_maps):
        pipeline = self.new_pipeline(
            name='CET_T1',
            name_maps=name_maps,
            desc=("Construct cerebellum mask using SUIT template"),
            citations=[fsl_cite])

        # FIXME: Should convert to inputs
        nl = self._lookup_nl_tfm_inv_name('MNI')
        linear = self._lookup_l_tfm_to_name('MNI')

        # Initially use MNI space to warp SUIT into T1 and threshold to mask
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(2),
                                   inputs={
                                       'in2': (nl, nifti_gz_format),
                                       'in1': (linear, nifti_gz_format)
                                   })

        apply_trans = pipeline.add('ApplyTransform',
                                   ants.resampling.ApplyTransforms(
                                       interpolation='NearestNeighbor',
                                       input_image_type=3,
                                       invert_transform_flags=[True, False]),
                                   inputs={
                                       'reference_image':
                                       ('betted_T1', nifti_gz_format),
                                       'input_image':
                                       ('suit_mask', nifti_gz_format),
                                       'transforms': (merge_trans, 'out')
                                   },
                                   requirements=[ants_req.v('1.9')],
                                   mem_gb=16,
                                   wall_time=120)

        pipeline.add('maths2',
                     fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                          op_string='-mas'),
                     inputs={
                         'in_file': ('betted_T1', nifti_gz_format),
                         'in_file2': (apply_trans, 'output_image')
                     },
                     outputs={
                         'cetted_T1': ('out_file', nifti_gz_format),
                         'cetted_T1_mask': ('output_image', nifti_gz_format)
                     },
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=16,
                     wall_time=5)

        return pipeline
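
The invert_transform_flags list above pairs positionally with the merged transforms (in1 is inverted, in2 is used as-is). A minimal standalone sketch of that node, with hypothetical file names:

from nipype.interfaces.ants import ApplyTransforms

at = ApplyTransforms(
    input_image='suit_mask.nii.gz',          # hypothetical SUIT-space mask
    reference_image='betted_T1.nii.gz',      # hypothetical brain-extracted T1
    transforms=['t1_to_mni_affine.mat',      # inverted (flag True)
                'mni_to_t1_warp.nii.gz'],    # used as-is (flag False)
    invert_transform_flags=[True, False],
    interpolation='NearestNeighbor',         # keep the mask binary
    input_image_type=3)
print(at.run().outputs.output_image)
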
Example #6
File: dmri.py Project: amrka/banana
    def bias_correct_pipeline(self, **name_maps):  # @UnusedVariable @IgnorePep8
        """
        Corrects B1 field inhomogeneities
        """

#             inputs=[FilesetSpec('preproc', nifti_gz_format),
#                     FilesetSpec('brain_mask', nifti_gz_format),
#                     FilesetSpec('grad_dirs', fsl_bvecs_format),
#                     FilesetSpec('bvalues', fsl_bvals_format)],
#             outputs=[FilesetSpec('bias_correct', nifti_gz_format)],

        bias_method = self.parameter('bias_correct_method')
        pipeline = self.new_pipeline(
            name='bias_correct',
            desc="Corrects for B1 field inhomogeneity",
            references=[fast_cite,
                        (n4_cite if bias_method == 'ants' else fsl_cite)],
            name_maps=name_maps)
        # Create bias correct node
        bias_correct = pipeline.add(
            "bias_correct", DWIBiasCorrect(),
            requirements=(
                [mrtrix_req.v('3.0rc3')] +
                [ants_req.v('2.0')
                 if bias_method == 'ants' else fsl_req.v('5.0.9')]))
        bias_correct.inputs.method = bias_method
        # Gradient merge node
        fsl_grads = pipeline.add(
            "fsl_grads",
            MergeTuple(2))
        # Connect nodes
        pipeline.connect(fsl_grads, 'out', bias_correct, 'grad_fsl')
        # Connect to inputs
        pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
        pipeline.connect_input('bvalues', fsl_grads, 'in2')
        pipeline.connect_input('preproc', bias_correct, 'in_file')
        pipeline.connect_input('brain_mask', bias_correct, 'mask')
        # Connect to outputs
        pipeline.connect_output('bias_correct', bias_correct, 'out_file')
        # Check inputs/output are connected
        return pipeline
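
This example uses the older explicit connect()/connect_input()/connect_output() style instead of the inputs/outputs dicts seen elsewhere. For comparison, nipype ships its own MRtrix3 DWIBiasCorrect wrapper; the sketch below corresponds roughly to the 'ants' branch, with hypothetical file names (note the banana interface takes a 'method' string rather than the use_ants/use_fsl flags assumed here).

from nipype.interfaces.mrtrix3 import DWIBiasCorrect

bias = DWIBiasCorrect(
    in_file='dwi_preproc.nii.gz',        # hypothetical preprocessed dMRI series
    in_mask='brain_mask.nii.gz',         # hypothetical brain mask
    grad_fsl=('dwi.bvec', 'dwi.bval'),   # FSL-style gradient pair (bvecs, bvals)
    use_ants=True)                       # N4-based correction, i.e. the 'ants' method
result = bias.run()
print(result.outputs.out_file)
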
Example #7
File: base.py Project: amrka/banana
    def _ants_to_atlas_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='coregister_to_atlas',
            name_maps=name_maps,
            desc=("Nonlinearly registers a MR scan to a standard space,"
                  "e.g. MNI-space"),
            references=[fsl_cite])

        ants_reg = pipeline.add('Struct2MNI_reg',
                                AntsRegSyn(num_dimensions=3,
                                           transformation='s',
                                           out_prefix='Struct2MNI',
                                           num_threads=4),
                                inputs={
                                    'input_file':
                                    (self.brain_spec_name, nifti_gz_format),
                                    'ref_file':
                                    ('atlas_brain', nifti_gz_format)
                                },
                                outputs={
                                    'reg_file':
                                    ('coreg_to_atlas', nifti_gz_format),
                                    'regmat':
                                    ('coreg_to_atlas_mat', text_matrix_format),
                                    'warp_file':
                                    ('coreg_to_atlas_warp', nifti_gz_format)
                                },
                                wall_time=25,
                                requirements=[ants_req.v('2.0')])

        pipeline.add('slices',
                     FSLSlices(outname='coreg_to_atlas_report'),
                     inputs={'im1': ('atlas', nifti_gz_format)},
                     connect={'im2': (ants_reg, 'reg_file')},
                     outputs={'report': ('coreg_to_atlas_report', gif_format)},
                     wall_time=1,
                     requirements=[fsl_req.v('5.0.8')])

        return pipeline
Example #8
    def cet_T2s(self, **options):

        pipeline = self.new_pipeline(
            name='CET_T2s',
            desc=("Construct cerebellum mask using SUIT template"),
            default_options={
                'SUIT_mask': self._lookup_template_mask_path('SUIT')
            },
            citations=[fsl_cite],
            options=options)

        # Initially use MNI space to warp SUIT mask into T2s space
        merge_trans = pipeline.add(
            'merge_transforms',
            Merge(3),
            inputs={
                'in3': (self._lookup_nl_tfm_inv_name('SUIT'), nifti_gz_format),
                'in2': (self._lookup_l_tfm_to_name('SUIT'), nifti_gz_format),
                'in1': ('T2s_to_T1_mat', text_matrix_format)
            })

        apply_trans = pipeline.add(
            'ApplyTransform',
            ants.resampling.ApplyTransforms(
                interpolation='NearestNeighbor',
                input_image_type=3,
                invert_transform_flags=[True, True, False],
                input_image=pipeline.option('SUIT_mask')),
            inputs={
                'transforms': (merge_trans, 'out'),
                'reference_image': ('betted_T2s', nifti_gz_format)
            },
            outputs={'cetted_T2s_mask': ('output_image', nifti_gz_format)},
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=120)

        # Combine masks
        maths1 = pipeline.add('combine_masks',
                              fsl.utils.ImageMaths(suffix='_optiBET_masks',
                                                   op_string='-mas',
                                                   output_type='NIFTI_GZ'),
                              inputs={
                                  'in_file':
                                  ('betted_T2s_mask', nifti_gz_format),
                                  'in_file2': (apply_trans, 'output_image')
                              },
                              requirements=[fsl_req.v('5.0.8')],
                              mem_gb=16,
                              wall_time=5)

        # Mask out t2s image
        pipeline.add('mask_t2s',
                     fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                          op_string='-mas',
                                          output_type='NIFTI_GZ'),
                     inputs={
                         'in_file': ('betted_T2s', nifti_gz_format),
                         'in_file2': (maths1, 'output_image')
                     },
                     outputs={'cetted_T2s': ('out_file', nifti_gz_format)},
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=16,
                     wall_time=5)

        pipeline.add(
            'mask_t2s_last_echo',
            fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                 op_string='-mas',
                                 output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('betted_T2s_last_echo', nifti_gz_format),
                'in_file2': (maths1, 'output_image')
            },
            outputs={'cetted_T2s_last_echo': ('out_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            mem_gb=16,
            wall_time=5)

        return pipeline
Example #9
    def cv_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(name='cv_pipeline',
                                     name_maps=name_maps,
                                     desc="Compute Composite Vein Image",
                                     citations=[fsl_cite, matlab_cite])

        # Interpolate priors and atlas
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(3),
                                   inputs={
                                       'in1':
                                       ('coreg_ants_mat', text_matrix_format),
                                       'in2': ('coreg_to_tmpl_ants_mat',
                                               text_matrix_format),
                                       'in3': ('coreg_to_tmpl_ants_warp',
                                               nifti_gz_format)
                                   })

        apply_trans_q = pipeline.add(
            'ApplyTransform_Q_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_qsm_prior', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_s = pipeline.add(
            'ApplyTransform_S_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_swi_prior', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_a = pipeline.add(
            'ApplyTransform_A_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False],
            ),
            inputs={
                'reference_image': ('qsm', nifti_gz_format),
                'input_image': ('mni_template_atlas_prior', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_v = pipeline.add(
            'ApplyTransform_V_Atlas',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_vein_atlas', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        # Run CV code
        pipeline.add(
            'cv_image',
            interface=CompositeVeinImage(),
            inputs={
                'mask': ('brain_mask', nifti_format),
                'qsm': ('qsm', nifti_format),
                'swi': ('swi', nifti_format),
                'q_prior': (apply_trans_q, 'output_image'),
                's_prior': (apply_trans_s, 'output_image'),
                'a_prior': (apply_trans_a, 'output_image'),
                'vein_atlas': (apply_trans_v, 'output_image')
            },
            outputs={'composite_vein_image': ('out_file', nifti_format)},
            requirements=[matlab_req.v('R2015a')],
            wall_time=300,
            mem_gb=24)

        return pipeline
Example #10
    def motion_correction_pipeline(self, **name_maps):

        StructAlignment = 'struct2align' in self.input_names

        pipeline = self.new_pipeline(
            name='pet_mc',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will generate a motion corrected PET"
                  "image using information extracted from the MR-based "
                  "motion detection pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        check_pet = pipeline.add(
            'check_pet_data',
            CheckPetMCInputs(),
            inputs={
                'pet_data': ('pet_data_prepared', directory_format),
                'reference': ('ref_brain', nifti_gz_format)
            },
            requirements=[fsl_req.v('5.0.9'),
                          mrtrix_req.v('3.0rc3')])
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_input('fixed_binning_mats', check_pet,
                                   'motion_mats')
        else:
            pipeline.connect_input('average_mats', check_pet, 'motion_mats')
            pipeline.connect_input('correction_factors', check_pet,
                                   'corr_factors')

        if StructAlignment:
            struct_reg = pipeline.add('ref2structural_reg',
                                      FLIRT(dof=6,
                                            cost_func='normmi',
                                            cost='normmi',
                                            output_type='NIFTI_GZ'),
                                      inputs={
                                          'reference':
                                          ('ref_brain', nifti_gz_format),
                                          'in_file':
                                          ('struct2align', nifti_gz_format)
                                      },
                                      requirements=[fsl_req.v('5.0.9')])

        if self.branch('dynamic_pet_mc'):
            pet_mc = pipeline.add('pet_mc',
                                  PetImageMotionCorrection(),
                                  inputs={
                                      'pet_image': (check_pet, 'pet_images'),
                                      'motion_mat': (check_pet, 'motion_mats'),
                                      'pet2ref_mat': (check_pet, 'pet2ref_mat')
                                  },
                                  requirements=[fsl_req.v('5.0.9')],
                                  iterfield=['pet_image', 'motion_mat'])
        else:
            pet_mc = pipeline.add(
                'pet_mc',
                PetImageMotionCorrection(),
                inputs={'corr_factor': (check_pet, 'corr_factors')},
                requirements=[fsl_req.v('5.0.9')],
                iterfield=['corr_factor', 'pet_image', 'motion_mat'])

        if StructAlignment:
            pipeline.connect(struct_reg, 'out_matrix_file', pet_mc,
                             'structural2ref_regmat')
            pipeline.connect_input('struct2align', pet_mc, 'structural_image')
        mni_reg = bool(self.parameter('PET2MNI_reg'))

        if self.branch('dynamic_pet_mc'):
            merge_mc = pipeline.add(
                'merge_pet_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_mc_image')},
                requirements=[fsl_req.v('5.0.9')])

            merge_no_mc = pipeline.add(
                'merge_pet_no_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_no_mc_image')},
                requirements=[fsl_req.v('5.0.9')])
        else:
            static_mc = pipeline.add('static_mc_generation',
                                     StaticPETImageGeneration(),
                                     inputs={
                                         'pet_mc_images':
                                         (pet_mc, 'pet_mc_image'),
                                         'pet_no_mc_images':
                                         (pet_mc, 'pet_no_mc_image')
                                     },
                                     requirements=[fsl_req.v('5.0.9')])

        merge_outputs = pipeline.add(
            'merge_outputs',
            Merge(3),
            inputs={'in1': ('mean_displacement_plot', png_format)})

        if not StructAlignment:
            cropping = pipeline.add(
                'pet_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', cropping,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_mc', cropping, 'pet_image')

            cropping_no_mc = pipeline.add(
                'pet_no_mc_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_no_mc, 'merged_file', cropping_no_mc,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_no_mc', cropping_no_mc,
                                 'pet_image')

            if mni_reg:
                if self.branch('dynamic_pet_mc'):
                    t_mean = pipeline.add(
                        'PET_temporal_mean',
                        ImageMaths(op_string='-Tmean'),
                        inputs={'in_file': (cropping, 'pet_cropped')},
                        requirements=[fsl_req.v('5.0.9')])

                reg_tmean2MNI = pipeline.add(
                    'reg2MNI',
                    AntsRegSyn(num_dimensions=3,
                               transformation='s',
                               out_prefix='reg2MNI',
                               num_threads=4,
                               ref_file=self.parameter('PET_template_MNI')),
                    wall_time=25,
                    requirements=[ants_req.v('2')])

                if self.branch('dynamic_pet_mc'):
                    pipeline.connect(t_mean, 'out_file', reg_tmean2MNI,
                                     'input_file')

                    merge_trans = pipeline.add('merge_transforms',
                                               Merge(2),
                                               inputs={
                                                   'in1': (reg_tmean2MNI,
                                                           'warp_file'),
                                                   'in2':
                                                   (reg_tmean2MNI, 'regmat')
                                               },
                                               wall_time=1)

                    apply_trans = pipeline.add(
                        'apply_trans',
                        ApplyTransforms(
                            reference_image=self.parameter('PET_template_MNI'),
                            interpolation='Linear',
                            input_image_type=3),
                        inputs={
                            'input_image': (cropping, 'pet_cropped'),
                            'transforms': (merge_trans, 'out')
                        },
                        wall_time=7,
                        mem_gb=24,
                        requirements=[ants_req.v('2')])
                    pipeline.connect(apply_trans, 'output_image',
                                     merge_outputs, 'in2')
                else:
                    pipeline.connect(cropping, 'pet_cropped', reg_tmean2MNI,
                                     'input_file')
                    pipeline.connect(reg_tmean2MNI, 'reg_file', merge_outputs,
                                     'in2')
            else:
                pipeline.connect(cropping, 'pet_cropped', merge_outputs, 'in2')
            pipeline.connect(cropping_no_mc, 'pet_cropped', merge_outputs,
                             'in3')
        else:
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', merge_outputs, 'in2')
                pipeline.connect(merge_no_mc, 'merged_file', merge_outputs,
                                 'in3')
            else:
                pipeline.connect(static_mc, 'static_mc', merge_outputs, 'in2')
                pipeline.connect(static_mc, 'static_no_mc', merge_outputs,
                                 'in3')


#         mcflirt = pipeline.add('mcflirt', MCFLIRT())
#                 'in_file': (merge_mc_ps, 'merged_file'),
#                 cost='normmi',

        copy2dir = pipeline.add('copy2dir',
                                CopyToDir(),
                                inputs={'in_files': (merge_outputs, 'out')})
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_output('dynamic_motion_correction_results',
                                    copy2dir, 'out_dir')
        else:
            pipeline.connect_output('static_motion_correction_results',
                                    copy2dir, 'out_dir')
        return pipeline
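
When a structural image is supplied, the alignment above is a 6-DOF FLIRT registration with a normalised mutual information cost. A minimal standalone sketch of that single node, with hypothetical file names:

from nipype.interfaces.fsl import FLIRT

flirt = FLIRT(
    in_file='struct2align.nii.gz',       # hypothetical structural image
    reference='ref_brain.nii.gz',        # hypothetical reference brain
    dof=6,                               # rigid-body alignment
    cost='normmi',                       # normalised mutual information
    cost_func='normmi',
    output_type='NIFTI_GZ')
result = flirt.run()
print(result.outputs.out_matrix_file)    # matrix passed on to the PET MC node
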
Example #11
    def _ants_to_tmpl_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='mag_coreg_to_tmpl',
            name_maps=name_maps,
            desc=("Nonlinearly registers a MR scan to a standard space,"
                  "e.g. MNI-space"),
            citations=[fsl_cite])

        pipeline.add(
            'Struct2MNI_reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='s',
                num_threads=4),
            inputs={
                'input_file': (self.brain_spec_name, nifti_gz_format),
                'ref_file': ('template_brain', nifti_gz_format)},
            outputs={
                'mag_coreg_to_tmpl': ('reg_file', nifti_gz_format),
                'coreg_to_tmpl_ants_mat': ('regmat', text_matrix_format),
                'coreg_to_tmpl_ants_warp': ('warp_file', nifti_gz_format)},
            wall_time=25,
            requirements=[ants_req.v('2.0')])

#         ants_reg = pipeline.add(
#             'ants_reg',
#             ants.Registration(
#                 dimension=3,
#                 collapse_output_transforms=True,
#                 float=False,
#                 interpolation='Linear',
#                 use_histogram_matching=False,
#                 winsorize_upper_quantile=0.995,
#                 winsorize_lower_quantile=0.005,
#                 verbose=True,
#                 transforms=['Rigid', 'Affine', 'SyN'],
#                 transform_parameters=[(0.1,), (0.1,), (0.1, 3, 0)],
#                 metric=['MI', 'MI', 'CC'],
#                 metric_weight=[1, 1, 1],
#                 radius_or_number_of_bins=[32, 32, 32],
#                 sampling_strategy=['Regular', 'Regular', 'None'],
#                 sampling_percentage=[0.25, 0.25, None],
#                 number_of_iterations=[[1000, 500, 250, 100],
#                                       [1000, 500, 250, 100],
#                                       [100, 70, 50, 20]],
#                 convergence_threshold=[1e-6, 1e-6, 1e-6],
#                 convergence_window_size=[10, 10, 10],
#                 shrink_factors=[[8, 4, 2, 1],
#                                 [8, 4, 2, 1],
#                                 [8, 4, 2, 1]],
#                 smoothing_sigmas=[[3, 2, 1, 0],
#                                   [3, 2, 1, 0],
#                                   [3, 2, 1, 0]],
#                 output_warped_image=True),
#             inputs={
#                 'fixed_image': ('template_brain', nifti_gz_format),
#                 'moving_image': (self.brain_spec_name, nifti_gz_format)},
#             outputs={
#                 'mag_coreg_to_tmpl': ('warped_image', nifti_gz_format)},
#             wall_time=25,
#             requirements=[ants_req.v('2.0')])
#
#         select_trans = pipeline.add(
#             'select',
#             SelectOne(
#                 index=1),
#             inputs={
#                 'inlist': (ants_reg, 'forward_transforms')},
#             outputs={
#                 'coreg_to_tmpl_ants_mat': ('out', text_matrix_format)})
#
#         pipeline.add(
#             'select_warp',
#             SelectOne(
#                 index=0),
#             inputs={
#                 'inlist': (ants_reg, 'forward_transforms')},
#             outputs={
#                 'coreg_to_tmpl_ants_warp': ('out', nifti_gz_format)})
#
#         pipeline.add(
#             'slices',
#             FSLSlices(
#                 outname='coreg_to_tmpl_report'),
#             inputs={
#                 'im1': ('template', nifti_gz_format),
#                 'im2': (select_trans, 'out')},
#             outputs={
#                 'coreg_to_tmpl_fsl_report': ('report', gif_format)},
#             wall_time=1,
#             requirements=[fsl_req.v('5.0.8')])

        return pipeline
Example #12
    def _optiBET_brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using a modified optiBET approach.
        """
        pipeline = self.new_pipeline(
            name='brain_extraction',
            name_maps=name_maps,
            desc=("Modified implementation of optiBET.sh"),
            citations=[fsl_cite])

        mni_reg = pipeline.add(
            'T1_reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='s',
                out_prefix='T12MNI',
                num_threads=4),
            inputs={
                'ref_file': ('template', nifti_gz_format),
                'input_file': ('mag_preproc', nifti_gz_format)},
            wall_time=25,
            requirements=[ants_req.v('2.0')])

        merge_trans = pipeline.add(
            'merge_transforms',
            Merge(2),
            inputs={
                'in1': (mni_reg, 'inv_warp'),
                'in2': (mni_reg, 'regmat')},
            wall_time=1)

        trans_flags = pipeline.add(
            'trans_flags',
            Merge(2,
                  in1=False,
                  in2=True),
            wall_time=1)

        apply_trans = pipeline.add(
            'ApplyTransform',
            ApplyTransforms(
                interpolation='NearestNeighbor',
                input_image_type=3),
            inputs={
                'input_image': ('template_mask', nifti_gz_format),
                'reference_image': ('mag_preproc', nifti_gz_format),
                'transforms': (merge_trans, 'out'),
                'invert_transform_flags': (trans_flags, 'out')},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2.0')])

        maths1 = pipeline.add(
            'binarize',
            fsl.ImageMaths(
                suffix='_optiBET_brain_mask',
                op_string='-bin',
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (apply_trans, 'output_image')},
            outputs={
                'brain_mask': ('out_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        maths2 = pipeline.add(
            'mask',
            fsl.ImageMaths(
                suffix='_optiBET_brain',
                op_string='-mas',
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format),
                'in_file2': (maths1, 'out_file')},
            outputs={
                'brain': ('out_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        if self.branch('optibet_gen_report'):
            pipeline.add(
                'slices',
                FSLSlices(
                    outname='optiBET_report',
                    output_type='NIFTI_GZ'),
                wall_time=5,
                inputs={
                    'im1': ('mag_preproc', nifti_gz_format),
                    'im2': (maths2, 'out_file')},
                outputs={
                    'optiBET_report': ('report', gif_format)},
                requirements=[fsl_req.v('5.0.8')])

        return pipeline
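
The final two nodes are plain fslmaths calls: the warped template mask is binarised (-bin) and then used to mask the magnitude image (-mas). A standalone sketch with hypothetical file names:

from nipype.interfaces import fsl

# Binarise the warped template mask into a brain mask
binarize = fsl.ImageMaths(in_file='warped_template_mask.nii.gz',
                          op_string='-bin',
                          suffix='_optiBET_brain_mask',
                          output_type='NIFTI_GZ')
brain_mask = binarize.run().outputs.out_file

# Apply the mask to the preprocessed magnitude image
mask = fsl.ImageMaths(in_file='mag_preproc.nii.gz',
                      in_file2=brain_mask,
                      op_string='-mas',
                      suffix='_optiBET_brain',
                      output_type='NIFTI_GZ')
print(mask.run().outputs.out_file)
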
Example #13
    def _ants_linear_coreg_pipeline(self, **name_maps):
        """
        Registers an MR scan to a reference MR scan using ANTs' linear_reg
        command
        """

        pipeline = self.new_pipeline(
            name='linear_coreg',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image using ANTs",
            citations=[ants_cite])

        pipeline.add(
            'ANTs_linear_Reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='r'),
            inputs={
                'ref_file': ('coreg_ref', nifti_gz_format),
                'input_file': ('mag_preproc', nifti_gz_format)},
            outputs={
                'mag_coreg': ('reg_file', nifti_gz_format),
                'coreg_ants_mat': ('regmat', text_matrix_format)},
            wall_time=10,
            requirements=[ants_req.v('2.0')])


#         ants_reg = pipeline.add(
#             'ants_reg',
#             ants.Registration(
#                 dimension=3,
#                 collapse_output_transforms=True,
#                 float=False,
#                 interpolation='Linear',
#                 use_histogram_matching=False,
#                 winsorize_upper_quantile=0.995,
#                 winsorize_lower_quantile=0.005,
#                 verbose=True,
#                 transforms=['Rigid'],
#                 transform_parameters=[(0.1,)],
#                 metric=['MI'],
#                 metric_weight=[1],
#                 radius_or_number_of_bins=[32],
#                 sampling_strategy=['Regular'],
#                 sampling_percentage=[0.25],
#                 number_of_iterations=[[1000, 500, 250, 100]],
#                 convergence_threshold=[1e-6],
#                 convergence_window_size=[10],
#                 shrink_factors=[[8, 4, 2, 1]],
#                 smoothing_sigmas=[[3, 2, 1, 0]],
#                 output_warped_image=True),
#             inputs={
#                 'fixed_image': ('coreg_ref', nifti_gz_format),
#                 'moving_image': ('mag_preproc', nifti_gz_format)},
#             outputs={
#                 'mag_coreg': ('warped_image', nifti_gz_format)},
#             wall_time=10,
#             requirements=[ants_req.v('2.0')])
#
#         pipeline.add(
#             'select',
#             SelectOne(
#                 index=0),
#             inputs={
#                 'inlist': (ants_reg, 'forward_transforms')},
#             outputs={
#                 'coreg_ants_mat': ('out', text_matrix_format)})

        return pipeline
Example #14
    def brain_coreg_pipeline(self, **name_maps):
        """
        Coregistered and brain-extracted images can be derived in two ways. If
        an explicit brain-extracted reference is provided as 'coreg_ref_brain',
        the brain-extracted image is coregistered against it. Alternatively, if
        only a skull-included reference is provided, the registration is
        performed with skulls included and brain extraction is performed
        afterwards.
        """
        if self.provided('coreg_ref_brain'):
            # If a reference brain extracted image is provided we coregister
            # the brain extracted image to that
            pipeline = self.coreg_pipeline(
                name='brain_coreg',
                name_maps=dict(
                    input_map={
                        'mag_preproc': 'brain',
                        'coreg_ref': 'coreg_ref_brain'},
                    output_map={
                        'mag_coreg': 'brain_coreg'},
                    name_maps=name_maps))

            # Apply coregistration transform to brain mask
            if self.branch('coreg_method', 'flirt'):
                pipeline.add(
                    'mask_transform',
                    ApplyXFM(
                        output_type='NIFTI_GZ',
                        apply_xfm=True),
                    inputs={
                        'in_matrix_file': (pipeline.node('flirt'),
                                           'out_matrix_file'),
                        'in_file': ('brain_mask', nifti_gz_format),
                        'reference': ('coreg_ref_brain', nifti_gz_format)},
                    outputs={
                        'brain_mask_coreg': ('out_file', nifti_gz_format)},
                    requirements=[fsl_req.v('5.0.10')],
                    wall_time=10)

            elif self.branch('coreg_method', 'ants'):
                # Convert ANTs transform matrix to FSL format if we have used
                # Ants registration so we can apply the transform using
                # ApplyXFM
                pipeline.add(
                    'mask_transform',
                    ants.resampling.ApplyTransforms(
                        interpolation='Linear',
                        input_image_type=3,
                        invert_transform_flags=[True, True, False]),
                    inputs={
                        'input_image': ('brain_mask', nifti_gz_format),
                        'reference_image': ('coreg_ref_brain',
                                            nifti_gz_format),
                        'transforms': (pipeline.node('ants_reg'),
                                       'forward_transforms')},
                    requirements=[ants_req.v('1.9')], mem_gb=16,
                    wall_time=30)
            else:
                self.unhandled_branch('coreg_method')

        elif self.provided('coreg_ref'):
            # If coreg_ref is provided then we co-register the non-brain
            # extracted images and then brain extract the co-registered image
            pipeline = self.brain_extraction_pipeline(
                name='bet_coreg',
                input_map={'mag_preproc': 'mag_coreg'},
                output_map={'brain': 'brain_coreg',
                            'brain_mask': 'brain_mask_coreg'},
                name_maps=name_maps)
        else:
            raise BananaUsageError(
                "Either 'coreg_ref' or 'coreg_ref_brain' needs to be provided "
                "in order to derive brain_coreg or brain_mask_coreg")
        return pipeline
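
In the 'flirt' branch the saved FLIRT matrix is reused to resample the brain mask into the reference space. A standalone sketch of that step using nipype's ApplyXFM (a FLIRT subclass), with hypothetical file names:

from nipype.interfaces import fsl

xfm = fsl.ApplyXFM(
    in_file='brain_mask.nii.gz',            # hypothetical mask in native space
    reference='coreg_ref_brain.nii.gz',     # hypothetical brain-extracted reference
    in_matrix_file='brain_coreg.mat',       # matrix written by the FLIRT node
    apply_xfm=True,                         # resample with the supplied matrix
    output_type='NIFTI_GZ')
print(xfm.run().outputs.out_file)
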
Example #15
    def preprocess_pipeline(self, **name_maps):
        """
        Performs a series of FSL preprocessing steps, including Eddy and Topup

        Parameters
        ----------
        phase_dir : str{AP|LR|IS}
            The phase encode direction
        """

        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite,
                      distort_correct_cite, n4_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)

        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            citations=references)

        # Create nodes to convert gradients to FSL format
        if self.input('series').format == dicom_format:
            extract_grad = pipeline.add(
                "extract_grad",
                ExtractFSLGradients(),
                inputs={
                    'in_file': ('series', dicom_format)},
                outputs={
                    'grad_dirs': ('bvecs_file', fsl_bvecs_format),
                    'bvalues': ('bvals_file', fsl_bvals_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
                               'in2': (extract_grad, 'bvals_file')}
        elif self.provided('grad_dirs') and self.provided('bvalues'):
            grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
                               'in2': ('bvalues', fsl_bvals_format)}
        else:
            raise BananaUsageError(
                "Either input 'magnitude' image needs to be in DICOM format "
                "or gradient directions and b-values need to be explicitly "
                "provided to {}".format(self))

        # Gradient merge node
        grad_fsl = pipeline.add(
            "grad_fsl",
            MergeTuple(2),
            inputs=grad_fsl_inputs)

        gradients = (grad_fsl, 'out')

        # Create node to reorient preproc out_file
        if self.branch('reorient2std'):
            reorient = pipeline.add(
                'fslreorient2std',
                fsl.utils.Reorient2Std(
                    output_type='NIFTI_GZ'),
                inputs={
                    'in_file': ('series', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.9')])
            reoriented = (reorient, 'out_file')
        else:
            reoriented = ('series', nifti_gz_format)

        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(),
                inputs={
                    'in_file': reoriented},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Calculate residual noise
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': reoriented,
                    'in2': (denoise, 'noise')})

            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract'),
                inputs={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'noise_residual': ('out_file', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            denoised = (denoise, 'out_file')
        else:
            denoised = reoriented

        # Preproc kwargs
        preproc_kwargs = {}
        preproc_inputs = {'in_file': denoised,
                          'grad_fsl': gradients}

        if self.provided('reverse_phase'):

            if self.provided('magnitude', default_okay=False):
                dwi_reference = ('magnitude', mrtrix_image_format)
            else:
                # Extract b=0 volumes
                dwiextract = pipeline.add(
                    'dwiextract',
                    ExtractDWIorB0(
                        bzero=True,
                        out_ext='.nii.gz'),
                    inputs={
                        'in_file': denoised,
                        'fslgrad': gradients},
                    requirements=[mrtrix_req.v('3.0rc3')])

                # Get first b=0 from dwi b=0 volumes
                extract_first_b0 = pipeline.add(
                    "extract_first_vol",
                    MRConvert(
                        coord=(3, 0)),
                    inputs={
                        'in_file': (dwiextract, 'out_file')},
                    requirements=[mrtrix_req.v('3.0rc3')])

                dwi_reference = (extract_first_b0, 'out_file')

            # Concatenate extracted forward rpe with reverse rpe
            combined_images = pipeline.add(
                'combined_images',
                MRCat(),
                inputs={
                    'first_scan': dwi_reference,
                    'second_scan': ('reverse_phase', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Create node to assign the right PED to the diffusion
            prep_dwi = pipeline.add(
                'prepare_dwi',
                PrepareDWI(),
                inputs={
                    'pe_dir': ('ped', float),
                    'ped_polarity': ('pe_angle', float)})

            preproc_kwargs['rpe_pair'] = True

            distortion_correction = True
            preproc_inputs['se_epi'] = (combined_images, 'out_file')
        else:
            distortion_correction = False
            preproc_kwargs['rpe_none'] = True

        if self.parameter('preproc_pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')

        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.nii.gz',
                # FIXME: Need to determine this programmatically
                # eddy_parameters = '--data_is_shelled '
                temp_dir='dwipreproc_tempdir',
                **preproc_kwargs),
            inputs=preproc_inputs,
            outputs={
                'eddy_par': ('eddy_parameters', eddy_par_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
            wall_time=60)

        if distortion_correction:
            pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')

        mask = pipeline.add(
            'dwi2mask',
            BrainMask(
                out_file='brainmask.nii.gz'),
            inputs={
                'in_file': (preproc, 'out_file'),
                'grad_fsl': gradients},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Create bias correct node
        pipeline.add(
            "bias_correct",
            DWIBiasCorrect(
                method='ants'),
            inputs={
                'grad_fsl': gradients,  # internal
                'in_file': (preproc, 'out_file'),
                'mask': (mask, 'out_file')},
            outputs={
                'series_preproc': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])

        return pipeline
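
The optional denoising branch runs MRtrix3's dwidenoise and then subtracts the estimated noise map to obtain a residual. A standalone sketch of the denoising step using nipype's mrtrix3 wrapper, with hypothetical file names:

from nipype.interfaces.mrtrix3 import DWIDenoise

denoise = DWIDenoise(
    in_file='dwi_reoriented.nii.gz',   # hypothetical reoriented dMRI series
    noise='noise_map.nii.gz')          # write the estimated noise map here
result = denoise.run()
print(result.outputs.out_file, result.outputs.noise)
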