Example #1
    def _run_interface(self, runtime):
        # Apply the same set of transforms to each of the input images
        for input_image in self.inputs.input_images:
            ax = ApplyTransforms(input_image=input_image,
                                 reference_image=self.inputs.reference_image,
                                 interpolation=self.inputs.interpolation,
                                 transforms=self.inputs.transforms,
                                 out_postfix=self.inputs.out_postfix,
                                 default_value=self.inputs.default_value)
            ax.run()
        return runtime
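For reference, the same interface can also be configured and run on its own, outside a pipeline or custom interface. The sketch below is purely illustrative: the file names are placeholders, and it assumes the standard nipype.interfaces.ants import used throughout these examples.

from nipype.interfaces.ants import ApplyTransforms

# Placeholder file names; substitute real images and transforms.
at = ApplyTransforms(input_image='moving.nii.gz',
                     reference_image='fixed.nii.gz',
                     transforms=['warp.nii.gz', 'affine.mat'],
                     interpolation='Linear',
                     default_value=0)
result = at.run()                      # runs antsApplyTransforms
print(result.outputs.output_image)     # path to the resampled image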
Example #2
    def timeseries_normalization_to_atlas_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='timeseries_normalization_to_atlas_pipeline',
            desc=("Apply ANTs transformation to the fmri filtered file to "
                  "normalize it to MNI 2mm."),
            citations=[fsl_cite],
            name_maps=name_maps)

        merge_trans = pipeline.add('merge_transforms',
                                   NiPypeMerge(3),
                                   inputs={
                                       'in1': ('coreg_to_tmpl_ants_warp',
                                               nifti_gz_format),
                                       'in2': ('coreg_to_tmpl_ants_mat',
                                               text_matrix_format),
                                       'in3':
                                       ('coreg_matrix', text_matrix_format)
                                   },
                                   wall_time=1)

        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(interpolation='Linear', input_image_type=3),
            inputs={
                'reference_image': ('template_brain', nifti_gz_format),
                'input_image': ('cleaned_file', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            outputs={'normalized_ts': ('output_image', nifti_gz_format)},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2')])

        return pipeline
Example #3
def test_ApplyTransforms_outputs():
    output_map = dict(output_image=dict(), )
    outputs = ApplyTransforms.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
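The auto-generated specs above rely on nose-style yield asserts, which recent pytest releases no longer collect. A rough sketch of the same metadata check as a plain pytest-style test (assuming the same ApplyTransforms import) could be:

def test_ApplyTransforms_outputs_pytest():
    # Same check as above, expressed with a plain assert instead of yield
    output_map = dict(output_image=dict())
    outputs = ApplyTransforms.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            assert getattr(outputs.traits()[key], metakey) == value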
Example #4
    def ApplyTransform_pipeline(self, **kwargs):
        pipeline = self.create_pipeline(
            name='applytransform',
            inputs=[DatasetSpec('pet_volumes', nifti_gz_format),
                    DatasetSpec('warp_file', nifti_gz_format),
                    DatasetSpec('affine_mat', text_matrix_format)],
            outputs=[DatasetSpec('registered_volumes', nifti_gz_format)],
            desc=('Apply transformation to the 4D PET timeseries'),
            version=1,
            citations=[],
            **kwargs)

        merge_trans = pipeline.create_node(Merge(2), name='merge_transforms')
        pipeline.connect_input('warp_file', merge_trans, 'in1')
        pipeline.connect_input('affine_mat', merge_trans, 'in2')

        apply_trans = pipeline.create_node(
            ApplyTransforms(), name='ApplyTransform')
        apply_trans.inputs.reference_image = self.parameter(
            'trans_template')
        apply_trans.inputs.interpolation = 'Linear'
        apply_trans.inputs.input_image_type = 3
        pipeline.connect(merge_trans, 'out', apply_trans, 'transforms')
        pipeline.connect_input('pet_volumes', apply_trans, 'input_image')

        pipeline.connect_output('registered_volumes', apply_trans,
                                'output_image')
        return pipeline
Example #5
    def ApplyTransform_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='applytransform',
            desc=('Apply transformation to the 4D PET timeseries'),
            citations=[],
            **kwargs)

        merge_trans = pipeline.add('merge_transforms',
                                   Merge(2),
                                   inputs={
                                       'in1': ('warp_file', nifti_gz_format),
                                       'in2':
                                       ('affine_mat', text_matrix_format)
                                   })

        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(reference_image=self.parameter('trans_template'),
                            interpolation='Linear',
                            input_image_type=3),
            inputs={
                'input_image': ('pet_volumes', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            outputs={'registered_volumes': ('output_image', nifti_gz_format)})

        return pipeline
Example #6
def test_ApplyTransforms_outputs():
    output_map = dict(output_image=dict())
    outputs = ApplyTransforms.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
Example #7
def test_ApplyTransforms_inputs():
    input_map = dict(
        args=dict(argstr='%s', ),
        default_value=dict(
            argstr='--default-value %d',
            usedefault=True,
        ),
        dimension=dict(argstr='--dimensionality %d', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        input_image=dict(
            argstr='--input %s',
            mandatory=True,
        ),
        input_image_type=dict(argstr='--input-image-type %d', ),
        interpolation=dict(
            argstr='%s',
            usedefault=True,
        ),
        invert_transform_flags=dict(),
        num_threads=dict(
            nohash=True,
            usedefault=True,
        ),
        out_postfix=dict(usedefault=True, ),
        output_image=dict(
            argstr='--output %s',
            genfile=True,
            hash_files=False,
        ),
        print_out_composite_warp_file=dict(requires=['output_image'], ),
        reference_image=dict(
            argstr='--reference-image %s',
            mandatory=True,
        ),
        terminal_output=dict(
            mandatory=True,
            nohash=True,
        ),
        transforms=dict(
            argstr='%s',
            mandatory=True,
        ),
    )
    inputs = ApplyTransforms.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Example #8
def test_ApplyTransforms_inputs():
    input_map = dict(args=dict(argstr='%s',
    ),
    default_value=dict(argstr='--default-value %d',
    usedefault=True,
    ),
    dimension=dict(argstr='--dimensionality %d',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    input_image=dict(argstr='--input %s',
    mandatory=True,
    ),
    input_image_type=dict(argstr='--input-image-type %d',
    ),
    interpolation=dict(argstr='%s',
    usedefault=True,
    ),
    invert_transform_flags=dict(),
    num_threads=dict(nohash=True,
    usedefault=True,
    ),
    out_postfix=dict(usedefault=True,
    ),
    output_image=dict(argstr='--output %s',
    genfile=True,
    hash_files=False,
    ),
    print_out_composite_warp_file=dict(requires=['output_image'],
    ),
    reference_image=dict(argstr='--reference-image %s',
    mandatory=True,
    ),
    terminal_output=dict(mandatory=True,
    nohash=True,
    ),
    transforms=dict(argstr='%s',
    mandatory=True,
    ),
    )
    inputs = ApplyTransforms.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Example #9
import shlex
import subprocess

from nipype.interfaces.ants import ApplyTransforms


def apply_transform(input_img, reference_img, transforms, output_img):
    """Resample input_img onto reference_img by running antsApplyTransforms.

    :param input_img: image to be resampled
    :param reference_img: image that defines the output grid
    :param transforms: should be a list of .mat and warp.nii.gz
    :param output_img: path of the resampled output image
    :return: None
    """
    at1 = ApplyTransforms()
    at1.inputs.dimension = 2
    at1.inputs.input_image = input_img
    at1.inputs.reference_image = reference_img
    at1.inputs.output_image = output_img
    # at1.inputs.interpolation = 'BSpline'
    # at1.inputs.interpolation_parameters = (5,)
    at1.inputs.default_value = 0
    at1.inputs.transforms = transforms
    at1.inputs.invert_transform_flags = [False, False]
    args = shlex.split(at1.cmdline)
    p = subprocess.Popen(args)
    p.wait()
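A hypothetical call to the helper above, with placeholder file names. The function hard-codes invert_transform_flags=[False, False], so it is meant to be used with a two-element transform list; calling at1.run() instead of building the command with shlex/subprocess would execute the same antsApplyTransforms call through nipype's own runner.

apply_transform(input_img='slice.nii.gz',
                reference_img='template_slice.nii.gz',
                transforms=['affine_0GenericAffine.mat', 'warp_1Warp.nii.gz'],
                output_img='slice_in_template_space.nii.gz')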
Example #10
    def timeseries_normalization_to_atlas_pipeline(self, **kwargs):

        pipeline = self.create_pipeline(
            name='timeseries_normalization_to_atlas_pipeline',
            inputs=[
                DatasetSpec('cleaned_file', nifti_gz_format),
                DatasetSpec('coreg_to_atlas_warp', nifti_gz_format),
                DatasetSpec('coreg_to_atlas_mat', text_matrix_format),
                DatasetSpec('coreg_matrix', text_matrix_format)
            ],
            outputs=[DatasetSpec('normalized_ts', nifti_gz_format)],
            desc=("Apply ANTs transformation to the fmri filtered file to "
                  "normalize it to MNI 2mm."),
            version=1,
            citations=[fsl_cite],
            **kwargs)

        merge_trans = pipeline.create_node(NiPypeMerge(3),
                                           name='merge_transforms',
                                           wall_time=1)
        pipeline.connect_input('coreg_to_atlas_warp', merge_trans, 'in1')
        pipeline.connect_input('coreg_to_atlas_mat', merge_trans, 'in2')
        pipeline.connect_input('coreg_matrix', merge_trans, 'in3')

        apply_trans = pipeline.create_node(ApplyTransforms(),
                                           name='ApplyTransform',
                                           wall_time=7,
                                           memory=24000,
                                           requirements=[ants2_req])
        ref_brain = self.parameter('MNI_template')
        apply_trans.inputs.reference_image = ref_brain
        apply_trans.inputs.interpolation = 'Linear'
        apply_trans.inputs.input_image_type = 3
        pipeline.connect(merge_trans, 'out', apply_trans, 'transforms')
        pipeline.connect_input('cleaned_file', apply_trans, 'input_image')

        pipeline.connect_output('normalized_ts', apply_trans, 'output_image')

        return pipeline
Example #11
    def motion_correction_pipeline(self, **name_maps):

        if 'struct2align' in self.input_names:
            StructAlignment = True
        else:
            StructAlignment = False

        pipeline = self.new_pipeline(
            name='pet_mc',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will generate a motion corrected PET"
                  "image using information extracted from the MR-based "
                  "motion detection pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        check_pet = pipeline.add(
            'check_pet_data',
            CheckPetMCInputs(),
            inputs={
                'pet_data': ('pet_data_prepared', directory_format),
                'reference': ('ref_brain', nifti_gz_format)
            },
            requirements=[fsl_req.v('5.0.9'),
                          mrtrix_req.v('3.0rc3')])
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_input('fixed_binning_mats', check_pet,
                                   'motion_mats')
        else:
            pipeline.connect_input('average_mats', check_pet, 'motion_mats')
            pipeline.connect_input('correction_factors', check_pet,
                                   'corr_factors')

        if StructAlignment:
            struct_reg = pipeline.add('ref2structural_reg',
                                      FLIRT(dof=6,
                                            cost_func='normmi',
                                            cost='normmi',
                                            output_type='NIFTI_GZ'),
                                      inputs={
                                          'reference':
                                          ('ref_brain', nifti_gz_format),
                                          'in_file':
                                          ('struct2align', nifti_gz_format)
                                      },
                                      requirements=[fsl_req.v('5.0.9')])

        if self.branch('dynamic_pet_mc'):
            pet_mc = pipeline.add('pet_mc',
                                  PetImageMotionCorrection(),
                                  inputs={
                                      'pet_image': (check_pet, 'pet_images'),
                                      'motion_mat': (check_pet, 'motion_mats'),
                                      'pet2ref_mat': (check_pet, 'pet2ref_mat')
                                  },
                                  requirements=[fsl_req.v('5.0.9')],
                                  iterfield=['pet_image', 'motion_mat'])
        else:
            pet_mc = pipeline.add(
                'pet_mc',
                PetImageMotionCorrection(),
                inputs={'corr_factor': (check_pet, 'corr_factors')},
                requirements=[fsl_req.v('5.0.9')],
                iterfield=['corr_factor', 'pet_image', 'motion_mat'])

        if StructAlignment:
            pipeline.connect(struct_reg, 'out_matrix_file', pet_mc,
                             'structural2ref_regmat')
            pipeline.connect_input('struct2align', pet_mc, 'structural_image')
        if self.parameter('PET2MNI_reg'):
            mni_reg = True
        else:
            mni_reg = False

        if self.branch('dynamic_pet_mc'):
            merge_mc = pipeline.add(
                'merge_pet_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_mc_image')},
                requirements=[fsl_req.v('5.0.9')])

            merge_no_mc = pipeline.add(
                'merge_pet_no_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_no_mc_image')},
                requirements=[fsl_req.v('5.0.9')])
        else:
            static_mc = pipeline.add('static_mc_generation',
                                     StaticPETImageGeneration(),
                                     inputs={
                                         'pet_mc_images':
                                         (pet_mc, 'pet_mc_image'),
                                         'pet_no_mc_images':
                                         (pet_mc, 'pet_no_mc_image')
                                     },
                                     requirements=[fsl_req.v('5.0.9')])

        merge_outputs = pipeline.add(
            'merge_outputs',
            Merge(3),
            inputs={'in1': ('mean_displacement_plot', png_format)})

        if not StructAlignment:
            cropping = pipeline.add(
                'pet_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', cropping,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_mc', cropping, 'pet_image')

            cropping_no_mc = pipeline.add(
                'pet_no_mc_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_no_mc, 'merged_file', cropping_no_mc,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_no_mc', cropping_no_mc,
                                 'pet_image')

            if mni_reg:
                if self.branch('dynamic_pet_mc'):
                    t_mean = pipeline.add(
                        'PET_temporal_mean',
                        ImageMaths(op_string='-Tmean'),
                        inputs={'in_file': (cropping, 'pet_cropped')},
                        requirements=[fsl_req.v('5.0.9')])

                reg_tmean2MNI = pipeline.add(
                    'reg2MNI',
                    AntsRegSyn(num_dimensions=3,
                               transformation='s',
                               out_prefix='reg2MNI',
                               num_threads=4,
                               ref_file=self.parameter('PET_template_MNI')),
                    wall_time=25,
                    requirements=[ants_req.v('2')])

                if self.branch('dynamic_pet_mc'):
                    pipeline.connect(t_mean, 'out_file', reg_tmean2MNI,
                                     'input_file')

                    merge_trans = pipeline.add('merge_transforms',
                                               Merge(2),
                                               inputs={
                                                   'in1': (reg_tmean2MNI,
                                                           'warp_file'),
                                                   'in2':
                                                   (reg_tmean2MNI, 'regmat')
                                               },
                                               wall_time=1)

                    apply_trans = pipeline.add(
                        'apply_trans',
                        ApplyTransforms(
                            reference_image=self.parameter('PET_template_MNI'),
                            interpolation='Linear',
                            input_image_type=3),
                        inputs={
                            'input_image': (cropping, 'pet_cropped'),
                            'transforms': (merge_trans, 'out')
                        },
                        wall_time=7,
                        mem_gb=24,
                        requirements=[ants_req.v('2')])
                    pipeline.connect(apply_trans, 'output_image',
                                     merge_outputs, 'in2')
                else:
                    pipeline.connect(cropping, 'pet_cropped', reg_tmean2MNI,
                                     'input_file')
                    pipeline.connect(reg_tmean2MNI, 'reg_file', merge_outputs,
                                     'in2')
            else:
                pipeline.connect(cropping, 'pet_cropped', merge_outputs, 'in2')
            pipeline.connect(cropping_no_mc, 'pet_cropped', merge_outputs,
                             'in3')
        else:
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', merge_outputs, 'in2')
                pipeline.connect(merge_no_mc, 'merged_file', merge_outputs,
                                 'in3')
            else:
                pipeline.connect(static_mc, 'static_mc', merge_outputs, 'in2')
                pipeline.connect(static_mc, 'static_no_mc', merge_outputs,
                                 'in3')


#         mcflirt = pipeline.add('mcflirt', MCFLIRT())
#                 'in_file': (merge_mc_ps, 'merged_file'),
#                 cost='normmi',

        copy2dir = pipeline.add('copy2dir',
                                CopyToDir(),
                                inputs={'in_files': (merge_outputs, 'out')})
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_output('dynamic_motion_correction_results',
                                    copy2dir, 'out_dir')
        else:
            pipeline.connect_output('static_motion_correction_results',
                                    copy2dir, 'out_dir')
        return pipeline
Example #12
    def _optiBET_brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using a modified optiBET approach.
        """
        pipeline = self.new_pipeline(
            name='brain_extraction',
            name_maps=name_maps,
            desc=("Modified implementation of optiBET.sh"),
            citations=[fsl_cite])

        mni_reg = pipeline.add(
            'T1_reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='s',
                out_prefix='T12MNI',
                num_threads=4),
            inputs={
                'ref_file': ('template', nifti_gz_format),
                'input_file': ('mag_preproc', nifti_gz_format)},
            wall_time=25,
            requirements=[ants_req.v('2.0')])

        merge_trans = pipeline.add(
            'merge_transforms',
            Merge(2),
            inputs={
                'in1': (mni_reg, 'inv_warp'),
                'in2': (mni_reg, 'regmat')},
            wall_time=1)

        trans_flags = pipeline.add(
            'trans_flags',
            Merge(2,
                  in1=False,
                  in2=True),
            wall_time=1)

        apply_trans = pipeline.add(
            'ApplyTransform',
            ApplyTransforms(
                interpolation='NearestNeighbor',
                input_image_type=3),
            inputs={
                'input_image': ('template_mask', nifti_gz_format),
                'reference_image': ('mag_preproc', nifti_gz_format),
                'transforms': (merge_trans, 'out'),
                'invert_transform_flags': (trans_flags, 'out')},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2.0')])

        maths1 = pipeline.add(
            'binarize',
            fsl.ImageMaths(
                suffix='_optiBET_brain_mask',
                op_string='-bin',
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (apply_trans, 'output_image')},
            outputs={
                'brain_mask': ('out_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        maths2 = pipeline.add(
            'mask',
            fsl.ImageMaths(
                suffix='_optiBET_brain',
                op_string='-mas',
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format),
                'in_file2': (maths1, 'out_file')},
            outputs={
                'brain': ('out_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        if self.branch('optibet_gen_report'):
            pipeline.add(
                'slices',
                FSLSlices(
                    outname='optiBET_report',
                    output_type='NIFTI_GZ'),
                wall_time=5,
                inputs={
                    'im1': ('mag_preproc', nifti_gz_format),
                    'im2': (maths2, 'out_file')},
                outputs={
                    'optiBET_report': ('report', gif_format)},
                requirements=[fsl_req.v('5.0.8')])

        return pipeline
Example #13
    def _optiBET_brain_extraction_pipeline(self, in_file, **kwargs):
        """
        Generates a whole brain mask using a modified optiBET approach.
        """

        outputs = [
            DatasetSpec('brain', nifti_gz_format),
            DatasetSpec('brain_mask', nifti_gz_format)
        ]
        if self.parameter('optibet_gen_report'):
            outputs.append(DatasetSpec('optiBET_report', gif_format))
        pipeline = self.create_pipeline(
            name='brain_extraction',
            inputs=[DatasetSpec(in_file, nifti_gz_format)],
            outputs=outputs,
            desc=("Modified implementation of optiBET.sh"),
            version=1,
            citations=[fsl_cite],
            **kwargs)

        mni_reg = pipeline.create_node(AntsRegSyn(num_dimensions=3,
                                                  transformation='s',
                                                  out_prefix='T12MNI',
                                                  num_threads=4),
                                       name='T1_reg',
                                       wall_time=25,
                                       requirements=[ants2_req])
        mni_reg.inputs.ref_file = self.parameter('MNI_template')
        pipeline.connect_input(in_file, mni_reg, 'input_file')

        merge_trans = pipeline.create_node(Merge(2),
                                           name='merge_transforms',
                                           wall_time=1)
        pipeline.connect(mni_reg, 'inv_warp', merge_trans, 'in1')
        pipeline.connect(mni_reg, 'regmat', merge_trans, 'in2')

        trans_flags = pipeline.create_node(Merge(2),
                                           name='trans_flags',
                                           wall_time=1)
        trans_flags.inputs.in1 = False
        trans_flags.inputs.in2 = True

        apply_trans = pipeline.create_node(ApplyTransforms(),
                                           name='ApplyTransform',
                                           wall_time=7,
                                           memory=24000,
                                           requirements=[ants2_req])
        apply_trans.inputs.input_image = self.parameter('MNI_template_mask')
        apply_trans.inputs.interpolation = 'NearestNeighbor'
        apply_trans.inputs.input_image_type = 3
        pipeline.connect(merge_trans, 'out', apply_trans, 'transforms')
        pipeline.connect(trans_flags, 'out', apply_trans,
                         'invert_transform_flags')
        pipeline.connect_input(in_file, apply_trans, 'reference_image')

        maths1 = pipeline.create_node(fsl.ImageMaths(
            suffix='_optiBET_brain_mask', op_string='-bin'),
                                      name='binarize',
                                      wall_time=5,
                                      requirements=[fsl5_req])
        pipeline.connect(apply_trans, 'output_image', maths1, 'in_file')
        maths2 = pipeline.create_node(fsl.ImageMaths(suffix='_optiBET_brain',
                                                     op_string='-mas'),
                                      name='mask',
                                      wall_time=5,
                                      requirements=[fsl5_req])
        pipeline.connect_input(in_file, maths2, 'in_file')
        pipeline.connect(maths1, 'out_file', maths2, 'in_file2')
        if self.parameter('optibet_gen_report'):
            slices = pipeline.create_node(FSLSlices(),
                                          name='slices',
                                          wall_time=5,
                                          requirements=[fsl5_req])
            slices.inputs.outname = 'optiBET_report'
            pipeline.connect_input(in_file, slices, 'im1')
            pipeline.connect(maths2, 'out_file', slices, 'im2')
            pipeline.connect_output('optiBET_report', slices, 'report')

        pipeline.connect_output('brain_mask', maths1, 'out_file')
        pipeline.connect_output('brain', maths2, 'out_file')

        return pipeline