Example #1
0
class T2starStudy(MriStudy, metaclass=StudyMetaClass):
    """Study class for T2*-weighted MRI data.

    Extends :class:`MriStudy` with pipelines for quantitative
    susceptibility mapping (QSM), susceptibility-weighted imaging (SWI)
    and vein analysis, built from per-coil channel magnitude/phase data.
    """

    desc = "T2*-weighted MRI contrast"

    add_data_specs = [
        # Set the magnitude to be generated from the preprocess_channels
        # pipeline
        FilesetSpec('magnitude',
                    nifti_gz_format,
                    'preprocess_channels_pipeline',
                    desc=("Generated from separate channel signals, "
                          "provided to 'channels'.")),
        # QSM and phase processing
        FilesetSpec('swi', nifti_gz_format, 'swi_pipeline'),
        FilesetSpec('qsm',
                    nifti_gz_format,
                    'qsm_pipeline',
                    desc=("Quantitative susceptibility image resolved "
                          "from T2* coil images")),
        # Vein analysis
        FilesetSpec('composite_vein_image', nifti_gz_format, 'cv_pipeline'),
        FilesetSpec('vein_mask', nifti_gz_format, 'shmrf_pipeline'),
        # Templates (study-wide priors/atlases, defaulting to bundled
        # local reference data)
        InputFilesetSpec('mni_template_qsm_prior',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('QSMPrior',
                                                    nifti_gz_format)),
        InputFilesetSpec('mni_template_swi_prior',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('SWIPrior',
                                                    nifti_gz_format)),
        InputFilesetSpec('mni_template_atlas_prior',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('VeinFrequencyPrior',
                                                    nifti_gz_format)),
        InputFilesetSpec('mni_template_vein_atlas',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('VeinFrequencyMap',
                                                    nifti_gz_format))
    ]

    add_param_specs = [
        SwitchSpec('qsm_dual_echo', False),
        ParamSpec('qsm_echo',
                  1,
                  desc=("Which echo (by index starting at 1) to use when "
                        "using single echo")),
        ParamSpec('qsm_padding', [12, 12, 12]),
        # NOTE: 'dialation' is a historical misspelling of 'dilation',
        # kept as-is for backwards compatibility with existing configs.
        ParamSpec('qsm_mask_dialation', [11, 11, 11]),
        ParamSpec('qsm_erosion_size', 10),
        # FIX: a second, identical SwitchSpec('bet_robust', False) entry
        # was removed here (it was listed twice).
        SwitchSpec('bet_robust', False),
        ParamSpec('bet_f_threshold', 0.1),
        ParamSpec('bet_g_threshold', 0.0)
    ]

    def preprocess_channels_pipeline(self, **name_maps):
        """Extend the base channel-preprocessing pipeline so that the
        combined first-echo output also feeds the 'magnitude' data spec.
        """
        pipeline = super().preprocess_channels_pipeline(**name_maps)
        # Connect combined first echo output to the magnitude data spec
        pipeline.connect_output('magnitude', pipeline.node('to_polar'),
                                'first_echo', nifti_gz_format)
        return pipeline

    def qsm_pipeline(self, **name_maps):
        """
        Process dual echo data for QSM (TE=[7.38, 22.14])

        NB: Default values come from the STI-Suite
        """
        pipeline = self.new_pipeline(
            name='qsm_pipeline',
            name_maps=name_maps,
            desc="Resolve QSM from t2star coils",
            citations=[sti_cites, fsl_cite, matlab_cite])

        # Erode the brain mask so background-removal/QSM steps stay well
        # inside the brain boundary
        erosion = pipeline.add(
            'mask_erosion',
            fsl.ErodeImage(kernel_shape='sphere',
                           kernel_size=self.parameter('qsm_erosion_size'),
                           output_type='NIFTI'),
            inputs={'in_file': ('brain_mask', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=15,
            mem_gb=12)

        # If we have multiple echoes we can combine the phase images from
        # each channel into a single image. Otherwise for single echo sequences
        # we need to perform QSM on each coil separately and then combine
        # afterwards.
        if self.branch('qsm_dual_echo'):
            # Combine channels to produce phase and magnitude images
            channel_combine = pipeline.add(
                'channel_combine',
                HIPCombineChannels(),
                inputs={
                    'magnitudes_dir': ('mag_channels', multi_nifti_gz_format),
                    'phases_dir': ('phase_channels', multi_nifti_gz_format)
                })

            # Unwrap phase using Laplacian unwrapping
            unwrap = pipeline.add(
                'unwrap',
                UnwrapPhase(padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (channel_combine, 'phase')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Remove background noise
            vsharp = pipeline.add(
                "vsharp",
                VSharp(mask_manip="imerode({}>0, ball(5))"),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (unwrap, 'out_file'),
                    'mask': (erosion, 'out_file')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Run QSM iLSQR
            pipeline.add('qsmrecon',
                         QSMiLSQR(mask_manip="{}>0",
                                  padsize=self.parameter('qsm_padding')),
                         inputs={
                             'voxelsize': ('voxel_sizes', float),
                             'te': ('echo_times', float),
                             'B0': ('main_field_strength', float),
                             'H': ('main_field_orient', float),
                             'in_file': (vsharp, 'out_file'),
                             'mask': (vsharp, 'new_mask')
                         },
                         outputs={'qsm': ('qsm', nifti_format)},
                         requirements=[matlab_req.v('r2017a'),
                                       sti_req.v(2.2)])

        else:
            # Dialate eroded mask
            dialate = pipeline.add(
                'dialate',
                DialateMask(dialation=self.parameter('qsm_mask_dialation')),
                inputs={'in_file': (erosion, 'out_file')},
                requirements=[matlab_req.v('r2017a')])

            # List files for the phases of separate channel
            list_phases = pipeline.add(
                'list_phases',
                ListDir(sort_key=coil_sort_key,
                        filter=CoilEchoFilter(self.parameter('qsm_echo'))),
                inputs={
                    'directory': ('phase_channels', multi_nifti_gz_format)
                })

            # List files for the magnitudes of separate channel
            list_mags = pipeline.add(
                'list_mags',
                ListDir(sort_key=coil_sort_key,
                        filter=CoilEchoFilter(self.parameter('qsm_echo'))),
                inputs={'directory': ('mag_channels', multi_nifti_gz_format)})

            # Generate coil specific masks
            mask_coils = pipeline.add(
                'mask_coils',
                MaskCoils(dialation=self.parameter('qsm_mask_dialation')),
                inputs={
                    'masks': (list_mags, 'files'),
                    'whole_brain_mask': (dialate, 'out_file')
                },
                requirements=[matlab_req.v('r2017a')])

            # Unwrap phase
            unwrap = pipeline.add(
                'unwrap',
                BatchUnwrapPhase(padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (list_phases, 'files')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Background phase removal
            vsharp = pipeline.add(
                "vsharp",
                BatchVSharp(mask_manip='{}>0'),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'mask': (mask_coils, 'out_files'),
                    'in_file': (unwrap, 'out_file')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Single-echo QSM uses the first echo time only
            first_echo_time = pipeline.add(
                'first_echo',
                Select(index=0),
                inputs={'inlist': ('echo_times', float)})

            # Perform channel-wise QSM
            coil_qsm = pipeline.add(
                'coil_qsmrecon',
                BatchQSMiLSQR(mask_manip="{}>0",
                              padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'B0': ('main_field_strength', float),
                    'H': ('main_field_orient', float),
                    'in_file': (vsharp, 'out_file'),
                    'mask': (vsharp, 'new_mask'),
                    'te': (first_echo_time, 'out')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)],
                wall_time=45)  # FIXME: Should be dependent on number of coils

            # Combine channel QSM by taking the median coil value
            pipeline.add('combine_qsm',
                         MedianInMasks(),
                         inputs={
                             'channels': (coil_qsm, 'out_file'),
                             'channel_masks': (vsharp, 'new_mask'),
                             'whole_brain_mask': (dialate, 'out_file')
                         },
                         outputs={'qsm': ('out_file', nifti_format)},
                         requirements=[matlab_req.v('r2017a')])
        return pipeline

    def swi_pipeline(self, **name_maps):
        """Susceptibility-weighted image pipeline (not yet implemented)."""
        raise NotImplementedError

        # NOTE(review): everything below the raise above is unreachable —
        # it is a placeholder sketch for the eventual implementation.
        pipeline = self.new_pipeline(
            name='swi',
            name_maps=name_maps,
            desc=("Calculate susceptibility-weighted image from magnitude and "
                  "phase"))

        return pipeline

    def cv_pipeline(self, **name_maps):
        """Compute a composite vein image from QSM, SWI and template
        priors warped into the subject's QSM space.
        """
        pipeline = self.new_pipeline(name='cv_pipeline',
                                     name_maps=name_maps,
                                     desc="Compute Composite Vein Image",
                                     citations=[fsl_cite, matlab_cite])

        # Interpolate priors and atlas
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(3),
                                   inputs={
                                       'in1':
                                       ('coreg_ants_mat', text_matrix_format),
                                       'in2': ('coreg_to_tmpl_ants_mat',
                                               text_matrix_format),
                                       'in3': ('coreg_to_tmpl_ants_warp',
                                               nifti_gz_format)
                                   })

        apply_trans_q = pipeline.add(
            'ApplyTransform_Q_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_qsm_prior', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_s = pipeline.add(
            'ApplyTransform_S_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_swi_prior', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_a = pipeline.add(
            'ApplyTransform_A_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False],
            ),
            inputs={
                'reference_image': ('qsm', nifti_gz_format),
                'input_image': ('mni_template_atlas_prior', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_v = pipeline.add(
            'ApplyTransform_V_Atlas',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_vein_atlas', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        # Run CV code
        pipeline.add(
            'cv_image',
            interface=CompositeVeinImage(),
            inputs={
                'mask': ('brain_mask', nifti_format),
                'qsm': ('qsm', nifti_format),
                'swi': ('swi', nifti_format),
                'q_prior': (apply_trans_q, 'output_image'),
                's_prior': (apply_trans_s, 'output_image'),
                'a_prior': (apply_trans_a, 'output_image'),
                'vein_atlas': (apply_trans_v, 'output_image')
            },
            outputs={'composite_vein_image': ('out_file', nifti_format)},
            requirements=[matlab_req.v('R2015a')],
            wall_time=300,
            mem_gb=24)

        return pipeline

    def shmrf_pipeline(self, **name_maps):
        """Compute a binary vein mask from the composite vein image
        using the ShMRF MATLAB implementation.
        """
        pipeline = self.new_pipeline(name='shmrf_pipeline',
                                     name_maps=name_maps,
                                     desc="Compute Vein Mask using ShMRF",
                                     citations=[fsl_cite, matlab_cite])

        # Run ShMRF code
        pipeline.add('shmrf',
                     ShMRF(),
                     inputs={
                         'in_file': ('composite_vein_image', nifti_format),
                         'mask': ('brain_mask', nifti_format)
                     },
                     outputs={'vein_mask': ('out_file', nifti_format)},
                     requirements=[matlab_req.v('R2015a')],
                     wall_time=30,
                     mem_gb=16)

        return pipeline

    def cet_T2s(self, **options):
        """Construct a cerebellum mask in T2* space from the SUIT template.

        NOTE(review): uses the older ``**options``/``default_options``
        pipeline API rather than ``**name_maps`` like the methods above —
        confirm against the current Study base class before relying on it.
        """
        pipeline = self.new_pipeline(
            name='CET_T2s',
            desc=("Construct cerebellum mask using SUIT template"),
            default_options={
                'SUIT_mask': self._lookup_template_mask_path('SUIT')
            },
            citations=[fsl_cite],
            options=options)

        # Initially use MNI space to warp SUIT mask into T2s space
        merge_trans = pipeline.add(
            'merge_transforms',
            Merge(3),
            inputs={
                'in3': (self._lookup_nl_tfm_inv_name('SUIT'), nifti_gz_format),
                'in2': (self._lookup_l_tfm_to_name('SUIT'), nifti_gz_format),
                'in1': ('T2s_to_T1_mat', text_matrix_format)
            })

        apply_trans = pipeline.add(
            'ApplyTransform',
            ants.resampling.ApplyTransforms(
                interpolation='NearestNeighbor',
                input_image_type=3,
                invert_transform_flags=[True, True, False],
                input_image=pipeline.option('SUIT_mask')),
            inputs={
                'transforms': (merge_trans, 'out'),
                'reference_image': ('betted_T2s', nifti_gz_format)
            },
            outputs={'cetted_T2s_mask': ('output_image', nifti_gz_format)},
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=120)

        # Combine masks
        maths1 = pipeline.add('combine_masks',
                              fsl.utils.ImageMaths(suffix='_optiBET_masks',
                                                   op_string='-mas',
                                                   output_type='NIFTI_GZ'),
                              inputs={
                                  'in_file':
                                  ('betted_T2s_mask', nifti_gz_format),
                                  'in_file2': (apply_trans, 'output_image')
                              },
                              requirements=[fsl_req.v('5.0.8')],
                              mem_gb=16,
                              wall_time=5)

        # Mask out t2s image
        pipeline.add('mask_t2s',
                     fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                          op_string='-mas',
                                          output_type='NIFTI_GZ'),
                     inputs={
                         'in_file': ('betted_T2s', nifti_gz_format),
                         'in_file2': (maths1, 'output_image')
                     },
                     outputs={'cetted_T2s': ('out_file', nifti_gz_format)},
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=16,
                     wall_time=5)

        # Apply the same combined mask to the last-echo image
        pipeline.add(
            'mask_t2s_last_echo',
            fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                 op_string='-mas',
                                 output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('betted_T2s_last_echo', nifti_gz_format),
                'in_file2': (maths1, 'output_image')
            },
            outputs={'cetted_T2s_last_echo': ('out_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            mem_gb=16,
            wall_time=5)

        return pipeline

    def bet_T2s(self, **options):
        """Brain-extract the T2* image (and its last echo) with FSL BET.

        NOTE(review): same older ``**options`` API as :meth:`cet_T2s`.
        """
        pipeline = self.new_pipeline(name='BET_T2s',
                                     desc=("python implementation of BET"),
                                     default_options={},
                                     citations=[fsl_cite],
                                     options=options)

        bet = pipeline.add('bet',
                           fsl.BET(frac=0.1, mask=True,
                                   output_type='NIFTI_GZ'),
                           inputs={'in_file': ('t2s', nifti_gz_format)},
                           outputs={
                               'betted_T2s': ('out_file', nifti_gz_format),
                               'betted_T2s_mask':
                               ('mask_file', nifti_gz_format)
                           },
                           requirements=[fsl_req.v('5.0.8')],
                           mem_gb=8,
                           wall_time=45)

        # Re-use the BET mask on the last-echo image
        pipeline.add(
            'mask',
            fsl.utils.ImageMaths(suffix='_BET_brain',
                                 op_string='-mas',
                                 output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('t2s_last_echo', nifti_gz_format),
                'in_file2': (bet, 'mask_file')
            },
            outputs={'betted_T2s_last_echo': ('out_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            mem_gb=16,
            wall_time=5)

        return pipeline
Example #2
0
class T1Study(T2Study, metaclass=StudyMetaClass):
    """Study class for T1-weighted MRI data.

    Extends :class:`T2Study` with FreeSurfer surface reconstruction,
    5-tissue-type segmentation for ACT tractography, and per-visit
    aparc statistics tables.
    """

    desc = "T1-weighted MRI contrast"

    add_data_specs = [
        FilesetSpec('fs_recon_all', zip_format, 'freesurfer_pipeline'),
        InputFilesetSpec(
            't2_coreg',
            STD_IMAGE_FORMATS,
            optional=True,
            desc=("A coregistered T2 image to use in freesurfer to help "
                  "distinguish the peel surface")),
        # Templates
        InputFilesetSpec('suit_mask',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('SUIT', nifti_format)),
        FilesetSpec('five_tissue_type',
                    mrtrix_image_format,
                    'gen_5tt_pipeline',
                    desc=("A segmentation image taken from freesurfer output "
                          "and simplified into 5 tissue types. Used in ACT "
                          "streamlines tractography"))
    ] + [
        # One stats table per (hemisphere, measure) combination
        FilesetSpec('aparc_stats_{}_{}_table'.format(h, m),
                    text_format,
                    'aparc_stats_table_pipeline',
                    frequency='per_visit',
                    pipeline_args={
                        'hemisphere': h,
                        'measure': m
                    },
                    desc=("Table of {} of {} per parcellated segment".format(
                        m, h.upper())))
        for h, m in itertools.product(
            ('lh', 'rh'), ('volume', 'thickness', 'thicknessstd', 'meancurv',
                           'gauscurv', 'foldind', 'curvind'))
    ]

    add_param_specs = [
        SwitchSpec('bet_robust', False),
        SwitchSpec('bet_reduce_bias', True),
        SwitchSpec('aparc_atlas',
                   'desikan-killiany',
                   choices=('desikan-killiany', 'destrieux', 'DKT')),
        ParamSpec('bet_f_threshold', 0.1),
        ParamSpec('bet_g_threshold', 0.0)
    ]

    default_bids_inputs = [
        BidsInputs(spec_name='magnitude',
                   type='T1w',
                   valid_formats=(nifti_gz_x_format, nifti_gz_format))
    ]

    def freesurfer_pipeline(self, **name_maps):
        """
        Run FreeSurfer's recon-all on the preprocessed T1 magnitude
        image, optionally using a coregistered T2 image (if provided)
        to refine the pial surface.

        FIX: the previous docstring incorrectly described SPM
        "NewSegment" tissue segmentation; this pipeline runs recon-all.
        """
        pipeline = self.new_pipeline(name='segmentation',
                                     name_maps=name_maps,
                                     desc="Segment white/grey matter and csf",
                                     citations=copy(freesurfer_cites))

        # FS ReconAll node
        recon_all = pipeline.add(
            'recon_all',
            interface=ReconAll(directive='all',
                               openmp=self.processor.num_processes),
            inputs={'T1_files': ('mag_preproc', nifti_gz_format)},
            requirements=[freesurfer_req.v('5.3')],
            wall_time=2000)

        # Only wire in the T2 if the study actually provides one
        if self.provided('t2_coreg'):
            pipeline.connect_input('t2_coreg', recon_all, 'T2_file',
                                   nifti_gz_format)
            recon_all.inputs.use_T2 = True

        # Wrapper around os.path.join
        pipeline.add('join',
                     JoinPath(),
                     inputs={
                         'dirname': (recon_all, 'subjects_dir'),
                         'filename': (recon_all, 'subject_id')
                     },
                     outputs={'fs_recon_all': ('path', directory_format)})

        return pipeline

    def segmentation_pipeline(self, **name_maps):
        """Run the inherited segmentation pipeline with T1 image type."""
        # FIX: modernised super(T1Study, self) to zero-argument super(),
        # matching the style used elsewhere in this module.
        pipeline = super().segmentation_pipeline(img_type=1, **name_maps)
        return pipeline

    def gen_5tt_pipeline(self, **name_maps):
        """Generate the 5-tissue-type image from the FreeSurfer aseg."""
        pipeline = self.new_pipeline(
            name='gen5tt',
            name_maps=name_maps,
            desc=("Generate 5-tissue-type image used for Anatomically-"
                  "Constrained Tractography (ACT)"))

        # Locate mri/aseg.mgz inside the recon-all output directory
        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(sub_paths=['mri', 'aseg.mgz']),
            inputs={'base_path': ('fs_recon_all', directory_format)})

        pipeline.add(
            'gen5tt',
            mrtrix3.Generate5tt(algorithm='freesurfer', out_file='5tt.mif'),
            inputs={'in_file': (aseg_path, 'out_path')},
            outputs={'five_tissue_type': ('out_file', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3'),
                          freesurfer_req.v('6.0')])

        return pipeline

    def aparc_stats_table_pipeline(self, measure, hemisphere, **name_maps):
        """Build a per-visit aparcstats table for one hemisphere/measure.

        Parameters
        ----------
        measure : str
            Statistic to tabulate (e.g. 'volume', 'thickness').
        hemisphere : str
            'lh' or 'rh'.
        """
        pipeline = self.new_pipeline(
            name='aparc_stats_{}_{}'.format(hemisphere, measure),
            name_maps=name_maps,
            desc=("Extract statistics from freesurfer outputs"))

        # Gather all subjects' recon-all outputs into one SUBJECTS_DIR
        copy_to_dir = pipeline.add('copy_to_subjects_dir',
                                   CopyToDir(),
                                   inputs={
                                       'in_files':
                                       ('fs_recon_all', directory_format),
                                       'file_names': (self.SUBJECT_ID, int)
                                   },
                                   joinsource=self.SUBJECT_ID,
                                   joinfield=['in_files', 'file_names'])

        # Map the atlas switch onto FreeSurfer's parcellation names
        if self.branch('aparc_atlas', 'desikan-killiany'):
            parc = 'aparc'
        elif self.branch('aparc_atlas', 'destrieux'):
            parc = 'aparc.a2009s'
        elif self.branch('aparc_atlas', 'DKT'):
            parc = 'aparc.DKTatlas40'
        else:
            self.unhandled_branch('aparc_atlas')

        pipeline.add('aparc_stats',
                     AparcStats(measure=measure,
                                hemisphere=hemisphere,
                                parc=parc),
                     inputs={
                         'subjects_dir': (copy_to_dir, 'out_dir'),
                         'subjects': (copy_to_dir, 'file_names')
                     },
                     outputs={
                         'aparc_stats_{}_{}_table'.format(hemisphere, measure):
                         ('tablefile', text_format)
                     },
                     requirements=[freesurfer_req.v('5.3')])

        return pipeline

    def bet_T1(self, **name_maps):
        """Brain-extract the T1 image with N4 bias correction + FSL BET."""
        pipeline = self.new_pipeline(
            name='BET_T1',
            name_maps=name_maps,
            desc=("Brain extraction pipeline using FSL's BET"),
            citations=[fsl_cite])

        # Correct intensity inhomogeneity before skull-stripping
        bias = pipeline.add('n4_bias_correction',
                            ants.N4BiasFieldCorrection(),
                            inputs={'input_image': ('t1', nifti_gz_format)},
                            requirements=[ants_req.v('1.9')],
                            wall_time=60,
                            mem_gb=12)

        pipeline.add('bet',
                     fsl.BET(frac=0.15,
                             reduce_bias=True,
                             output_type='NIFTI_GZ'),
                     inputs={'in_file': (bias, 'output_image')},
                     outputs={
                         'betted_T1': ('out_file', nifti_gz_format),
                         'betted_T1_mask': ('mask_file', nifti_gz_format)
                     },
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=8,
                     wall_time=45)

        return pipeline

    def cet_T1(self, **name_maps):
        """Construct a cerebellum mask in T1 space from the SUIT template."""
        pipeline = self.new_pipeline(
            name='CET_T1',
            name_maps=name_maps,
            desc=("Construct cerebellum mask using SUIT template"),
            citations=[fsl_cite])

        # FIXME: Should convert to inputs
        nl = self._lookup_nl_tfm_inv_name('MNI')
        linear = self._lookup_l_tfm_to_name('MNI')

        # Initially use MNI space to warp SUIT into T1 and threshold to mask
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(2),
                                   inputs={
                                       'in2': (nl, nifti_gz_format),
                                       'in1': (linear, nifti_gz_format)
                                   })

        apply_trans = pipeline.add('ApplyTransform',
                                   ants.resampling.ApplyTransforms(
                                       interpolation='NearestNeighbor',
                                       input_image_type=3,
                                       invert_transform_flags=[True, False]),
                                   inputs={
                                       'reference_image':
                                       ('betted_T1', nifti_gz_format),
                                       'input_image':
                                       ('suit_mask', nifti_gz_format),
                                       'transforms': (merge_trans, 'out')
                                   },
                                   requirements=[ants_req.v('1.9')],
                                   mem_gb=16,
                                   wall_time=120)

        pipeline.add('maths2',
                     fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                          op_string='-mas'),
                     inputs={
                         'in_file': ('betted_T1', nifti_gz_format),
                         'in_file2': (apply_trans, 'output_image')
                     },
                     outputs={
                         'cetted_T1': ('out_file', nifti_gz_format),
                         'cetted_T1_mask': ('output_image', nifti_gz_format)
                     },
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=16,
                     wall_time=5)

        return pipeline
Example #3
0
class MotionDetectionMixin(MultiStudy, metaclass=MultiStudyMetaClass):
    """Mixin that assembles MR-derived motion information for PET studies.

    Motion matrices produced by the MR sub-studies are merged into
    mean-displacement summaries, split into frames, averaged per frame and
    finally used to motion-correct reconstructed PET data (static or
    dynamic).  Sub-studies are attached by the factory functions that build
    concrete classes from this mixin.
    """

    #     add_substudy_specs = [
    #         SubStudySpec('pet_mc', PetStudy)]

    # Filesets/fields produced and consumed by the motion-detection workflow.
    add_data_specs = [
        InputFilesetSpec('pet_data_dir', directory_format, optional=True),
        InputFilesetSpec('pet_data_reconstructed',
                         directory_format,
                         optional=True),
        InputFilesetSpec('struct2align', nifti_gz_format, optional=True),
        InputFilesetSpec('umap', dicom_format, optional=True),
        FilesetSpec('pet_data_prepared', directory_format,
                    'prepare_pet_pipeline'),
        FilesetSpec('static_motion_correction_results', directory_format,
                    'motion_correction_pipeline'),
        FilesetSpec('dynamic_motion_correction_results', directory_format,
                    'motion_correction_pipeline'),
        FilesetSpec('mean_displacement', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mean_displacement_rc', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mean_displacement_consecutive', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mats4average', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('start_times', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('motion_par_rc', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('motion_par', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('offset_indexes', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('severe_motion_detection_report', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('frame_start_times', text_format,
                    'motion_framing_pipeline'),
        FilesetSpec('frame_vol_numbers', text_format,
                    'motion_framing_pipeline'),
        FilesetSpec('timestamps', directory_format, 'motion_framing_pipeline'),
        FilesetSpec('mean_displacement_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        FilesetSpec('rotation_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        FilesetSpec('translation_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        FilesetSpec('average_mats', directory_format,
                    'frame_mean_transformation_mats_pipeline'),
        FilesetSpec('correction_factors', text_format,
                    'pet_correction_factors_pipeline'),
        FilesetSpec('umaps_align2ref', directory_format,
                    'umap_realignment_pipeline'),
        FilesetSpec('umap_aligned_dicoms', directory_format,
                    'nifti2dcm_conversion_pipeline'),
        FilesetSpec('motion_detection_output', directory_format,
                    'gather_outputs_pipeline'),
        FilesetSpec('moco_series', directory_format,
                    'create_moco_series_pipeline'),
        FilesetSpec('fixed_binning_mats', directory_format,
                    'fixed_binning_pipeline'),
        FieldSpec('pet_duration', int, 'pet_header_extraction_pipeline'),
        FieldSpec('pet_end_time', str, 'pet_header_extraction_pipeline'),
        FieldSpec('pet_start_time', str, 'pet_header_extraction_pipeline')
    ]

    # Tunable parameters (thresholds in mm/seconds, crop geometry in voxels).
    add_param_specs = [
        ParamSpec('framing_th', 2.0),
        ParamSpec('framing_temporal_th', 30.0),
        ParamSpec('framing_duration', 0),
        ParamSpec('md_framing', True),
        ParamSpec('align_pct', False),
        ParamSpec('align_fixed_binning', False),
        ParamSpec('moco_template',
                  os.path.join(reference_path, 'moco_template.IMA')),
        ParamSpec('PET_template_MNI',
                  os.path.join(template_path, 'PET_template_MNI.nii.gz')),
        ParamSpec('fixed_binning_n_frames', 0),
        ParamSpec('pet_offset', 0),
        ParamSpec('fixed_binning_bin_len', 60),
        ParamSpec('crop_xmin', 100),
        ParamSpec('crop_xsize', 130),
        ParamSpec('crop_ymin', 100),
        ParamSpec('crop_ysize', 130),
        ParamSpec('crop_zmin', 20),
        ParamSpec('crop_zsize', 100),
        ParamSpec('PET2MNI_reg', False),
        ParamSpec('dynamic_pet_mc', False)
    ]

    def mean_displacement_pipeline(self, **name_maps):
        """Build the pipeline that computes mean displacement between each
        sub-study's motion matrices and the reference brain image."""

        pipeline = self.new_pipeline(
            name='mean_displacement_calculation',
            desc=("Calculate the mean displacement between each motion"
                  " matrix and a reference."),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Gather motion mats / timing info from every sub-study that
        # provides a 'motion_mats' spec; each contributes one Merge input
        # slot ('in1', 'in2', ...).
        motion_mats_in = {}
        tr_in = {}
        start_time_in = {}
        real_duration_in = {}
        merge_index = 1
        input_names = []
        for spec in self.substudy_specs():
            try:
                spec.map('motion_mats')
            except ArcanaNameError:
                pass  # Sub study doesn't have motion mats spec
            else:
                k = 'in{}'.format(merge_index)
                motion_mats_in[k] = (spec.map('motion_mats'),
                                     motion_mats_format)
                tr_in[k] = (spec.map('tr'), float)
                start_time_in[k] = (spec.map('start_time'), float)
                real_duration_in[k] = (spec.map('real_duration'), float)
                input_names.append(
                    self.spec(spec.map(
                        spec.study_class.primary_scan_name)).pattern)
                merge_index += 1

        merge_motion_mats = pipeline.add('merge_motion_mats',
                                         Merge(len(motion_mats_in)),
                                         inputs=motion_mats_in)

        merge_tr = pipeline.add('merge_tr', Merge(len(tr_in)), inputs=tr_in)

        merge_start_time = pipeline.add('merge_start_time',
                                        Merge(len(start_time_in)),
                                        inputs=start_time_in)

        merge_real_duration = pipeline.add('merge_real_duration',
                                           Merge(len(real_duration_in)),
                                           inputs=real_duration_in)

        # Single node that turns the merged lists into all displacement
        # summaries, motion parameters and the severe-motion report.
        pipeline.add(
            'scan_time_info',
            MeanDisplacementCalculation(input_names=input_names),
            inputs={
                'motion_mats': (merge_motion_mats, 'out'),
                'trs': (merge_tr, 'out'),
                'start_times': (merge_start_time, 'out'),
                'real_durations': (merge_real_duration, 'out'),
                'reference': ('ref_brain', nifti_gz_format)
            },
            outputs={
                'mean_displacement': ('mean_displacement', text_format),
                'mean_displacement_rc': ('mean_displacement_rc', text_format),
                'mean_displacement_consecutive':
                ('mean_displacement_consecutive', text_format),
                'start_times': ('start_times', text_format),
                'motion_par_rc': ('motion_parameters_rc', text_format),
                'motion_par': ('motion_parameters', text_format),
                'offset_indexes': ('offset_indexes', text_format),
                'mats4average': ('mats4average', text_format),
                'severe_motion_detection_report':
                ('corrupted_volumes', text_format)
            })

        return pipeline

    def motion_framing_pipeline(self, **name_maps):
        """Build the pipeline that segments the acquisition into frames
        wherever displacement exceeds the configured thresholds."""

        pipeline = self.new_pipeline(
            name='motion_framing',
            desc=("Calculate when the head movement exceeded a "
                  "predefined threshold (default 2mm)."),
            citations=[fsl_cite],
            name_maps=name_maps)

        framing = pipeline.add(
            'motion_framing',
            MotionFraming(
                motion_threshold=self.parameter('framing_th'),
                temporal_threshold=self.parameter('framing_temporal_th'),
                pet_offset=self.parameter('pet_offset'),
                pet_duration=self.parameter('framing_duration')),
            inputs={
                'mean_displacement': ('mean_displacement', text_format),
                'mean_displacement_consec':
                ('mean_displacement_consecutive', text_format),
                'start_times': ('start_times', text_format)
            },
            outputs={
                'frame_start_times': ('frame_start_times', text_format),
                'frame_vol_numbers': ('frame_vol_numbers', text_format),
                'timestamps': ('timestamps_dir', directory_format)
            })

        # PET acquisition window is only wired in when PET data was supplied.
        if 'pet_data_dir' in self.input_names:
            pipeline.connect_input('pet_start_time', framing, 'pet_start_time')
            pipeline.connect_input('pet_end_time', framing, 'pet_end_time')

        return pipeline

    def plot_mean_displacement_pipeline(self, **name_maps):
        """Build the pipeline that plots displacement, rotation and
        translation against real (wall-clock) time."""

        pipeline = self.new_pipeline(
            name='plot_mean_displacement',
            desc=("Plot the mean displacement real clock"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'plot_md',
            PlotMeanDisplacementRC(framing=self.parameter('md_framing')),
            inputs={
                'mean_disp_rc': ('mean_displacement_rc', text_format),
                'false_indexes': ('offset_indexes', text_format),
                'frame_start_times': ('frame_start_times', text_format),
                'motion_par_rc': ('motion_par_rc', text_format)
            },
            outputs={
                'mean_displacement_plot': ('mean_disp_plot', png_format),
                'rotation_plot': ('rot_plot', png_format),
                'translation_plot': ('trans_plot', png_format)
            })

        return pipeline

    def frame_mean_transformation_mats_pipeline(self, **name_maps):
        """Build the pipeline that averages the affine transformation
        matrices falling within each detected frame."""

        pipeline = self.new_pipeline(
            name='frame_mean_transformation_mats',
            desc=("Average all the transformation mats within each "
                  "detected frame."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'mats_averaging',
            AffineMatAveraging(),
            inputs={
                'frame_vol_numbers': ('frame_vol_numbers', text_format),
                'all_mats4average': ('mats4average', text_format)
            },
            outputs={'average_mats': ('average_mats', directory_format)})

        return pipeline

    def fixed_binning_pipeline(self, **name_maps):
        """Build the pipeline producing per-bin average motion matrices for
        dynamic PET motion correction (fixed-length temporal bins)."""

        pipeline = self.new_pipeline(
            name='fixed_binning',
            desc=("Pipeline to generate average motion matrices for "
                  "each bin in a dynamic PET reconstruction experiment."
                  "This will be the input for the dynamic motion correction."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'fixed_binning',
            FixedBinning(n_frames=self.parameter('fixed_binning_n_frames'),
                         pet_offset=self.parameter('pet_offset'),
                         bin_len=self.parameter('fixed_binning_bin_len')),
            inputs={
                'start_times': ('start_times', text_format),
                'pet_start_time': ('pet_start_time', str),
                'pet_duration': ('pet_duration', int),
                'motion_mats': ('mats4average', text_format)
            },
            outputs={
                'fixed_binning_mats': ('average_bin_mats', directory_format)
            })

        return pipeline

    def pet_correction_factors_pipeline(self, **name_maps):
        """Build the pipeline that derives frame-duration correction factors
        used when averaging PET frames into a static image."""

        pipeline = self.new_pipeline(
            name='pet_correction_factors',
            desc=("Pipeline to calculate the correction factors to "
                  "account for frame duration when averaging the PET "
                  "frames to create the static PET image"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'pet_corr_factors',
            PetCorrectionFactor(),
            inputs={'timestamps': ('timestamps', directory_format)},
            outputs={'correction_factors': ('corr_factors', text_format)})

        return pipeline

    def nifti2dcm_conversion_pipeline(self, **name_maps):
        """Build the pipeline that converts the realigned umaps back from
        NIfTI to DICOM, frame by frame."""

        pipeline = self.new_pipeline(
            name='conversion_to_dicom',
            desc=("Conversing aligned umap from nifti to dicom format - "
                  "parallel implementation"),
            citations=(),
            name_maps=name_maps)

        list_niftis = pipeline.add(
            'list_niftis',
            ListDir(),
            inputs={'directory': ('umaps_align2ref', directory_format)})

        # Reorient each realigned umap to match the original DICOM umap.
        reorient_niftis = pipeline.add('reorient_niftis',
                                       ReorientUmap(),
                                       inputs={
                                           'niftis': (list_niftis, 'files'),
                                           'umap': ('umap', dicom_format)
                                       },
                                       requirements=[mrtrix_req.v('3.0rc3')])

        list_dicoms = pipeline.add(
            'list_dicoms',
            ListDir(sort_key=dicom_fname_sort_key),
            inputs={'directory': ('umap', dicom_format)})

        nii2dicom = pipeline.add(
            'nii2dicom',
            Nii2Dicom(
                # extension='Frame',  #  nii2dicom parameter
            ),
            inputs={'reference_dicom': (list_dicoms, 'files')},
            # NOTE(review): 'in_file' is an *input* of Nii2Dicom fed from
            # the reorient_niftis node, yet it is passed via ``outputs`` —
            # it looks like it belongs in the ``inputs`` dict; confirm
            # against the arcana Pipeline.add API before changing.
            outputs={'in_file': (reorient_niftis, 'reoriented_umaps')},
            iterfield=['in_file'],
            wall_time=20)

        pipeline.add(
            'copy2dir',
            CopyToDir(extension='Frame'),
            inputs={'in_files': (nii2dicom, 'out_file')},
            outputs={'umap_aligned_dicoms': ('out_dir', directory_format)})

        return pipeline

    def umap_realignment_pipeline(self, **name_maps):
        """Build the pipeline that realigns the attenuation umap (if one was
        provided) to the head position in each frame."""

        pipeline = self.new_pipeline(
            name='umap_realignment',
            desc=("Pipeline to align the original umap (if provided)"
                  "to match the head position in each frame and improve the "
                  "static PET image quality."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'umap2ref_alignment',
            UmapAlign2Reference(pct=self.parameter('align_pct')),
            inputs={
                'ute_regmat': ('umap_ref_coreg_matrix', text_matrix_format),
                'ute_qform_mat': ('umap_ref_qform_mat', text_matrix_format),
                'average_mats': ('average_mats', directory_format),
                'umap': ('umap', nifti_gz_format)
            },
            outputs={'umaps_align2ref': ('umaps_align2ref', directory_format)},
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def create_moco_series_pipeline(self, **name_maps):
        """This pipeline is probably wrong as we still do not know how to
        import back the new moco series into the scanner. This was just a first
        attempt.
        """

        pipeline = self.new_pipeline(
            name='create_moco_series',
            desc=("Pipeline to generate a moco_series that can be then "
                  "imported back in the scanner and used to correct the"
                  " pet data"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'create_moco_series',
            CreateMocoSeries(moco_template=self.parameter('moco_template')),
            inputs={
                'start_times': ('start_times', text_format),
                'motion_par': ('motion_par', text_format)
            },
            outputs={'moco_series': ('modified_moco', directory_format)})

        return pipeline

    def gather_outputs_pipeline(self, **name_maps):
        """Build the pipeline that collects all motion-detection outputs
        into a single output directory."""

        pipeline = self.new_pipeline(
            name='gather_motion_detection_outputs',
            desc=("Pipeline to gather together all the outputs from "
                  "the motion detection pipeline."),
            citations=[fsl_cite],
            name_maps=name_maps)

        merge_inputs = pipeline.add(
            'merge_inputs',
            Merge(5),
            inputs={
                'in1': ('mean_displacement_plot', png_format),
                'in2': ('motion_par', text_format),
                'in3': ('correction_factors', text_format),
                'in4': ('severe_motion_detection_report', text_format),
                'in5': ('timestamps', directory_format)
            })

        pipeline.add(
            'copy2dir',
            CopyToDir(),
            inputs={'in_files': (merge_inputs, 'out')},
            outputs={'motion_detection_output': ('out_dir', directory_format)})

        return pipeline

    # Pipelines delegated to the 'pet_mc' sub-study.
    prepare_pet_pipeline = MultiStudy.translate(
        'pet_mc', 'pet_data_preparation_pipeline')

    pet_header_extraction_pipeline = MultiStudy.translate(
        'pet_mc', 'pet_time_info_extraction_pipeline')

    def motion_correction_pipeline(self, **name_maps):
        """Build the PET motion-correction pipeline.

        Branches on the 'dynamic_pet_mc' parameter (dynamic vs static),
        on the presence of a 'struct2align' input (optional structural
        alignment) and on 'PET2MNI_reg' (optional normalisation to MNI).
        """

        # Optional structural alignment is enabled only when a
        # 'struct2align' input was supplied to the study.
        if 'struct2align' in self.input_names:
            StructAlignment = True
        else:
            StructAlignment = False

        pipeline = self.new_pipeline(
            name='pet_mc',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will generate a motion corrected PET"
                  "image using information extracted from the MR-based "
                  "motion detection pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        check_pet = pipeline.add(
            'check_pet_data',
            CheckPetMCInputs(),
            inputs={
                'pet_data': ('pet_data_prepared', directory_format),
                'reference': ('ref_brain', nifti_gz_format)
            },
            requirements=[fsl_req.v('5.0.9'),
                          mrtrix_req.v('3.0rc3')])
        # Dynamic runs use fixed-binning mats; static runs use per-frame
        # averaged mats plus frame-duration correction factors.
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_input('fixed_binning_mats', check_pet,
                                   'motion_mats')
        else:
            pipeline.connect_input('average_mats', check_pet, 'motion_mats')
            pipeline.connect_input('correction_factors', check_pet,
                                   'corr_factors')

        if StructAlignment:
            struct_reg = pipeline.add('ref2structural_reg',
                                      FLIRT(dof=6,
                                            cost_func='normmi',
                                            cost='normmi',
                                            output_type='NIFTI_GZ'),
                                      inputs={
                                          'reference':
                                          ('ref_brain', nifti_gz_format),
                                          'in_file':
                                          ('struct2align', nifti_gz_format)
                                      },
                                      requirements=[fsl_req.v('5.0.9')])

        if self.branch('dynamic_pet_mc'):
            pet_mc = pipeline.add('pet_mc',
                                  PetImageMotionCorrection(),
                                  inputs={
                                      'pet_image': (check_pet, 'pet_images'),
                                      'motion_mat': (check_pet, 'motion_mats'),
                                      'pet2ref_mat': (check_pet, 'pet2ref_mat')
                                  },
                                  requirements=[fsl_req.v('5.0.9')],
                                  iterfield=['pet_image', 'motion_mat'])
        else:
            pet_mc = pipeline.add(
                'pet_mc',
                PetImageMotionCorrection(),
                inputs={'corr_factor': (check_pet, 'corr_factors')},
                requirements=[fsl_req.v('5.0.9')],
                iterfield=['corr_factor', 'pet_image', 'motion_mat'])

        if StructAlignment:
            pipeline.connect(struct_reg, 'out_matrix_file', pet_mc,
                             'structural2ref_regmat')
            pipeline.connect_input('struct2align', pet_mc, 'structural_image')
        if self.parameter('PET2MNI_reg'):
            mni_reg = True
        else:
            mni_reg = False

        if self.branch('dynamic_pet_mc'):
            # Dynamic: stack the per-frame images into 4D series
            # (corrected and uncorrected).
            merge_mc = pipeline.add(
                'merge_pet_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_mc_image')},
                requirements=[fsl_req.v('5.0.9')])

            merge_no_mc = pipeline.add(
                'merge_pet_no_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_no_mc_image')},
                requirements=[fsl_req.v('5.0.9')])
        else:
            # Static: collapse the frames into single corrected and
            # uncorrected images.
            static_mc = pipeline.add('static_mc_generation',
                                     StaticPETImageGeneration(),
                                     inputs={
                                         'pet_mc_images':
                                         (pet_mc, 'pet_mc_image'),
                                         'pet_no_mc_images':
                                         (pet_mc, 'pet_no_mc_image')
                                     },
                                     requirements=[fsl_req.v('5.0.9')])

        merge_outputs = pipeline.add(
            'merge_outputs',
            Merge(3),
            inputs={'in1': ('mean_displacement_plot', png_format)})

        if not StructAlignment:
            # No structural alignment: crop PET FOV before (optionally)
            # registering to the MNI PET template.
            cropping = pipeline.add(
                'pet_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', cropping,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_mc', cropping, 'pet_image')

            cropping_no_mc = pipeline.add(
                'pet_no_mc_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_no_mc, 'merged_file', cropping_no_mc,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_no_mc', cropping_no_mc,
                                 'pet_image')

            if mni_reg:
                if self.branch('dynamic_pet_mc'):
                    t_mean = pipeline.add(
                        'PET_temporal_mean',
                        ImageMaths(op_string='-Tmean'),
                        inputs={'in_file': (cropping, 'pet_cropped')},
                        requirements=[fsl_req.v('5.0.9')])

                reg_tmean2MNI = pipeline.add(
                    'reg2MNI',
                    AntsRegSyn(num_dimensions=3,
                               transformation='s',
                               out_prefix='reg2MNI',
                               num_threads=4,
                               ref_file=self.parameter('PET_template_MNI')),
                    wall_time=25,
                    requirements=[ants_req.v('2')])

                if self.branch('dynamic_pet_mc'):
                    pipeline.connect(t_mean, 'out_file', reg_tmean2MNI,
                                     'input_file')

                    merge_trans = pipeline.add('merge_transforms',
                                               Merge(2),
                                               inputs={
                                                   'in1': (reg_tmean2MNI,
                                                           'warp_file'),
                                                   'in2':
                                                   (reg_tmean2MNI, 'regmat')
                                               },
                                               wall_time=1)

                    apply_trans = pipeline.add(
                        'apply_trans',
                        ApplyTransforms(
                            reference_image=self.parameter('PET_template_MNI'),
                            interpolation='Linear',
                            input_image_type=3),
                        inputs={
                            'input_image': (cropping, 'pet_cropped'),
                            'transforms': (merge_trans, 'out')
                        },
                        wall_time=7,
                        mem_gb=24,
                        requirements=[ants_req.v('2')])
                    # NOTE(review): the trailing comma below makes this
                    # statement a 1-tuple expression — harmless at runtime
                    # but almost certainly unintended.
                    pipeline.connect(apply_trans, 'output_image',
                                     merge_outputs, 'in2'),
                else:
                    pipeline.connect(cropping, 'pet_cropped', reg_tmean2MNI,
                                     'input_file')
                    pipeline.connect(reg_tmean2MNI, 'reg_file', merge_outputs,
                                     'in2')
            else:
                pipeline.connect(cropping, 'pet_cropped', merge_outputs, 'in2')
            pipeline.connect(cropping_no_mc, 'pet_cropped', merge_outputs,
                             'in3')
        else:
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', merge_outputs, 'in2')
                pipeline.connect(merge_no_mc, 'merged_file', merge_outputs,
                                 'in3')
            else:
                pipeline.connect(static_mc, 'static_mc', merge_outputs, 'in2')
                pipeline.connect(static_mc, 'static_no_mc', merge_outputs,
                                 'in3')


#         mcflirt = pipeline.add('mcflirt', MCFLIRT())
#                 'in_file': (merge_mc_ps, 'merged_file'),
#                 cost='normmi',

        copy2dir = pipeline.add('copy2dir',
                                CopyToDir(),
                                inputs={'in_files': (merge_outputs, 'out')})
        # Route the gathered outputs to the spec matching the branch taken.
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_output('dynamic_motion_correction_results',
                                    copy2dir, 'out_dir')
        else:
            pipeline.connect_output('static_motion_correction_results',
                                    copy2dir, 'out_dir')
        return pipeline
Exemple #4
0
def create_motion_detection_class(name,
                                  ref=None,
                                  ref_type=None,
                                  t1s=None,
                                  t2s=None,
                                  dwis=None,
                                  epis=None,
                                  pet_data_dir=None):
    """Dynamically create a motion-detection MultiStudy class.

    Builds sub-study specs and input selectors for the supplied scans and
    returns a new class derived from ``MotionDetectionMixin`` together with
    the list of inputs to feed into it.

    Parameters
    ----------
    name : str
        Name of the generated class.
    ref : str
        Name/pattern of the reference scan (required).
    ref_type : str
        Contrast of the reference scan: 't1' or 't2'.
    t1s, t2s, epis : list[str] | None
        Names of the T1-weighted, T2-weighted and EPI series to include.
    dwis : list[tuple[str, str]] | None
        (scan_name, phase_encoding) pairs where the second element is
        '0' (main DWI), '1' (b0 reference) or '-1' (opposite phase
        encoding direction).
    pet_data_dir : str | None
        Directory containing PET data, if PET processing is required.

    Returns
    -------
    tuple[type, list]
        The generated study class and the corresponding input selectors.

    Raises
    ------
    Exception
        If no reference is given, ``ref_type`` is unrecognized, or no
        scan other than the reference is provided.
    """
    inputs = []
    dct = {}
    data_specs = []
    run_pipeline = False
    param_specs = [ParamSpec('ref_resampled_resolution', [1])]

    if pet_data_dir is not None:
        inputs.append(
            InputFilesets('pet_data_dir', 'pet_data_dir', directory_format))

    if not ref:
        raise Exception('A reference image must be provided!')
    if ref_type == 't1':
        ref_study = T1Study
    elif ref_type == 't2':
        ref_study = T2Study
    else:
        raise Exception('{} is not a recognized ref_type! The available '
                        'ref_types are t1 or t2.'.format(ref_type))

    study_specs = [SubStudySpec('ref', ref_study)]
    # All other sub-studies coregister to the reference brain.
    ref_spec = {'coreg_ref_brain': 'ref_brain'}
    inputs.append(InputFilesets('ref_magnitude', ref, dicom_format))

    if t1s:
        study_specs.extend([
            SubStudySpec('t1_{}'.format(i), T1Study, ref_spec)
            for i in range(len(t1s))
        ])
        inputs.extend(
            InputFilesets('t1_{}_magnitude'.format(i), t1_scan, dicom_format)
            for i, t1_scan in enumerate(t1s))
        run_pipeline = True

    if t2s:
        study_specs.extend([
            SubStudySpec('t2_{}'.format(i), T2Study, ref_spec)
            for i in range(len(t2s))
        ])
        inputs.extend(
            InputFilesets('t2_{}_magnitude'.format(i), t2_scan, dicom_format)
            for i, t2_scan in enumerate(t2s))
        run_pipeline = True

    if epis:
        # EPIs additionally need the WM segmentation and preprocessed
        # magnitude of the reference for BBR-style coregistration.
        epi_refspec = ref_spec.copy()
        epi_refspec.update({
            'coreg_ref_wmseg': 'ref_wm_seg',
            'coreg_ref': 'ref_mag_preproc'
        })
        study_specs.extend(
            SubStudySpec('epi_{}'.format(i), EpiSeriesStudy, epi_refspec)
            for i in range(len(epis)))
        inputs.extend(
            InputFilesets('epi_{}_series'.format(i), epi_scan, dicom_format)
            for i, epi_scan in enumerate(epis))
        run_pipeline = True
    if dwis:
        unused_dwi = []
        # Partition DWI scans by their phase-encoding tag.
        dwis_main = [x for x in dwis if x[-1] == '0']
        dwis_ref = [x for x in dwis if x[-1] == '1']
        dwis_opposite = [x for x in dwis if x[-1] == '-1']
        b0_refspec = ref_spec.copy()
        b0_refspec.update({
            'coreg_ref_wmseg': 'ref_wm_seg',
            'coreg_ref': 'ref_mag_preproc'
        })
        if dwis_main and not dwis_opposite:
            logger.warning(
                'No opposite phase encoding direction b0 provided. DWI '
                'motion correction will be performed without distortion '
                'correction. THIS IS SUB-OPTIMAL!')
            study_specs.extend(
                SubStudySpec('dwi_{}'.format(i), DwiStudy, ref_spec)
                for i in range(len(dwis_main)))
            inputs.extend(
                InputFilesets('dwi_{}_series'.format(i), dwis_main_scan[0],
                              dicom_format)
                for i, dwis_main_scan in enumerate(dwis_main))
        if dwis_main and dwis_opposite:
            study_specs.extend(
                SubStudySpec('dwi_{}'.format(i), DwiStudy, ref_spec)
                for i in range(len(dwis_main)))
            inputs.extend(
                InputFilesets('dwi_{}_series'.format(i), dwis_main[i][0],
                              dicom_format) for i in range(len(dwis_main)))
            # Pair each main DWI with an opposite-direction b0; reuse the
            # first opposite scan when there are fewer of them.
            if len(dwis_main) <= len(dwis_opposite):
                inputs.extend(
                    InputFilesets('dwi_{}_magnitude'.format(i),
                                  dwis_opposite[i][0], dicom_format)
                    for i in range(len(dwis_main)))
            else:
                inputs.extend(
                    InputFilesets('dwi_{}_magnitude'.format(i),
                                  dwis_opposite[0][0], dicom_format)
                    for i in range(len(dwis_main)))
        if dwis_opposite and dwis_main and not dwis_ref:
            study_specs.extend(
                SubStudySpec('b0_{}'.format(i), DwiRefStudy, b0_refspec)
                for i in range(len(dwis_opposite)))
            inputs.extend(
                InputFilesets('b0_{}_magnitude'.format(i), dwis_opposite[i][0],
                              dicom_format) for i in range(len(dwis_opposite)))
            if len(dwis_opposite) <= len(dwis_main):
                inputs.extend(
                    InputFilesets('b0_{}_reverse_phase'.format(i), dwis_main[i]
                                  [0], dicom_format)
                    for i in range(len(dwis_opposite)))
            else:
                inputs.extend(
                    InputFilesets('b0_{}_reverse_phase'.format(i), dwis_main[0]
                                  [0], dicom_format)
                    for i in range(len(dwis_opposite)))
        elif dwis_opposite and dwis_ref:
            # Pair opposite/reference b0s one-to-one; leftovers are
            # handled below as "unused" scans.
            min_index = min(len(dwis_opposite), len(dwis_ref))
            study_specs.extend(
                SubStudySpec('b0_{}'.format(i), DwiRefStudy, b0_refspec)
                for i in range(min_index * 2))
            inputs.extend(
                InputFilesets('b0_{}_magnitude'.format(i), scan[0],
                              dicom_format)
                for i, scan in enumerate(dwis_opposite[:min_index] +
                                         dwis_ref[:min_index]))
            inputs.extend(
                InputFilesets('b0_{}_reverse_phase'.format(i), scan[0],
                              dicom_format)
                for i, scan in enumerate(dwis_ref[:min_index] +
                                         dwis_opposite[:min_index]))
            unused_dwi = dwis_ref[min_index:] + dwis_opposite[min_index:]
        elif dwis_opposite or dwis_ref:
            unused_dwi = dwis_ref + dwis_opposite
        if unused_dwi:
            # t2s may be None when only DWI scans were supplied; treat
            # it as empty so the T2 indices continue from zero instead of
            # raising TypeError on len(None).
            n_t2s = len(t2s) if t2s else 0
            logger.info(
                'The following scans:\n{}\nwere not assigned during the DWI '
                'motion detection initialization (probably a different number '
                'of main DWI scans and b0 images was provided). They will be '
                'processed as "other" scans.'.format('\n'.join(
                    s[0] for s in unused_dwi)))
            study_specs.extend(
                SubStudySpec('t2_{}'.format(i), T2Study, ref_spec)
                for i in range(n_t2s, n_t2s + len(unused_dwi)))
            inputs.extend(
                InputFilesets('t2_{}_magnitude'.format(i), scan[0],
                              dicom_format)
                for i, scan in enumerate(unused_dwi, start=n_t2s))
        run_pipeline = True

    if not run_pipeline:
        raise Exception('At least one scan, other than the reference, must be '
                        'provided!')

    dct['add_substudy_specs'] = study_specs
    dct['add_data_specs'] = data_specs
    dct['__metaclass__'] = MultiStudyMetaClass
    dct['add_param_specs'] = param_specs
    return MultiStudyMetaClass(name, (MotionDetectionMixin, ), dct), inputs
Exemple #5
0
class BoldStudy(EpiSeriesStudy, metaclass=StudyMetaClass):
    """BOLD fMRI analysis study.

    Provides pipelines for rsfMRI motion correction and temporal
    filtering (AFNI), single-subject MELODIC ICA, ICA-based denoising
    with FSL FIX (preparation, classification and signal regression),
    normalisation of the cleaned time-series to a template atlas (ANTs)
    and spatial smoothing (AFNI).
    """

    desc = "Functional MRI BOLD MRI contrast"

    add_data_specs = [
        # Optional pre-trained FIX classifier data (one per study)
        InputFilesetSpec('train_data',
                         rfile_format,
                         optional=True,
                         frequency='per_study'),
        FilesetSpec('hand_label_noise', text_format,
                    'fix_preparation_pipeline'),
        FilesetSpec('labelled_components', text_format,
                    'fix_classification_pipeline'),
        FilesetSpec('cleaned_file', nifti_gz_format,
                    'fix_regression_pipeline'),
        FilesetSpec('filtered_data', nifti_gz_format,
                    'rsfMRI_filtering_pipeline'),
        FilesetSpec('mc_par', par_format, 'rsfMRI_filtering_pipeline'),
        FilesetSpec('melodic_ica', zip_format,
                    'single_subject_melodic_pipeline'),
        FilesetSpec('fix_dir', zip_format, 'fix_preparation_pipeline'),
        FilesetSpec('normalized_ts', nifti_gz_format,
                    'timeseries_normalization_to_atlas_pipeline'),
        FilesetSpec('smoothed_ts', nifti_gz_format, 'smoothing_pipeline')
    ]

    add_param_specs = [
        # Number of components used by FIX classification
        ParamSpec('component_threshold', 20),
        ParamSpec('motion_reg', True),
        # High-pass cutoff (Hz) for the FIX-equivalent signal regression
        ParamSpec('highpass', 0.01),
        # Brain/background threshold (percent) passed to MELODIC
        ParamSpec('brain_thresh_percent', 5),
        ParamSpec('group_ica_components', 15)
    ]

    primary_bids_selector = BidsInputs(spec_name='series',
                                       type='bold',
                                       valid_formats=(nifti_gz_x_format,
                                                      nifti_gz_format))

    default_bids_inputs = [
        primary_bids_selector,
        BidsAssocInput(spec_name='field_map_phase',
                       primary=primary_bids_selector,
                       association='phasediff',
                       format=nifti_gz_format,
                       drop_if_missing=True),
        BidsAssocInput(spec_name='field_map_mag',
                       primary=primary_bids_selector,
                       association='phasediff',
                       type='magnitude',
                       format=nifti_gz_format,
                       drop_if_missing=True)
    ]

    def rsfMRI_filtering_pipeline(self, **name_maps):
        """Motion-correct (AFNI 3dvolreg) and temporally filter
        (AFNI 3dTproject) the preprocessed series, then re-add the
        temporal mean that 3dTproject removes.
        """
        pipeline = self.new_pipeline(
            name='rsfMRI_filtering',
            desc=("Spatial and temporal rsfMRI filtering"),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Rigid-body motion correction; the 1D parameter file is also
        # exposed as the 'mc_par' spec for later use by FIX.
        afni_mc = pipeline.add(
            'AFNI_MC',
            Volreg(zpad=1,
                   out_file='rsfmri_mc.nii.gz',
                   oned_file='prefiltered_func_data_mcf.par'),
            inputs={'in_file': ('series_preproc', nifti_gz_format)},
            outputs={'mc_par': ('oned_file', par_format)},
            wall_time=5,
            requirements=[afni_req.v('16.2.10')])

        # High-pass filter (stopband 0-0.01 Hz), detrend and smooth
        filt = pipeline.add('Tproject',
                            Tproject(stopband=(0, 0.01),
                                     polort=3,
                                     blur=3,
                                     out_file='filtered_func_data.nii.gz'),
                            inputs={
                                'delta_t': ('tr', float),
                                'mask':
                                (self.brain_mask_spec_name, nifti_gz_format),
                                'in_file': (afni_mc, 'out_file')
                            },
                            wall_time=5,
                            requirements=[afni_req.v('16.2.10')])

        # Temporal mean of the motion-corrected (unfiltered) series
        meanfunc = pipeline.add('meanfunc',
                                ImageMaths(op_string='-Tmean',
                                           suffix='_mean',
                                           output_type='NIFTI_GZ'),
                                wall_time=5,
                                inputs={'in_file': (afni_mc, 'out_file')},
                                requirements=[fsl_req.v('5.0.10')])

        # 3dTproject outputs a zero-mean series; restore the mean so
        # downstream tools (e.g. MELODIC) see realistic intensities.
        pipeline.add('add_mean',
                     ImageMaths(op_string='-add', output_type='NIFTI_GZ'),
                     inputs={
                         'in_file': (filt, 'out_file'),
                         'in_file2': (meanfunc, 'out_file')
                     },
                     outputs={'filtered_data': ('out_file', nifti_gz_format)},
                     wall_time=5,
                     requirements=[fsl_req.v('5.0.10')])

        return pipeline

    def single_subject_melodic_pipeline(self, **name_maps):
        """Run single-subject (first-level) ICA on the filtered data
        using FSL MELODIC.
        """
        pipeline = self.new_pipeline(
            name='MelodicL1',
            desc=("Single subject ICA analysis using FSL MELODIC."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add('melodic_L1',
                     MELODIC(
                         no_bet=True,
                         bg_threshold=self.parameter('brain_thresh_percent'),
                         report=True,
                         out_stats=True,
                         mm_thresh=0.5,
                         out_dir='melodic_ica',
                         output_type='NIFTI_GZ'),
                     inputs={
                         'mask': (self.brain_mask_spec_name, nifti_gz_format),
                         'tr_sec': ('tr', float),
                         'in_files': ('filtered_data', nifti_gz_format)
                     },
                     outputs={'melodic_ica': ('out_dir', directory_format)},
                     wall_time=15,
                     requirements=[fsl_req.v('5.0.10')])

        return pipeline

    def fix_preparation_pipeline(self, **name_maps):
        """Assemble the FEAT-style directory structure (transforms,
        motion parameters, mean functional, MELODIC output) that FSL
        FIX expects as its input.
        """
        pipeline = self.new_pipeline(
            name='prepare_fix',
            desc=("Pipeline to create the right folder structure before "
                  "running FIX"),
            citations=[fsl_cite],
            name_maps=name_maps)

        if self.branch('coreg_to_tmpl_method', 'ants'):

            # Convert the ANTs (ITK) structural->template transform to
            # an FSL-convention matrix
            struct_ants2fsl = pipeline.add(
                'struct_ants2fsl',
                ANTs2FSLMatrixConversion(ras2fsl=True),
                inputs={
                    'reference_file': ('template_brain', nifti_gz_format),
                    'itk_file': ('coreg_to_tmpl_ants_mat', text_matrix_format),
                    'source_file': ('coreg_ref_brain', nifti_gz_format)
                },
                requirements=[c3d_req.v('1.0.0')])

            struct_matrix = (struct_ants2fsl, 'fsl_matrix')
        else:
            struct_matrix = ('coreg_to_tmpl_fsl_mat', text_matrix_format)


#         if self.branch('coreg_method', 'ants'):
#         epi_ants2fsl = pipeline.add(
#             'epi_ants2fsl',
#             ANTs2FSLMatrixConversion(
#                 ras2fsl=True),
#             inputs={
#                 'source_file': ('brain', nifti_gz_format),
#                 'itk_file': ('coreg_ants_mat', text_matrix_format),
#                 'reference_file': ('coreg_ref_brain', nifti_gz_format)},
#             requirements=[c3d_req.v('1.0.0')])

        # Invert T1->MNI to get MNI->T1
        MNI2t1 = pipeline.add('MNI2t1',
                              ConvertXFM(invert_xfm=True),
                              inputs={'in_file': struct_matrix},
                              wall_time=5,
                              requirements=[fsl_req.v('5.0.9')])

        # Invert EPI->T1 to get T1->EPI
        struct2epi = pipeline.add(
            'struct2epi',
            ConvertXFM(invert_xfm=True),
            inputs={'in_file': ('coreg_fsl_mat', text_matrix_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        meanfunc = pipeline.add(
            'meanfunc',
            ImageMaths(op_string='-Tmean',
                       suffix='_mean',
                       output_type='NIFTI_GZ'),
            inputs={'in_file': ('series_preproc', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        pipeline.add('prep_fix',
                     PrepareFIX(),
                     inputs={
                         'melodic_dir': ('melodic_ica', directory_format),
                         't1_brain': ('coreg_ref_brain', nifti_gz_format),
                         'mc_par': ('mc_par', par_format),
                         'epi_brain_mask': ('brain_mask', nifti_gz_format),
                         'epi_preproc': ('series_preproc', nifti_gz_format),
                         'filtered_epi': ('filtered_data', nifti_gz_format),
                         'epi2t1_mat': ('coreg_fsl_mat', text_matrix_format),
                         # FIX: was (struct_ants2fsl, 'fsl_matrix'), which
                         # raised NameError on the non-ANTs branch; use the
                         # branch-safe struct_matrix computed above instead.
                         't12MNI_mat': struct_matrix,
                         'MNI2t1_mat': (MNI2t1, 'out_file'),
                         't12epi_mat': (struct2epi, 'out_file'),
                         'epi_mean': (meanfunc, 'out_file')
                     },
                     outputs={
                         'fix_dir': ('fix_dir', directory_format),
                         'hand_label_noise': ('hand_label_file', text_format)
                     })

        return pipeline

    def fix_classification_pipeline(self, **name_maps):
        """Classify ICA components as signal/noise with FSL FIX using
        the provided training data.
        """
        pipeline = self.new_pipeline(
            name='fix_classification',
            desc=("Automatic classification of noisy components from the "
                  "rsfMRI data using fsl FIX."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            "fix",
            FSLFIX(component_threshold=self.parameter('component_threshold'),
                   motion_reg=self.parameter('motion_reg'),
                   classification=True),
            inputs={
                "feat_dir": ("fix_dir", directory_format),
                "train_data": ("train_data", rfile_format)
            },
            outputs={'labelled_components': ('label_file', text_format)},
            wall_time=30,
            requirements=[fsl_req.v('5.0.9'),
                          fix_req.v('1.0')])

        return pipeline

    def fix_regression_pipeline(self, **name_maps):
        """Regress the FIX-labelled noise components (and optionally
        motion parameters) out of the data.
        """
        pipeline = self.new_pipeline(
            name='signal_regression',
            desc=("Regression of the noisy components from the rsfMRI data "
                  "using a python implementation equivalent to that in FIX."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            "signal_reg",
            SignalRegression(motion_regression=self.parameter('motion_reg'),
                             highpass=self.parameter('highpass')),
            inputs={
                "fix_dir": ("fix_dir", directory_format),
                "labelled_components": ("labelled_components", text_format)
            },
            outputs={'cleaned_file': ('output', nifti_gz_format)},
            wall_time=30,
            requirements=[fsl_req.v('5.0.9'),
                          fix_req.v('1.0')])

        return pipeline

    def timeseries_normalization_to_atlas_pipeline(self, **name_maps):
        """Warp the cleaned time-series to the template space by
        composing the EPI->T1, T1->template affine and template warp.
        """
        pipeline = self.new_pipeline(
            name='timeseries_normalization_to_atlas_pipeline',
            desc=("Apply ANTs transformation to the fmri filtered file to "
                  "normalize it to MNI 2mm."),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Transforms are applied by ANTs in reverse list order
        merge_trans = pipeline.add('merge_transforms',
                                   NiPypeMerge(3),
                                   inputs={
                                       'in1': ('coreg_to_tmpl_ants_warp',
                                               nifti_gz_format),
                                       'in2': ('coreg_to_tmpl_ants_mat',
                                               text_matrix_format),
                                       'in3':
                                       ('coreg_matrix', text_matrix_format)
                                   },
                                   wall_time=1)

        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(interpolation='Linear', input_image_type=3),
            inputs={
                'reference_image': ('template_brain', nifti_gz_format),
                'input_image': ('cleaned_file', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            outputs={'normalized_ts': ('output_image', nifti_gz_format)},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2')])

        return pipeline

    def smoothing_pipeline(self, **name_maps):
        """Spatially smooth the normalised time-series to 5 mm FWHM
        with AFNI 3dBlurToFWHM.
        """
        pipeline = self.new_pipeline(
            name='smoothing_pipeline',
            desc=("Spatial smoothing of the normalized fmri file"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add('3dBlurToFWHM',
                     BlurToFWHM(fwhm=5, out_file='smoothed_ts.nii.gz'),
                     inputs={
                         'mask': ('template_mask', nifti_gz_format),
                         'in_file': ('normalized_ts', nifti_gz_format)
                     },
                     outputs={'smoothed_ts': ('out_file', nifti_gz_format)},
                     wall_time=5,
                     requirements=[afni_req.v('16.2.10')])

        return pipeline
Exemple #6
0
def create_multi_fmri_class(name,
                            t1,
                            epis,
                            epi_number,
                            echo_spacing,
                            fm_mag=None,
                            fm_phase=None,
                            run_regression=False):
    """Dynamically build a multi-EPI fMRI MultiStudy class.

    Creates one T1 sub-study plus ``epi_number`` BOLD sub-studies, wiring
    the T1 outputs as co-registration references for each EPI. EPI
    distortion correction is enabled only when both field-map magnitude
    and phase images are supplied.

    Parameters
    ----------
    name : str
        Name of the generated class.
    t1 : str
        Regex pattern matching the primary T1 scan.
    epis : str
        Regex pattern matching the EPI (BOLD) scans.
    epi_number : int
        Number of EPI scans to include.
    echo_spacing : float
        EPI echo spacing, used to set the FUGUE parameter per sub-study.
    fm_mag, fm_phase : str, optional
        Regex patterns for the field-map magnitude/phase scans.
    run_regression : bool
        If True, the smoothed time-series are the requested outputs;
        otherwise only the FIX directories are requested.

    Returns
    -------
    tuple
        (generated MultiStudy class, list of inputs, list of output spec
        names)
    """
    inputs = []
    dct = {}
    data_specs = []
    param_specs = []
    output_files = []
    distortion_correction = False

    if fm_mag and fm_phase:
        logger.info('Both magnitude and phase field map images provided. EPI '
                    'distortion correction will be performed.')
        distortion_correction = True
    elif fm_mag or fm_phase:
        logger.info(
            'In order to perform EPI distortion correction both magnitude '
            'and phase field map images must be provided.')
    else:
        logger.info(
            'No field map image provided. Distortion correction will not be '
            'performed.')

    study_specs = [SubStudySpec('t1', T1Study)]
    ref_spec = {'t1_brain': 'coreg_ref_brain'}
    inputs.append(
        InputFilesets('t1_primary', t1, dicom_format, is_regex=True, order=0))
    epi_refspec = ref_spec.copy()
    epi_refspec.update({
        't1_wm_seg': 'coreg_ref_wmseg',
        't1_preproc': 'coreg_ref',
        'train_data': 'train_data'
    })
    study_specs.append(SubStudySpec('epi_0', BoldStudy, epi_refspec))
    if epi_number > 1:
        # Subsequent EPIs additionally reuse epi_0's template registration
        epi_refspec.update({
            't1_wm_seg':
            'coreg_ref_wmseg',
            't1_preproc':
            'coreg_ref',
            'train_data':
            'train_data',
            'epi_0_coreg_to_tmpl_warp':
            'coreg_to_tmpl_warp',
            'epi_0_coreg_to_tmpl_ants_mat':
            'coreg_to_tmpl_ants_mat'
        })
        study_specs.extend(
            SubStudySpec('epi_{}'.format(i), BoldStudy, epi_refspec)
            for i in range(1, epi_number))

    # NOTE: a second, unconditional extend over range(epi_number) was
    # removed here: it duplicated the epi_0 spec appended above and the
    # epi_1..n-1 specs added in the branch.

    for i in range(epi_number):
        inputs.append(
            InputFilesets('epi_{}_primary'.format(i),
                          epis,
                          dicom_format,
                          order=i,
                          is_regex=True))
        param_specs.append(
            ParamSpec('epi_{}_fugue_echo_spacing'.format(i), echo_spacing))

    if distortion_correction:
        inputs.extend(
            InputFilesets('epi_{}_field_map_mag'.format(i),
                          fm_mag,
                          dicom_format,
                          dicom_tags={IMAGE_TYPE_TAG: MAG_IMAGE_TYPE},
                          is_regex=True,
                          order=0) for i in range(epi_number))
        inputs.extend(
            InputFilesets('epi_{}_field_map_phase'.format(i),
                          fm_phase,
                          dicom_format,
                          dicom_tags={IMAGE_TYPE_TAG: PHASE_IMAGE_TYPE},
                          is_regex=True,
                          order=0) for i in range(epi_number))
    if run_regression:
        output_files.extend('epi_{}_smoothed_ts'.format(i)
                            for i in range(epi_number))
    else:
        output_files.extend('epi_{}_fix_dir'.format(i)
                            for i in range(epi_number))

    dct['add_substudy_specs'] = study_specs
    dct['add_data_specs'] = data_specs
    dct['add_param_specs'] = param_specs
    dct['__metaclass__'] = MultiStudyMetaClass
    return (MultiStudyMetaClass(name, (MultiBoldMixin, ),
                                dct), inputs, output_files)
Exemple #7
0
class DynamicPetStudy(PetStudy, metaclass=StudyMetaClass):
    """Dynamic (4D) PET study.

    Adds pipelines to extract a single reference volume, register the
    4D time-series to a template, remove the global baseline trend and
    perform dual regression against a supplied regression map.
    """

    add_data_specs = [
        InputFilesetSpec('pet_volumes', nifti_gz_format),
        InputFilesetSpec('regression_map', nifti_gz_format),
        FilesetSpec('pet_image', nifti_gz_format, 'Extract_vol_pipeline'),
        FilesetSpec('registered_volumes', nifti_gz_format,
                    'ApplyTransform_pipeline'),
        FilesetSpec('detrended_volumes', nifti_gz_format,
                    'Baseline_Removal_pipeline'),
        FilesetSpec('spatial_map', nifti_gz_format,
                    'Dual_Regression_pipeline'),
        FilesetSpec('ts', png_format, 'Dual_Regression_pipeline')
    ]

    add_param_specs = [
        ParamSpec('trans_template',
                  os.path.join(template_path, 'PET_template.nii.gz')),
        ParamSpec('base_remove_th', 0),
        ParamSpec('base_remove_binarize', False),
        ParamSpec('regress_th', 0),
        ParamSpec('regress_binarize', False)
    ]

    primary_scan_name = 'pet_volumes'

    def Extract_vol_pipeline(self, **kwargs):
        """Extract a single reference volume from the 4D PET series.

        NOTE(review): the volume index (t_min=79) is hard-coded and
        assumes at least 80 frames — confirm against the acquisition.
        """
        pipeline = self.new_pipeline(
            name='Extract_volume',
            desc=('Extract the last volume of the 4D PET timeseries'),
            citations=[],
            **kwargs)

        pipeline.add('fslroi',
                     ExtractROI(roi_file='vol.nii.gz', t_min=79, t_size=1),
                     inputs={'in_file': ('pet_volumes', nifti_gz_format)},
                     outputs={'pet_image': ('roi_file', nifti_gz_format)})

        return pipeline

    def ApplyTransform_pipeline(self, **kwargs):
        """Apply the warp + affine to the 4D PET time-series to bring
        it into the template space.
        """
        pipeline = self.new_pipeline(
            name='applytransform',
            desc=('Apply transformation the the 4D PET timeseries'),
            citations=[],
            **kwargs)

        # Transforms are applied by ANTs in reverse list order
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(2),
                                   inputs={
                                       'in1': ('warp_file', nifti_gz_format),
                                       'in2':
                                       ('affine_mat', text_matrix_format)
                                   })

        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(reference_image=self.parameter('trans_template'),
                            interpolation='Linear',
                            input_image_type=3),
            inputs={
                'input_image': ('pet_volumes', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            outputs={'registered_volumes': ('output_image', nifti_gz_format)})

        return pipeline

    def Baseline_Removal_pipeline(self, **kwargs):
        """Remove the global baseline trend from the registered
        time-series.
        """
        # FIX: description previously said 'PET dual regression'
        # (copy-paste from Dual_Regression_pipeline)
        pipeline = self.new_pipeline(name='Baseline_removal',
                                     desc=('PET baseline removal'),
                                     citations=[],
                                     **kwargs)

        pipeline.add(
            'Baseline_removal',
            GlobalTrendRemoval(),
            inputs={'volume': ('registered_volumes', nifti_gz_format)},
            outputs={'detrended_volumes': ('detrended_file', nifti_gz_format)})

        return pipeline

    def Dual_Regression_pipeline(self, **kwargs):
        """Dual regression of the detrended volumes against the
        provided regression map, yielding a spatial map and timecourse.
        """
        pipeline = self.new_pipeline(name='Dual_regression',
                                     desc=('PET dual regression'),
                                     citations=[],
                                     **kwargs)

        pipeline.add('PET_dr',
                     PETdr(threshold=self.parameter('regress_th'),
                           binarize=self.parameter('regress_binarize')),
                     inputs={
                         'volume': ('detrended_volumes', nifti_gz_format),
                         'regression_map': ('regression_map', nifti_gz_format)
                     },
                     outputs={
                         'spatial_map': ('spatial_map', nifti_gz_format),
                         'ts': ('timecourse', png_format)
                     })

        return pipeline
Exemple #8
0
class T1Study(T2Study, metaclass=StudyMetaClass):
    """T1-weighted MRI study.

    Adds FreeSurfer recon-all processing, 5-tissue-type generation for
    ACT tractography and per-visit aparc statistics tables for both
    hemispheres over a range of surface measures.
    """

    desc = "T1-weighted MRI contrast"

    add_data_specs = [
        FilesetSpec('fs_recon_all', zip_format, 'freesurfer_pipeline'),
        InputFilesetSpec(
            't2_coreg',
            STD_IMAGE_FORMATS,
            optional=True,
            desc=("A coregistered T2 image to use in freesurfer to help "
                  "distinguish the peel surface")),
        # Templates
        InputFilesetSpec('suit_mask',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('SUIT', nifti_format)),
        FilesetSpec('five_tissue_type',
                    mrtrix_image_format,
                    'gen_5tt_pipeline',
                    desc=("A segmentation image taken from freesurfer output "
                          "and simplified into 5 tissue types. Used in ACT "
                          "streamlines tractography"))
    ] + [
        # One aparc stats table per (hemisphere, measure) combination
        FilesetSpec('aparc_stats_{}_{}_table'.format(h, m),
                    text_format,
                    'aparc_stats_table_pipeline',
                    frequency='per_visit',
                    pipeline_args={
                        'hemisphere': h,
                        'measure': m
                    },
                    desc=("Table of {} of {} per parcellated segment".format(
                        m, h.upper())))
        for h, m in itertools.product(
            ('lh', 'rh'), ('volume', 'thickness', 'thicknessstd', 'meancurv',
                           'gauscurv', 'foldind', 'curvind'))
    ]

    add_param_specs = [
        # MriStudy.param_spec('bet_method').with_new_choices(default='opti_bet'),
        SwitchSpec('bet_robust', False),
        SwitchSpec('bet_reduce_bias', True),
        SwitchSpec('aparc_atlas',
                   'desikan-killiany',
                   choices=('desikan-killiany', 'destrieux', 'DKT')),
        ParamSpec('bet_f_threshold', 0.1),
        ParamSpec('bet_g_threshold', 0.0)
    ]

    default_bids_inputs = [
        BidsInputs(spec_name='magnitude',
                   type='T1w',
                   valid_formats=(nifti_gz_x_format, nifti_gz_format))
    ]

    primary_scan_name = 'magnitude'

    def freesurfer_pipeline(self, **name_maps):
        """Run FreeSurfer recon-all on the preprocessed T1 (optionally
        using a coregistered T2 to refine the pial surface) and package
        the subject directory as the 'fs_recon_all' output.
        """
        pipeline = self.new_pipeline(name='segmentation',
                                     name_maps=name_maps,
                                     desc="Segment white/grey matter and csf",
                                     citations=copy(freesurfer_cites))

        # FS ReconAll node
        recon_all = pipeline.add(
            'recon_all',
            interface=ReconAll(directive='all',
                               openmp=self.processor.num_processes),
            inputs={'T1_files': (self.preproc_spec_name, nifti_gz_format)},
            requirements=[freesurfer_req.v('5.3')],
            wall_time=2000)

        if self.provided('t2_coreg'):
            pipeline.connect_input('t2_coreg', recon_all, 'T2_file',
                                   nifti_gz_format)
            recon_all.inputs.use_T2 = True

        # Wrapper around os.path.join
        pipeline.add('join',
                     JoinPath(),
                     inputs={
                         'dirname': (recon_all, 'subjects_dir'),
                         'filename': (recon_all, 'subject_id')
                     },
                     outputs={'fs_recon_all': ('path', directory_format)})

        return pipeline

    def segmentation_pipeline(self, **name_maps):
        """Tissue segmentation with the image type fixed to T1
        (img_type=1).
        """
        # Python-3-only file (keyword metaclass), so zero-argument
        # super() is preferred over super(T1Study, self)
        return super().segmentation_pipeline(img_type=1, **name_maps)

    def gen_5tt_pipeline(self, **name_maps):
        """Generate the 5-tissue-type image from the recon-all aseg for
        Anatomically-Constrained Tractography.
        """
        pipeline = self.new_pipeline(
            name='gen5tt',
            name_maps=name_maps,
            desc=("Generate 5-tissue-type image used for Anatomically-"
                  "Constrained Tractography (ACT)"))

        # Locate aseg.mgz within the recon-all output directory
        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(sub_paths=['mri', 'aseg.mgz']),
            inputs={'base_path': ('fs_recon_all', directory_format)})

        pipeline.add(
            'gen5tt',
            mrtrix3.Generate5tt(algorithm='freesurfer', out_file='5tt.mif'),
            inputs={'in_file': (aseg_path, 'out_path')},
            outputs={'five_tissue_type': ('out_file', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3'),
                          freesurfer_req.v('6.0')])

        return pipeline

    def aparc_stats_table_pipeline(self, measure, hemisphere, **name_maps):
        """Collate per-subject recon-all outputs into a SUBJECTS_DIR and
        extract a per-visit aparcstats2table for the given hemisphere
        and measure, using the atlas selected by 'aparc_atlas'.
        """
        pipeline = self.new_pipeline(
            name='aparc_stats_{}_{}'.format(hemisphere, measure),
            name_maps=name_maps,
            desc=("Extract statistics from freesurfer outputs"))

        # Join over subjects so aparcstats2table sees all of them at once
        copy_to_dir = pipeline.add('copy_to_subjects_dir',
                                   CopyToDir(),
                                   inputs={
                                       'in_files':
                                       ('fs_recon_all', directory_format),
                                       'file_names': (self.SUBJECT_ID, int)
                                   },
                                   joinsource=self.SUBJECT_ID,
                                   joinfield=['in_files', 'file_names'])

        if self.branch('aparc_atlas', 'desikan-killiany'):
            parc = 'aparc'
        elif self.branch('aparc_atlas', 'destrieux'):
            parc = 'aparc.a2009s'
        elif self.branch('aparc_atlas', 'DKT'):
            parc = 'aparc.DKTatlas40'
        else:
            self.unhandled_branch('aparc_atlas')

        pipeline.add('aparc_stats',
                     AparcStats(measure=measure,
                                hemisphere=hemisphere,
                                parc=parc),
                     inputs={
                         'subjects_dir': (copy_to_dir, 'out_dir'),
                         'subjects': (copy_to_dir, 'file_names')
                     },
                     outputs={
                         'aparc_stats_{}_{}_table'.format(hemisphere, measure):
                         ('tablefile', text_format)
                     },
                     requirements=[freesurfer_req.v('5.3')])

        return pipeline
Exemple #9
0
class PetStudy(Study, metaclass=StudyMetaClass):
    """Base PET study.

    Provides pipelines for ICA decomposition, normalisation to a PET
    template (ANTs), reconstructed-data preparation, time-info
    extraction from list-mode headers and sinogram unlisting/SSRB
    compression.
    """

    add_param_specs = [
        ParamSpec('ica_n_components', 2),
        ParamSpec('ica_type', 'spatial'),
        ParamSpec('norm_transformation', 's'),
        ParamSpec('norm_dim', 3),
        ParamSpec('norm_template',
                  os.path.join(template_path, 'PET_template.nii.gz')),
        # Crop bounding box (voxels) applied to PET volumes
        ParamSpec('crop_xmin', 100),
        ParamSpec('crop_xsize', 130),
        ParamSpec('crop_ymin', 100),
        ParamSpec('crop_ysize', 130),
        ParamSpec('crop_zmin', 20),
        ParamSpec('crop_zsize', 100),
        ParamSpec('image_orientation_check', False)
    ]

    add_data_specs = [
        InputFilesetSpec('list_mode', list_mode_format),
        InputFilesetSpec('registered_volumes', nifti_gz_format),
        InputFilesetSpec('pet_image', nifti_gz_format),
        InputFilesetSpec('pet_data_dir', directory_format),
        InputFilesetSpec('pet_recon_dir', directory_format),
        FilesetSpec('pet_recon_dir_prepared', directory_format,
                    'pet_data_preparation_pipeline'),
        FilesetSpec('decomposed_file', nifti_gz_format, 'ICA_pipeline'),
        FilesetSpec('timeseries', nifti_gz_format, 'ICA_pipeline'),
        FilesetSpec('mixing_mat', text_format, 'ICA_pipeline'),
        FilesetSpec('registered_volume', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('warp_file', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('invwarp_file', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('affine_mat', text_matrix_format,
                    'Image_normalization_pipeline'),
        FieldSpec('pet_duration', int, 'pet_time_info_extraction_pipeline'),
        FieldSpec('pet_end_time', str, 'pet_time_info_extraction_pipeline'),
        FieldSpec('pet_start_time', str, 'pet_time_info_extraction_pipeline'),
        InputFieldSpec('time_offset', int),
        InputFieldSpec('temporal_length', float),
        InputFieldSpec('num_frames', int),
        FilesetSpec('ssrb_sinograms', directory_format,
                    'sinogram_unlisting_pipeline')
    ]

    def ICA_pipeline(self, **kwargs):
        """Decompose the registered 4D volumes into independent
        components with FastICA.
        """
        pipeline = self.new_pipeline(
            name='ICA',
            desc=('Decompose a 4D fileset into a set of independent '
                  'components using FastICA'),
            citations=[],
            **kwargs)

        pipeline.add(
            'ICA',
            FastICA(n_components=self.parameter('ica_n_components'),
                    ica_type=self.parameter('ica_type')),
            inputs={'volume': ('registered_volumes', nifti_gz_format)},
            # FIX: keyword was misspelled 'ouputs', so these connections
            # were never made
            outputs={
                'decomposed_file': ('ica_decomposition', nifti_gz_format),
                'timeseries': ('ica_timeseries', nifti_gz_format),
                'mixing_mat': ('mixing_mat', text_format)
            })

        return pipeline

    def Image_normalization_pipeline(self, **kwargs):
        """Register the PET image to the study template using ANTs
        SyN, exposing the registered volume, warps and affine.
        """
        pipeline = self.new_pipeline(
            name='Image_registration',
            desc=('Image registration to a template using ANTs'),
            citations=[],
            **kwargs)

        pipeline.add('ANTs',
                     AntsRegSyn(
                         out_prefix='vol2template',
                         num_dimensions=self.parameter('norm_dim'),
                         num_threads=self.processor.num_processes,
                         transformation=self.parameter('norm_transformation'),
                         ref_file=self.parameter('norm_template')),
                     inputs={'input_file': ('pet_image', nifti_gz_format)},
                     # FIX: keyword was misspelled 'ouputs'
                     outputs={
                         'registered_volume': ('reg_file', nifti_gz_format),
                         'warp_file': ('warp_file', nifti_gz_format),
                         'invwarp_file': ('inv_warp', nifti_gz_format),
                         'affine_mat': ('regmat', text_matrix_format)
                     })

        return pipeline

    def pet_data_preparation_pipeline(self, **kwargs):
        """Prepare a reconstructed PET directory for motion
        correction.
        """
        pipeline = self.new_pipeline(
            name='pet_data_preparation',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will prepare the data for the motion "
                  "correction"),
            citations=[],
            **kwargs)

        pipeline.add('prepare_pet',
                     PreparePetDir(image_orientation_check=self.parameter(
                         'image_orientation_check')),
                     inputs={'pet_dir': ('pet_recon_dir', directory_format)},
                     # FIX: keyword was misspelled 'ouputs'
                     outputs={
                         'pet_recon_dir_prepared':
                         ('pet_dir_prepared', directory_format)
                     },
                     requirements=[mrtrix_req.v('3.0rc3'),
                                   fsl_req.v('5.0.9')])

        return pipeline

    def pet_time_info_extraction_pipeline(self, **kwargs):
        """Extract PET start/end time and duration from the list-mode
        header.
        """
        pipeline = self.new_pipeline(
            name='pet_info_extraction',
            desc=("Extract PET time info from list-mode header."),
            citations=[],
            **kwargs)

        pipeline.add(
            'PET_time_info',
            PetTimeInfo(),
            inputs={'pet_data_dir': ('pet_data_dir', directory_format)},
            # FIX: keyword was misspelled 'ouputs'
            outputs={
                # NOTE(review): spec declares pet_end_time as str; this
                # was previously connected as float — aligned to the
                # FieldSpec, confirm against PetTimeInfo's output type
                'pet_end_time': ('pet_end_time', str),
                'pet_start_time': ('pet_start_time', str),
                'pet_duration': ('pet_duration', int)
            })
        return pipeline

    def sinogram_unlisting_pipeline(self, **kwargs):
        """Unlist list-mode data into per-frame sinograms, compress
        them with SSRB and merge the results into one directory.

        Raises
        ------
        BananaUsageError
            If 'list_mode' was not provided as a study input.
        """
        pipeline = self.new_pipeline(
            name='prepare_sinogram',
            desc=('Unlist pet listmode data into several sinograms and '
                  'perform ssrb compression to prepare data for motion '
                  'detection using PCA pipeline.'),
            citations=[],
            **kwargs)

        if not self.provided('list_mode'):
            raise BananaUsageError(
                "'list_mode' was not provided as an input to the study "
                "so cannot perform sinogram unlisting")

        prepare_inputs = pipeline.add('prepare_inputs',
                                      PrepareUnlistingInputs(),
                                      inputs={
                                          'list_mode':
                                          ('list_mode', list_mode_format),
                                          'time_offset': ('time_offset', int),
                                          'num_frames': ('num_frames', int),
                                          'temporal_len':
                                          ('temporal_length', float)
                                      })

        # Map over frames: one unlisting task per list of inputs
        unlisting = pipeline.add(
            'unlisting',
            PETListModeUnlisting(),
            inputs={'list_inputs': (prepare_inputs, 'out')},
            iterfield=['list_inputs'])

        ssrb = pipeline.add(
            'ssrb',
            SSRB(),
            inputs={'unlisted_sinogram': (unlisting, 'pet_sinogram')},
            requirements=[stir_req.v('3.0')])

        pipeline.add(
            'merge_sinograms',
            MergeUnlistingOutputs(),
            inputs={'sinograms': (ssrb, 'ssrb_sinograms')},
            # FIX: keyword was misspelled 'ouputs'
            outputs={'ssrb_sinograms': ('sinogram_folder', directory_format)},
            joinsource='unlisting',
            joinfield=['sinograms'])

        return pipeline
Exemple #10
0
class DwiStudy(EpiSeriesStudy, metaclass=StudyMetaClass):

    desc = "Diffusion-weighted MRI contrast"

    add_data_specs = [
        InputFilesetSpec('anat_5tt', mrtrix_image_format,
                         desc=("A co-registered segmentation image taken from "
                               "freesurfer output and simplified into 5 tissue"
                               " types. Used in ACT streamlines tractography"),
                         optional=True),
        InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,
                         desc=("Co-registered freesurfer recon-all output. "
                               "Used in building the connectome")),
        InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
        FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),
        FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,
                    'series_coreg_pipeline',
                    desc=("The gradient directions coregistered to the "
                          "orientation of the coreg reference")),
        FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',
                    desc=("")),
        FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',
                    desc=("")),
        FilesetSpec('noise_residual', mrtrix_image_format,
                    'preprocess_pipeline',
                    desc=("")),
        FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',
                    desc=("")),
        FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',
                    desc=("")),
        FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',
                    desc=("")),
        FilesetSpec('wm_response', text_format, 'response_pipeline',
                    desc=("")),
        FilesetSpec('gm_response', text_format, 'response_pipeline',
                    desc=("")),
        FilesetSpec('csf_response', text_format, 'response_pipeline',
                    desc=("")),
        FilesetSpec('avg_response', text_format, 'average_response_pipeline',
                    desc=("")),
        FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',
                    desc=("")),
        FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',
                    desc=("")),
        FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',
                    desc=("")),
        FilesetSpec('norm_intensity', mrtrix_image_format,
                    'intensity_normalisation_pipeline',
                    desc=("")),
        FilesetSpec('norm_intens_fa_template', mrtrix_image_format,
                    'intensity_normalisation_pipeline', frequency='per_study',
                    desc=("")),
        FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,
                    'intensity_normalisation_pipeline', frequency='per_study',
                    desc=("")),
        FilesetSpec('global_tracks', mrtrix_track_format,
                    'global_tracking_pipeline',
                    desc=("")),
        FilesetSpec('wm_mask', mrtrix_image_format,
                    'global_tracking_pipeline',
                    desc=("")),
        FilesetSpec('connectome', csv_format, 'connectome_pipeline',
                    desc=(""))]

    add_param_specs = [
        ParamSpec('multi_tissue', True,
                  desc=("")),
        ParamSpec('preproc_pe_dir', None, dtype=str,
                  desc=("")),
        ParamSpec('tbss_skel_thresh', 0.2,
                  desc=("")),
        ParamSpec('fsl_mask_f', 0.25,
                  desc=("")),
        ParamSpec('bet_robust', True,
                  desc=("")),
        ParamSpec('bet_f_threshold', 0.2,
                  desc=("")),
        ParamSpec('bet_reduce_bias', False,
                  desc=("")),
        ParamSpec('num_global_tracks', int(1e9),
                  desc=("")),
        ParamSpec('global_tracks_cutoff', 0.05,
                  desc=("")),
        SwitchSpec('preproc_denoise', False,
                   desc=("")),
        SwitchSpec('response_algorithm', 'tax',
                   ('tax', 'dhollander', 'msmt_5tt'),
                   desc=("")),
        SwitchSpec('fod_algorithm', 'csd', ('csd', 'msmt_csd'),
                   desc=("")),
        MriStudy.param_spec('bet_method').with_new_choices('mrtrix'),
        SwitchSpec('reorient2std', False,
                   desc=(""))]

    primary_bids_input = BidsInputs(
        spec_name='series', type='dwi',
        valid_formats=(nifti_gz_x_format, nifti_gz_format))

    default_bids_inputs = [primary_bids_input,
                           BidsAssocInputs(
                               spec_name='bvalues',
                               primary=primary_bids_input,
                               association='grads',
                               type='bval',
                               format=fsl_bvals_format),
                           BidsAssocInputs(
                               spec_name='grad_dirs',
                               primary=primary_bids_input,
                               association='grads',
                               type='bvec',
                               format=fsl_bvecs_format),
                           BidsAssocInputs(
                               spec_name='reverse_phase',
                               primary=primary_bids_input,
                               association='epi',
                               format=nifti_gz_format,
                               drop_if_missing=True)]

    RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5

    primary_scan_name = 'series'

    @property
    def multi_tissue(self):
        """Whether the selected response algorithm is multi-tissue."""
        multi_tissue_algs = ('msmt_5tt', 'dhollander')
        return self.branch('response_algorithm', multi_tissue_algs)

    def fsl_grads(self, pipeline, coregistered=True):
        """
        Adds and returns a node to the pipeline that merges the FSL bvecs
        and bvals into the single 'fslgrad' tuple expected by the MRtrix
        interfaces. The node is created once per pipeline and reused on
        subsequent calls.

        NB: the original "docstring" was two separate string statements,
        so only its first line was the actual docstring and the trailing
        "bvecs" literal was a dead no-op statement.

        Parameters
        ----------
        pipeline : Pipeline
            The pipeline to add the merge node to
        coregistered : bool
            Whether to use the coregistered gradient directions (when the
            study is coregistered) instead of the raw ones

        Returns
        -------
        tuple(Node, str)
            The merge node and the name of its output field, suitable for
            use as a pipeline input specification
        """
        try:
            # Reuse the node if a previous call already added it
            fslgrad = pipeline.node('fslgrad')
        except ArcanaNameError:
            if self.is_coregistered and coregistered:
                grad_dirs = 'grad_dirs_coreg'
            else:
                grad_dirs = 'grad_dirs'
            # Gradient merge node
            fslgrad = pipeline.add(
                "fslgrad",
                MergeTuple(2),
                inputs={
                    'in1': (grad_dirs, fsl_bvecs_format),
                    'in2': ('bvalues', fsl_bvals_format)})
        return (fslgrad, 'out')

    def extract_magnitude_pipeline(self, **name_maps):
        """
        Extracts the first b==0 volume of the DWI series as the
        'magnitude' image.
        """
        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts the first b==0 volume from the series",
            citations=[],
            name_maps=name_maps)

        # Pull out all b==0 volumes from the series
        b0_extract = pipeline.add(
            'dwiextract',
            ExtractDWIorB0(
                bzero=True,
                out_ext='.nii.gz'),
            inputs={
                'fslgrad': self.fsl_grads(pipeline, coregistered=False),
                'in_file': ('series', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Keep only the first of the extracted b==0 volumes
        pipeline.add(
            "extract_first_vol",
            MRConvert(
                coord=(3, 0)),
            inputs={
                'in_file': (b0_extract, 'out_file')},
            outputs={
                'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def preprocess_pipeline(self, **name_maps):
        """
        Performs a series of FSL preprocessing steps, including Eddy and
        Topup distortion correction, optional denoising and reorientation,
        followed by brain masking and ANTs N4 bias-field correction.

        Parameters
        ----------
        phase_dir : str{AP|LR|IS}
            The phase encode direction
        """

        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite,
                      distort_correct_cite, n4_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)

        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            citations=references)

        # Obtain the gradient encoding in FSL (bvec/bval) format, either by
        # extracting it from the DICOM series or from explicit inputs
        if self.input('series').format == dicom_format:
            extract_grad = pipeline.add(
                "extract_grad",
                ExtractFSLGradients(),
                inputs={
                    'in_file': ('series', dicom_format)},
                outputs={
                    'grad_dirs': ('bvecs_file', fsl_bvecs_format),
                    'bvalues': ('bvals_file', fsl_bvals_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
                               'in2': (extract_grad, 'bvals_file')}
        elif self.provided('grad_dirs') and self.provided('bvalues'):
            grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
                               'in2': ('bvalues', fsl_bvals_format)}
        else:
            # NB: message corrected to refer to 'series' (the input whose
            # format is actually checked above), not 'magnitude'
            raise BananaUsageError(
                "Either input 'series' image needs to be in DICOM format "
                "or gradient directions and b-values need to be explicitly "
                "provided to {}".format(self))

        # Gradient merge node
        grad_fsl = pipeline.add(
            "grad_fsl",
            MergeTuple(2),
            inputs=grad_fsl_inputs)

        gradients = (grad_fsl, 'out')

        # Create node to reorient preproc out_file
        if self.branch('reorient2std'):
            reorient = pipeline.add(
                'fslreorient2std',
                fsl.utils.Reorient2Std(
                    output_type='NIFTI_GZ'),
                inputs={
                    'in_file': ('series', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.9')])
            reoriented = (reorient, 'out_file')
        else:
            reoriented = ('series', nifti_gz_format)

        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(),
                inputs={
                    'in_file': reoriented},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Calculate residual noise (original - denoised noise estimate)
            # so it can be inspected via the 'noise_residual' spec
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': reoriented,
                    'in2': (denoise, 'noise')})

            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract'),
                inputs={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'noise_residual': ('out_file', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            denoised = (denoise, 'out_file')
        else:
            denoised = reoriented

        # Preproc kwargs
        preproc_kwargs = {}
        preproc_inputs = {'in_file': denoised,
                          'grad_fsl': gradients}

        if self.provided('reverse_phase'):

            if self.provided('magnitude', default_okay=False):
                dwi_reference = ('magnitude', mrtrix_image_format)
            else:
                # Extract b=0 volumes
                dwiextract = pipeline.add(
                    'dwiextract',
                    ExtractDWIorB0(
                        bzero=True,
                        out_ext='.nii.gz'),
                    inputs={
                        'in_file': denoised,
                        'fslgrad': gradients},
                    requirements=[mrtrix_req.v('3.0rc3')])

                # Get first b=0 from dwi b=0 volumes
                extract_first_b0 = pipeline.add(
                    "extract_first_vol",
                    MRConvert(
                        coord=(3, 0)),
                    inputs={
                        'in_file': (dwiextract, 'out_file')},
                    requirements=[mrtrix_req.v('3.0rc3')])

                dwi_reference = (extract_first_b0, 'out_file')

            # Concatenate extracted forward rpe with reverse rpe
            combined_images = pipeline.add(
                'combined_images',
                MRCat(),
                inputs={
                    'first_scan': dwi_reference,
                    'second_scan': ('reverse_phase', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Create node to assign the right PED to the diffusion
            prep_dwi = pipeline.add(
                'prepare_dwi',
                PrepareDWI(),
                inputs={
                    'pe_dir': ('ped', float),
                    'ped_polarity': ('pe_angle', float)})

            preproc_kwargs['rpe_pair'] = True

            distortion_correction = True
            preproc_inputs['se_epi'] = (combined_images, 'out_file')
        else:
            distortion_correction = False
            preproc_kwargs['rpe_none'] = True

        if self.parameter('preproc_pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')

        # Main Eddy/Topup preprocessing node
        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.nii.gz',
                # FIXME: Need to determine this programmatically
                # eddy_parameters = '--data_is_shelled '
                temp_dir='dwipreproc_tempdir',
                **preproc_kwargs),
            inputs=preproc_inputs,
            outputs={
                'eddy_par': ('eddy_parameters', eddy_par_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
            wall_time=60)

        # Feed the derived phase-encode direction into dwipreproc when
        # distortion correction is enabled
        if distortion_correction:
            pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')

        # Mask the preprocessed series so bias correction is restricted to
        # brain voxels
        mask = pipeline.add(
            'dwi2mask',
            BrainMask(
                out_file='brainmask.nii.gz'),
            inputs={
                'in_file': (preproc, 'out_file'),
                'grad_fsl': gradients},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Create bias correct node
        pipeline.add(
            "bias_correct",
            DWIBiasCorrect(
                method='ants'),
            inputs={
                'grad_fsl': gradients,  # internal
                'in_file': (preproc, 'out_file'),
                'mask': (mask, 'out_file')},
            outputs={
                'series_preproc': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])

        return pipeline

    def brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole-brain mask from the b0 images using MRtrix's
        'dwi2mask' command when the 'bet_method' switch is set to 'mrtrix',
        otherwise delegates to the parent class's implementation.
        """

        if self.branch('bet_method', 'mrtrix'):
            pipeline = self.new_pipeline(
                'brain_extraction',
                desc="Generate brain mask from b0 images",
                citations=[mrtrix_cite],
                name_maps=name_maps)

            # Use the coregistered series when a coregistration reference
            # was provided, so the mask is in the same space
            if self.provided('coreg_ref'):
                series = 'series_coreg'
            else:
                series = 'series_preproc'

            # Create mask node
            masker = pipeline.add(
                'dwi2mask',
                BrainMask(
                    out_file='brain_mask.nii.gz'),
                inputs={
                    'in_file': (series, nifti_gz_format),
                    'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
                outputs={
                    'brain_mask': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Pair the magnitude image with the mask so they can be
            # multiplied to produce the skull-stripped 'brain' output
            merge = pipeline.add(
                'merge_operands',
                Merge(2),
                inputs={
                    'in1': ('mag_preproc', nifti_gz_format),
                    'in2': (masker, 'out_file')})

            # Apply the mask by voxel-wise multiplication
            pipeline.add(
                'apply_mask',
                MRCalc(
                    operation='multiply'),
                inputs={
                    'operands': (merge, 'out')},
                outputs={
                    'brain': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
        else:
            pipeline = super().brain_extraction_pipeline(**name_maps)
        return pipeline

    def series_coreg_pipeline(self, **name_maps):
        """
        Coregisters the series (via the parent pipeline) and rotates the
        gradient directions by the same transform.
        """
        pipeline = super().series_coreg_pipeline(**name_maps)

        # The coregistration rotates the image, so the gradient directions
        # must be rotated by the same FSL affine to remain consistent
        pipeline.add(
            'transform_grads',
            TransformGradients(),
            inputs={
                'transform': ('coreg_fsl_mat', text_matrix_format),
                'gradients': ('grad_dirs', fsl_bvecs_format)},
            outputs={
                'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})

        return pipeline

    def intensity_normalisation_pipeline(self, **name_maps):
        """
        Normalises the intensities of the DWI series across all sessions in
        the study using MRtrix's 'dwiintensitynorm' (joins over subjects and
        visits, normalises the group, then selects each session's result).

        Raises
        ------
        ArcanaMissingDataException
            If the study contains fewer than two sessions
        """
        if self.num_sessions < 2:
            raise ArcanaMissingDataException(
                "Cannot normalise intensities of DWI images as study only "
                "contains a single session")
        elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
            logger.warning(
                "The number of sessions in the study ({}) is less than the "
                "recommended number for intensity normalisation ({}). The "
                "results may be unreliable".format(
                    self.num_sessions,
                    self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))

        pipeline = self.new_pipeline(
            name='intensity_normalization',
            desc="Corrects for B1 field inhomogeneity",
            # NB: fixed - a requirement object (mrtrix_req.v) was previously
            # passed as a citation; the citation used elsewhere is mrtrix_cite
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Convert to MRtrix's native format with embedded gradients
        mrconvert = pipeline.add(
            'mrconvert',
            MRConvert(
                out_ext='.mif'),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Pair subject and visit ids together, expanding so they can be
        # joined and chained together
        session_ids = pipeline.add(
            'session_ids',
            utility.IdentityInterface(
                ['subject_id', 'visit_id']),
            inputs={
                'subject_id': (Study.SUBJECT_ID, int),
                'visit_id': (Study.VISIT_ID, int)})

        # Set up join nodes
        join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
        join_over_subjects = pipeline.add(
            'join_over_subjects',
            utility.IdentityInterface(
                join_fields),
            inputs={
                'masks': (self.brain_mask_spec_name, nifti_gz_format),
                'dwis': (mrconvert, 'out_file'),
                'subject_ids': (session_ids, 'subject_id'),
                'visit_ids': (session_ids, 'visit_id')},
            joinsource=self.SUBJECT_ID,
            joinfield=join_fields)

        join_over_visits = pipeline.add(
            'join_over_visits',
            Chain(
                join_fields),
            inputs={
                'dwis': (join_over_subjects, 'dwis'),
                'masks': (join_over_subjects, 'masks'),
                'subject_ids': (join_over_subjects, 'subject_ids'),
                'visit_ids': (join_over_subjects, 'visit_ids')},
            joinsource=self.VISIT_ID,
            joinfield=join_fields)

        # Intensity normalization over the whole group of sessions
        intensity_norm = pipeline.add(
            'dwiintensitynorm',
            DWIIntensityNorm(),
            inputs={
                'in_files': (join_over_visits, 'dwis'),
                'masks': (join_over_visits, 'masks')},
            outputs={
                'norm_intens_fa_template': ('fa_template',
                                            mrtrix_image_format),
                'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Expand the group result back out: select this session's normalised
        # image by matching subject and visit ids
        pipeline.add(
            'expand', SelectSession(),
            inputs={
                'subject_ids': (join_over_visits, 'subject_ids'),
                'visit_ids': (join_over_visits, 'visit_ids'),
                'inlist': (intensity_norm, 'out_files'),
                'subject_id': (Study.SUBJECT_ID, int),
                'visit_id': (Study.VISIT_ID, int)},
            outputs={
                'norm_intensity': ('item', mrtrix_image_format)})

        return pipeline

    def tensor_pipeline(self, **name_maps):
        """
        Fits the apparent diffusion tensor (DT) to each voxel of the image.
        """

        pipeline = self.new_pipeline(
            name='tensor',
            desc=("Estimates the apparent diffusion tensor in each "
                  "voxel"),
            citations=[],
            name_maps=name_maps)

        # Fit the tensor within the brain mask, using the (possibly
        # coregistered) preprocessed series and its gradient encoding
        pipeline.add(
            'dwi2tensor',
            FitTensor(
                out_file='dti.nii.gz'),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'tensor': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def tensor_metrics_pipeline(self, **name_maps):
        """
        Calculates the FA and ADC maps from a fitted diffusion tensor
        image (produced by tensor_pipeline).
        """

        pipeline = self.new_pipeline(
            name='fa',
            desc=("Calculates the FA and ADC from a tensor image"),
            citations=[],
            name_maps=name_maps)

        # Derive scalar metrics (FA, ADC) from the tensor within the mask
        pipeline.add(
            'metrics',
            TensorMetrics(
                out_fa='fa.nii.gz',
                out_adc='adc.nii.gz'),
            inputs={
                'in_file': ('tensor', nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'fa': ('out_fa', nifti_gz_format),
                'adc': ('out_adc', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def response_pipeline(self, **name_maps):
        """
        Estimates the fibre response function(s) used by constrained
        spherical deconvolution. Multi-tissue algorithms additionally
        produce grey-matter and CSF responses.

        Parameters
        ----------
        response_algorithm : str
            Algorithm used to estimate the response
        """

        pipeline = self.new_pipeline(
            name='response',
            desc=("Estimates the fibre response function"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Create response-function estimation node
        response = pipeline.add(
            'response',
            ResponseSD(
                algorithm=self.parameter('response_algorithm')),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'wm_response': ('wm_file', text_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Connect GM/CSF outputs for multi-tissue algorithms.
        # NB: fixed - trailing commas previously assigned 1-tuples
        # ('gm.txt',) / ('csf.txt',) to the file-name traits instead of
        # plain strings
        if self.multi_tissue:
            response.inputs.gm_file = 'gm.txt'
            response.inputs.csf_file = 'csf.txt'
            pipeline.connect_output('gm_response', response, 'gm_file',
                                    text_format)
            pipeline.connect_output('csf_response', response, 'csf_file',
                                    text_format)

        return pipeline

    def average_response_pipeline(self, **name_maps):
        """
        Averages the estimated WM response function over all subjects and
        visits in the project.
        """

        pipeline = self.new_pipeline(
            name='average_response',
            desc=(
                "Averages the fibre response function over the project"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Gather the per-session responses: first join over subjects...
        # NOTE(review): outputs={} appears redundant here (no outputs are
        # exposed from this node) - confirm before removing
        join_subjects = pipeline.add(
            'join_subjects',
            utility.IdentityInterface(['responses']),
            inputs={
                'responses': ('wm_response', text_format)},
            outputs={},
            joinsource=self.SUBJECT_ID,
            joinfield=['responses'])

        # ...then chain the per-subject lists together over visits
        join_visits = pipeline.add(
            'join_visits',
            Chain(['responses']),
            inputs={
                'responses': (join_subjects, 'responses')},
            joinsource=self.VISIT_ID,
            joinfield=['responses'])

        # Average all collected responses into the study-wide response
        pipeline.add(
            'avg_response',
            AverageResponse(),
            inputs={
                'in_files': (join_visits, 'responses')},
            outputs={
                'avg_response': ('out_file', text_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def fod_pipeline(self, **name_maps):
        """
        Estimates the fibre orientation distribution (FOD) in each voxel
        using constrained spherical deconvolution. Multi-tissue algorithms
        additionally produce grey-matter and CSF ODFs.
        """

        pipeline = self.new_pipeline(
            name='fod',
            desc=("Estimates the fibre orientation distribution in each"
                  " voxel"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Create fod fit node
        # NOTE(review): 'wm_odf' is declared as mrtrix_image_format in the
        # data specs but output here as nifti_gz_format - confirm the
        # repository performs the conversion
        dwi2fod = pipeline.add(
            'dwi2fod',
            EstimateFOD(
                algorithm=self.parameter('fod_algorithm')),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'wm_txt': ('wm_response', text_format),
                'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)},
            outputs={
                'wm_odf': ('wm_odf', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Connect GM/CSF inputs and outputs for multi-tissue algorithms.
        # NB: fixed - trailing commas previously assigned 1-tuples
        # ('gm.mif',) / ('csf.mif',) to the file-name traits, and turned the
        # connect statements into pointless tuple expressions
        if self.multi_tissue:
            dwi2fod.inputs.gm_odf = 'gm.mif'
            dwi2fod.inputs.csf_odf = 'csf.mif'
            pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
                                   text_format)
            pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
                                   text_format)
            pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
                                    nifti_gz_format)
            pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
                                    nifti_gz_format)

        return pipeline

    def extract_b0_pipeline(self, **name_maps):
        """
        Extracts the b0 volumes from the DWI series, averages them and
        converts the mean to NIfTI.
        """
        pipeline = self.new_pipeline(
            name='extract_b0',
            desc="Extract b0 image from a DWI study",
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Pull out every b==0 volume from the preprocessed series
        b0_extract = pipeline.add(
            'extract_b0s',
            ExtractDWIorB0(
                bzero=True,
                quiet=True),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'fslgrad': self.fsl_grads(pipeline)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # FIXME: Need a registration step before the mean
        # Average the b==0 volumes along the volume (4th) axis
        b0_mean = pipeline.add(
            "mean",
            MRMath(
                operation='mean',
                axis=3,
                quiet=True),
            inputs={
                'in_files': (b0_extract, 'out_file')},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Convert the averaged image to NIfTI for the 'b0' output
        pipeline.add(
            "output_conversion",
            MRConvert(
                quiet=True,
                out_ext='.nii.gz'),
            inputs={
                'in_file': (b0_mean, 'out_file')},
            outputs={
                'b0': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def global_tracking_pipeline(self, **name_maps):
        """
        Performs whole-brain streamlines tractography on the WM ODFs,
        seeded from a brain mask, optionally using anatomically-constrained
        tractography (ACT) when a 5TT image is provided.
        """

        pipeline = self.new_pipeline(
            name='global_tracking',
            # NB: fixed description, which was copy-pasted from
            # extract_b0_pipeline ("Extract b0 image from a DWI study")
            desc="Perform whole-brain tractography on a DWI study",
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Generate the seed mask from the preprocessed series
        mask = pipeline.add(
            'mask',
            DWI2Mask(),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Track on the WM ODFs, with count/FOD-amplitude cut-offs taken
        # from the study parameters
        tracking = pipeline.add(
            'tracking',
            Tractography(
                select=self.parameter('num_global_tracks'),
                cutoff=self.parameter('global_tracks_cutoff')),
            inputs={
                'seed_image': (mask, 'out_file'),
                'in_file': ('wm_odf', mrtrix_image_format)},
            outputs={
                'global_tracks': ('out_file', mrtrix_track_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Use ACT when a co-registered 5TT segmentation is available
        if self.provided('anat_5tt'):
            pipeline.connect_input('anat_5tt', tracking, 'act_file',
                                   mrtrix_image_format)

        return pipeline

    def intrascan_alignment_pipeline(self, **name_maps):
        """
        Generates per-volume affine motion matrices for the main DWI
        sequence from the eddy motion parameters.
        """
        pipeline = self.new_pipeline(
            name='affine_mat_generation',
            desc=("Generation of the affine matrices for the main dwi "
                  "sequence starting from eddy motion parameters"),
            citations=[fsl_cite],
            name_maps=name_maps)

        # One affine matrix per volume, derived from eddy's motion estimates
        pipeline.add(
            'gen_aff_mats',
            AffineMatrixGeneration(),
            inputs={
                'motion_parameters': ('eddy_par', eddy_par_format),
                'reference_image': ('mag_preproc', nifti_gz_format)},
            outputs={
                'align_mats': ('affine_matrices', motion_mats_format)})

        return pipeline

    def connectome_pipeline(self, **name_maps):
        """
        Generates a connectome matrix from the whole-brain tracks and the
        FreeSurfer aparc+aseg parcellation.
        """

        pipeline = self.new_pipeline(
            name='connectome',
            desc=("Generate a connectome from whole brain connectivity"),
            citations=[],
            name_maps=name_maps)

        # Locate the aparc+aseg parcellation inside the recon-all output.
        # NOTE(review): 'anat_fs_recon_all' is declared as zip_format in the
        # data specs but accessed here as directory_format - confirm the
        # repository unpacks it before this pipeline runs
        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(
                sub_paths=['mri', 'aparc+aseg.mgz']),
            inputs={
                'base_path': ('anat_fs_recon_all', directory_format)})

        # Build the connectome by assigning streamlines to parcellation
        # nodes
        pipeline.add(
            'connectome',
            mrtrix3.BuildConnectome(),
            inputs={
                'in_file': ('global_tracks', mrtrix_track_format),
                'in_parc': (aseg_path, 'out_path')},
            outputs={
                'connectome': ('out_file', csv_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
Exemple #11
0
class EpiStudy(MriStudy, metaclass=StudyMetaClass):
    """Generic EPI study with optional field-map based (FSL FUGUE)
    distortion correction applied during preprocessing."""

    add_data_specs = [
        # White-matter segmentation of the coregistration reference
        InputFilesetSpec('coreg_ref_wmseg', STD_IMAGE_FORMATS,
                         optional=True),
        # Field-map magnitude/phase pair; when both are supplied the
        # preprocessing switches to FUGUE unwarping
        InputFilesetSpec('field_map_mag', STD_IMAGE_FORMATS,
                         optional=True),
        InputFilesetSpec('field_map_phase', STD_IMAGE_FORMATS,
                         optional=True),
        # Echo-time difference of the field map, derived from its headers
        FieldSpec('field_map_delta_te', float,
                  'field_map_time_info_pipeline')]

    add_param_specs = [
        SwitchSpec('bet_robust', True),
        ParamSpec('bet_f_threshold', 0.2),
        ParamSpec('bet_reduce_bias', False),
        # Effective echo spacing, passed to FUGUE as dwell_time
        ParamSpec('fugue_echo_spacing', 0.000275)]

    def preprocess_pipeline(self, **name_maps):
        """Return the FUGUE pipeline when a field map (phase + magnitude)
        was provided, otherwise the inherited preprocessing pipeline."""
        if ('field_map_phase' in self.input_names and
                'field_map_mag' in self.input_names):
            return self._fugue_pipeline(**name_maps)
        else:
            return super().preprocess_pipeline(**name_maps)

    def _fugue_pipeline(self, **name_maps):
        """Build the FUGUE distortion-correction pipeline: reorient all
        inputs, brain-extract the field-map magnitude, prepare the field
        map and unwarp the magnitude image."""
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Fugue distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Standardise the orientation of the EPI and field-map images
        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('magnitude', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        fm_mag_reorient = pipeline.add(
            'reorient_fm_mag',
            fsl.utils.Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('field_map_mag', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        fm_phase_reorient = pipeline.add(
            'reorient_fm_phase',
            fsl.utils.Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('field_map_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Brain-extract the field-map magnitude (expected by PrepareFieldmap)
        bet = pipeline.add(
            "bet",
            BET(
                robust=True,
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (fm_mag_reorient, 'out_file')},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        # Convert the phase/magnitude pair into a field map; delta_TE is
        # taken from the extracted header field rather than hard-coded
        create_fmap = pipeline.add(
            "prepfmap",
            PrepareFieldmap(
                # delta_TE=2.46
            ),
            inputs={
                'delta_TE': ('field_map_delta_te', float),
                "in_magnitude": (bet, "out_file"),
                'in_phase': (fm_phase_reorient, 'out_file')},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        # Unwarp the magnitude image using the prepared field map
        pipeline.add(
            'fugue',
            FUGUE(
                unwarp_direction='x',
                dwell_time=self.parameter('fugue_echo_spacing'),
                unwarped_file='example_func.nii.gz',
                output_type='NIFTI_GZ'),
            inputs={
                'fmap_in_file': (create_fmap, 'out_fieldmap'),
                'in_file': (reorient_epi_in, 'out_file')},
            outputs={
                'mag_preproc': ('unwarped_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def field_map_time_info_pipeline(self, **name_maps):
        """Extract the field map's delta TE from its DICOM headers."""
        pipeline = self.new_pipeline(
            name='field_map_time_info_pipeline',
            desc=("Pipeline to extract delta TE from field map "
                  "images, if provided"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'extract_delta_te',
            FieldMapTimeInfo(),
            inputs={
                'fm_mag': ('field_map_mag', dicom_format)},
            outputs={
                'field_map_delta_te': ('delta_te', float)})

        return pipeline
# Exemple #12
# 0
def create_motion_correction_class(name,
                                   ref=None,
                                   ref_type=None,
                                   t1s=None,
                                   t2s=None,
                                   dwis=None,
                                   epis=None,
                                   umap=None,
                                   dynamic=False,
                                   umap_ref=None,
                                   pet_data_dir=None,
                                   pet_recon_dir=None,
                                   struct2align=None):
    """Dynamically assemble a motion-correction MultiStudy class.

    Parameters
    ----------
    name : str
        Name for the generated class.
    ref : str
        Scan used as the motion-correction reference (required).
    ref_type : str
        Either 't1' or 't2'; selects the study type of the reference.
    t1s, t2s, epis : list[str] | None
        Scans to be corrected, grouped by contrast/sequence.
    dwis : list | None
        DWI scans as (scan, polarity) pairs, where polarity is '0'
        (main), '1' (reference) or '-1' (opposite phase encoding).
    umap : str | list | None
        Attenuation map to realign (PET-MR); only the first is used.
    dynamic : bool
        Whether PET motion correction is frame-by-frame (dynamic).
    umap_ref : str | None
        Scan the umap is aligned to; must be one of ``t1s`` or ``t2s``.
    pet_data_dir, pet_recon_dir : str | None
        Raw and reconstructed PET data locations.
    struct2align : str | None
        Path to a structural image to align to PET space.

    Returns
    -------
    tuple
        (generated MultiStudy class, list of InputFilesets, name of the
        output data spec to request).

    Raises
    ------
    Exception
        If no reference is given, ``ref_type`` is unrecognised, or no
        scan other than the reference was provided.
    """
    inputs = []
    dct = {}
    data_specs = []
    run_pipeline = False
    param_specs = [ParamSpec('ref_resampled_resolution', [1])]
    switch_specs = []
    if struct2align is not None:
        # Fileset name = basename of the path without its extension
        struct_image = struct2align.split('/')[-1].split('.')[0]

    if pet_data_dir is not None:
        inputs.append(
            InputFilesets('pet_data_dir', 'pet_data_dir', directory_format))
    if pet_recon_dir is not None:
        inputs.append(
            InputFilesets('pet_data_reconstructed', 'pet_data_reconstructed',
                          directory_format))
        if struct2align is not None:
            inputs.append(
                InputFilesets('struct2align', struct_image, nifti_gz_format))
    if pet_data_dir is not None and pet_recon_dir is not None and dynamic:
        output_data = 'dynamic_motion_correction_results'
        param_specs.append(ParamSpec('dynamic_pet_mc', True))
        # NOTE: struct2align was already appended in the pet_recon_dir
        # branch above (which necessarily ran to reach here), so it is no
        # longer appended a second time.
    elif (pet_recon_dir is not None and not dynamic):
        output_data = 'static_motion_correction_results'
    else:
        output_data = 'motion_detection_output'

    if not ref:
        raise Exception('A reference image must be provided!')
    if ref_type == 't1':
        ref_study = T1Study
    elif ref_type == 't2':
        ref_study = T2Study
    else:
        raise Exception('{} is not a recognized ref_type! The available '
                        'ref_types are t1 or t2.'.format(ref_type))

    study_specs = [SubStudySpec('ref', ref_study)]
    ref_spec = {'ref_brain': 'coreg_ref_brain'}
    inputs.append(InputFilesets('ref_primary', ref, dicom_format))

    if umap_ref and umap:
        # Reduce the umap reference to its basename and work out which
        # contrast group it belongs to (removing it from that group)
        if umap_ref.endswith('/'):
            umap_ref = umap_ref.split('/')[-2]
        else:
            umap_ref = umap_ref.split('/')[-1]
        # FIX: guard membership tests — t1s/t2s may be None
        if t1s and umap_ref in t1s:
            umap_ref_study = T1Study
            t1s.remove(umap_ref)
        elif t2s and umap_ref in t2s:
            umap_ref_study = T2Study
            t2s.remove(umap_ref)
        else:
            umap_ref = None

    if t1s:
        study_specs.extend([
            SubStudySpec('t1_{}'.format(i), T1Study, ref_spec)
            for i in range(len(t1s))
        ])
        # FIX: scan pattern and format were swapped here, unlike every
        # other InputFilesets(name, pattern, format) call in this function
        inputs.extend(
            InputFilesets('t1_{}_primary'.format(i), t1_scan, dicom_format)
            for i, t1_scan in enumerate(t1s))
        run_pipeline = True

    if t2s:
        study_specs.extend([
            SubStudySpec('t2_{}'.format(i), T2Study, ref_spec)
            for i in range(len(t2s))
        ])
        inputs.extend(
            InputFilesets('t2_{}_primary'.format(i), t2_scan, dicom_format)
            for i, t2_scan in enumerate(t2s))
        run_pipeline = True

    if umap_ref and not umap:
        # FIX: this message used to interpolate umap_ref_study, which is
        # only bound when a umap is supplied, raising NameError here
        logger.info(
            'Umap not provided. The umap realignment will not be '
            'performed. Umap_ref will be treated as {}'.format(umap_ref))

    elif umap_ref and umap:
        logger.info('Umap will be realigned to match the head position in '
                    'each frame.')
        if isinstance(umap, list) and len(umap) > 1:
            logger.info('More than one umap provided. Only the first one will '
                        'be used.')
            umap = umap[0]
        study_specs.append(SubStudySpec('umap_ref', umap_ref_study, ref_spec))
        # FIX: scan pattern and format were swapped in these two calls
        inputs.append(InputFilesets('umap_ref_primary', umap_ref,
                                    dicom_format))
        inputs.append(InputFilesets('umap', umap, dicom_format))

        run_pipeline = True

    elif not umap_ref and umap:
        logger.warning('Umap provided without corresponding reference image. '
                       'Realignment cannot be performed without umap_ref. Umap'
                       ' will be ignored.')

    if epis:
        epi_refspec = ref_spec.copy()
        epi_refspec.update({
            'ref_wm_seg': 'coreg_ref_wmseg',
            'ref_preproc': 'coreg_ref'
        })
        study_specs.extend(
            SubStudySpec('epi_{}'.format(i), EpiSeriesStudy, epi_refspec)
            for i in range(len(epis)))
        inputs.extend(
            InputFilesets('epi_{}_primary'.format(i), epi_scan, dicom_format)
            for i, epi_scan in enumerate(epis))
        run_pipeline = True
    if dwis:
        # Split the DWI scans by phase-encoding polarity (second element)
        unused_dwi = []
        dwis_main = [x for x in dwis if x[-1] == '0']
        dwis_ref = [x for x in dwis if x[-1] == '1']
        dwis_opposite = [x for x in dwis if x[-1] == '-1']
        dwi_refspec = ref_spec.copy()
        dwi_refspec.update({
            'ref_wm_seg': 'coreg_ref_wmseg',
            'ref_preproc': 'coreg_ref'
        })
        if dwis_main:
            switch_specs.extend(
                SwitchSpec('dwi_{}_brain_extract_method'.format(i), 'fsl', (
                    'mrtrix', 'fsl')) for i in range(len(dwis_main)))
        if dwis_main and not dwis_opposite:
            logger.warning(
                'No opposite phase encoding direction b0 provided. DWI '
                'motion correction will be performed without distortion '
                'correction. THIS IS SUB-OPTIMAL!')
            study_specs.extend(
                SubStudySpec('dwi_{}'.format(i), DwiStudy, dwi_refspec)
                for i in range(len(dwis_main)))
            inputs.extend(
                InputFilesets('dwi_{}_primary'.format(i), main_scan[0],
                              dicom_format)
                for i, main_scan in enumerate(dwis_main))
        if dwis_main and dwis_opposite:
            study_specs.extend(
                SubStudySpec('dwi_{}'.format(i), DwiStudy, dwi_refspec)
                for i in range(len(dwis_main)))
            inputs.extend(
                InputFilesets('dwi_{}_primary'.format(i), dwis_main[i][0],
                              dicom_format) for i in range(len(dwis_main)))
            # Pair each main scan with an opposite-polarity b0; reuse the
            # first opposite scan when there are fewer of them
            if len(dwis_main) <= len(dwis_opposite):
                inputs.extend(
                    InputFilesets('dwi_{}_dwi_reference'.format(i),
                                  dwis_opposite[i][0], dicom_format)
                    for i in range(len(dwis_main)))
            else:
                inputs.extend(
                    InputFilesets('dwi_{}_dwi_reference'.format(i),
                                  dwis_opposite[0][0], dicom_format)
                    for i in range(len(dwis_main)))
        if dwis_opposite and dwis_main and not dwis_ref:
            study_specs.extend(
                SubStudySpec('b0_{}'.format(i), EpiSeriesStudy, dwi_refspec)
                for i in range(len(dwis_opposite)))
            inputs.extend(
                InputFilesets('b0_{}_primary'.format(i), dwis_opposite[i][0],
                              dicom_format) for i in range(len(dwis_opposite)))
            if len(dwis_opposite) <= len(dwis_main):
                inputs.extend(
                    InputFilesets('b0_{}_reverse_phase'.format(i),
                                  dwis_main[i][0], dicom_format)
                    for i in range(len(dwis_opposite)))
            else:
                inputs.extend(
                    InputFilesets('b0_{}_reverse_phase'.format(i),
                                  dwis_main[0][0], dicom_format)
                    for i in range(len(dwis_opposite)))
        elif dwis_opposite and dwis_ref:
            # Interleave opposite/reference b0 pairs up to the shorter list
            min_index = min(len(dwis_opposite), len(dwis_ref))
            study_specs.extend(
                SubStudySpec('b0_{}'.format(i), EpiSeriesStudy, dwi_refspec)
                for i in range(min_index * 2))
            inputs.extend(
                InputFilesets('b0_{}_primary'.format(i), scan[0], dicom_format)
                for i, scan in enumerate(dwis_opposite[:min_index] +
                                         dwis_ref[:min_index]))
            inputs.extend(
                InputFilesets('b0_{}_reverse_phase'.format(i), scan[0],
                              dicom_format)
                for i, scan in enumerate(dwis_ref[:min_index] +
                                         dwis_opposite[:min_index]))
            unused_dwi = list(dwis_ref[min_index:] + dwis_opposite[min_index:])
        elif dwis_opposite or dwis_ref:
            unused_dwi = list(dwis_ref + dwis_opposite)
        if unused_dwi:
            # Leftover b0s are still processed, as plain T2 scans
            logger.info(
                'The following scans:\n{}\nwere not assigned during the DWI '
                'motion detection initialization (probably a different number '
                'of main DWI scans and b0 images was provided). They will be '
                'processed as "other" scans.'.format('\n'.join(
                    s[0] for s in unused_dwi)))
            # FIX: t2s may be None; start numbering after any real t2 scans
            num_t2s = len(t2s) if t2s else 0
            study_specs.extend(
                SubStudySpec('t2_{}'.format(i), T2Study, ref_spec)
                for i in range(num_t2s, num_t2s + len(unused_dwi)))
            inputs.extend(
                InputFilesets('t2_{}_primary'.format(i), scan[0], dicom_format)
                for i, scan in enumerate(unused_dwi, start=num_t2s))
        run_pipeline = True

    if not run_pipeline:
        raise Exception('At least one scan, other than the reference, must be '
                        'provided!')

    dct['add_substudy_specs'] = study_specs
    dct['add_data_specs'] = data_specs
    dct['__metaclass__'] = MultiStudyMetaClass
    dct['add_param_specs'] = param_specs
    dct['add_switch_specs'] = switch_specs
    return (MultiStudyMetaClass(name, (MotionDetectionMixin, ),
                                dct), inputs, output_data)
# Exemple #13
# 0
class EpiSeriesStudy(MriStudy, metaclass=StudyMetaClass):
    """Study of a 4D series of EPI volumes, with intra-scan motion
    alignment and distortion correction (TOPUP or FUGUE) pipelines."""

    add_data_specs = [
        # The raw 4D EPI acquisition
        InputFilesetSpec('series',
                         STD_IMAGE_FORMATS,
                         desc=("The set of EPI volumes that make up the "
                               "series")),
        # Optional inputs: wmseg refines epireg; reverse_phase enables
        # TOPUP; the field-map pair enables FUGUE
        InputFilesetSpec('coreg_ref_wmseg', STD_IMAGE_FORMATS, optional=True),
        InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
        InputFilesetSpec('field_map_mag', STD_IMAGE_FORMATS, optional=True),
        InputFilesetSpec('field_map_phase', STD_IMAGE_FORMATS, optional=True),
        FilesetSpec('magnitude',
                    nifti_gz_format,
                    'extract_magnitude_pipeline',
                    desc=("The magnitude image, typically extracted from "
                          "the provided series")),
        FilesetSpec('series_preproc', nifti_gz_format, 'preprocess_pipeline'),
        FilesetSpec('series_coreg', nifti_gz_format, 'series_coreg_pipeline'),
        # Motion-correction (MCFLIRT) outputs
        FilesetSpec('moco', nifti_gz_format, 'intrascan_alignment_pipeline'),
        FilesetSpec('align_mats', motion_mats_format,
                    'intrascan_alignment_pipeline'),
        FilesetSpec('moco_par', par_format, 'intrascan_alignment_pipeline'),
        # Echo-time difference of the field map, read from its headers
        FieldSpec('field_map_delta_te', float, 'field_map_time_info_pipeline')
    ]

    add_param_specs = [
        SwitchSpec('bet_robust', True),
        # Extend the inherited coreg options with FSL epireg, falling
        # back to FLIRT when epireg cannot be used
        MriStudy.param_spec('coreg_method').with_new_choices(
            'epireg', fallbacks={'epireg': 'flirt'}),
        ParamSpec('bet_f_threshold', 0.2),
        ParamSpec('bet_reduce_bias', False),
        # Effective echo spacing, passed to FUGUE as dwell_time
        ParamSpec('fugue_echo_spacing', 0.000275)
    ]

    @property
    def header_image_spec_name(self):
        """Spec whose headers should be read: an explicitly provided
        'header_image' when available, otherwise the series itself."""
        return ('header_image' if self.provided('header_image')
                else 'series')

    @property
    def series_preproc_spec_name(self):
        """Preprocessed-series spec to use downstream: the coregistered
        series when coregistration is configured, else the plain one."""
        return 'series_coreg' if self.is_coregistered else 'series_preproc'

    def coreg_pipeline(self, **name_maps):
        """Dispatch to the epireg coregistration when selected, otherwise
        defer to the inherited coregistration pipeline."""
        if self.branch('coreg_method', 'epireg'):
            return self._epireg_linear_coreg_pipeline(**name_maps)
        return super().coreg_pipeline(**name_maps)

    def _epireg_linear_coreg_pipeline(self, **name_maps):
        """Linear EPI-to-structural registration with FSL epireg, which
        refines the fit using white-matter boundaries."""
        pipeline = self.new_pipeline(
            name='linear_coreg',
            desc=("Intra-subjects epi registration improved using white "
                  "matter boundaries."),
            citations=[fsl_cite],
            name_maps=name_maps)

        epireg_inputs = {
            'epi': ('brain', nifti_gz_format),
            't1_brain': ('coreg_ref_brain', nifti_gz_format),
            't1_head': ('coreg_ref', nifti_gz_format)}
        epireg_outputs = {
            'brain_coreg': ('out_file', nifti_gz_format),
            'coreg_fsl_mat': ('epi2str_mat', text_matrix_format)}
        epireg = pipeline.add(
            'epireg',
            fsl.epi.EpiReg(out_base='epireg2ref', output_type='NIFTI_GZ'),
            inputs=epireg_inputs,
            outputs=epireg_outputs,
            requirements=[fsl_req.v('5.0.9')])

        # A supplied white-matter segmentation is wired into epireg's
        # 'wmseg' port instead of letting it segment internally
        if self.provided('coreg_ref_wmseg'):
            pipeline.connect_input('coreg_ref_wmseg', epireg, 'wmseg',
                                   nifti_gz_format)

        return pipeline

    def brain_coreg_pipeline(self, **name_maps):
        """Coregister the brain-extracted EPI to the reference brain.

        For 'epireg' the standard coreg pipeline is reused with its inputs
        and outputs remapped to the brain images, and the brain mask is
        resampled into reference space with the resulting transform.
        """
        if self.branch('coreg_method', 'epireg'):
            pipeline = self.coreg_pipeline(
                name='brain_coreg',
                name_maps=dict(input_map={
                    'mag_preproc': 'brain',
                    'coreg_ref': 'coreg_ref_brain'
                },
                               output_map={'mag_coreg': 'brain_coreg'},
                               name_maps=name_maps))

            # Resample the brain mask with the matrix estimated by epireg
            pipeline.add(
                'mask_transform',
                fsl.ApplyXFM(output_type='NIFTI_GZ', apply_xfm=True),
                inputs={
                    'in_matrix_file': (pipeline.node('epireg'), 'epi2str_mat'),
                    'in_file': ('brain_mask', nifti_gz_format),
                    'reference': ('coreg_ref_brain', nifti_gz_format)
                },
                outputs={'brain_mask_coreg': ('out_file', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.10')],
                wall_time=10)
        else:
            # NOTE(review): calls super().coreg_brain_pipeline() although
            # this method is named brain_coreg_pipeline — confirm the
            # parent class defines coreg_brain_pipeline, otherwise this
            # branch raises AttributeError.
            pipeline = super().coreg_brain_pipeline(**name_maps)

        return pipeline

    def extract_magnitude_pipeline(self, **name_maps):
        """Pull the first volume out of the EPI series to serve as the
        magnitude image."""
        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts a single magnitude volume from a series",
            citations=[],
            name_maps=name_maps)

        # coord=(3, 0) selects index 0 along the 4th (volume) axis
        pipeline.add(
            "extract_first_vol",
            MRConvert(coord=(3, 0)),
            inputs={
                'in_file': ('series', nifti_gz_format)},
            outputs={
                'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def series_coreg_pipeline(self, **name_maps):
        """Apply the previously estimated coregistration transform to
        every volume of the preprocessed series."""
        pipeline = self.new_pipeline(
            'series_coreg',
            desc="Applies coregistration transform to DW series",
            citations=[],
            name_maps=name_maps)

        # Prefer the whole-head reference; fall back to the brain-only one
        if self.provided('coreg_ref'):
            reference = 'coreg_ref'
        elif self.provided('coreg_ref_brain'):
            reference = 'coreg_ref_brain'
        else:
            raise BananaUsageError(
                "Cannot coregister DW series as reference ('coreg_ref' or "
                "'coreg_ref_brain') has not been provided to {}".format(self))

        # Apply co-registration transformation to DW series
        pipeline.add(
            'mask_transform',
            fsl.ApplyXFM(output_type='NIFTI_GZ', apply_xfm=True),
            inputs={
                'in_matrix_file': ('coreg_fsl_mat', text_matrix_format),
                'in_file': ('series_preproc', nifti_gz_format),
                'reference': (reference, nifti_gz_format)},
            outputs={
                'series_coreg': ('out_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.10')],
            wall_time=10)

        return pipeline

    def intrascan_alignment_pipeline(self, **name_maps):
        """Align the volumes of the series to the first volume with
        MCFLIRT, exporting the motion parameters and matrices."""
        pipeline = self.new_pipeline(
            name='MCFLIRT_pipeline',
            desc=("Intra-epi volumes alignment."),
            citations=[fsl_cite],
            name_maps=name_maps)

        mcflirt_node = pipeline.add(
            'mcflirt',
            fsl.MCFLIRT(
                ref_vol=0,
                save_mats=True,
                save_plots=True,
                output_type='NIFTI_GZ',
                out_file='moco.nii.gz'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format)},
            outputs={
                'moco': ('out_file', nifti_gz_format),
                'moco_par': ('par_file', par_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Collect the per-volume transform matrices into one directory
        pipeline.add(
            'merge',
            MergeListMotionMat(),
            inputs={
                'file_list': (mcflirt_node, 'mat_file')},
            outputs={
                'align_mats': ('out_dir', motion_mats_format)})

        return pipeline

    def field_map_time_info_pipeline(self, **name_maps):
        """Extract the field map's delta TE from its DICOM headers."""
        pipeline = self.new_pipeline(
            name='field_map_time_info_pipeline',
            desc=("Pipeline to extract delta TE from field map "
                  "images, if provided"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'extract_delta_te',
            FieldMapTimeInfo(),
            inputs={
                'fm_mag': ('field_map_mag', dicom_format)},
            outputs={
                'field_map_delta_te': ('delta_te', float)})

        return pipeline

    def preprocess_pipeline(self, **name_maps):
        """Pick the distortion-correction strategy from the provided
        inputs: FUGUE (field map), TOPUP (reverse phase), or default."""
        names = self.input_names
        if 'field_map_phase' in names and 'field_map_mag' in names:
            return self._fugue_pipeline(**name_maps)
        if 'reverse_phase' in names:
            return self._topup_pipeline(**name_maps)
        return super().preprocess_pipeline(**name_maps)

    def _topup_pipeline(self, **name_maps):
        """Build the TOPUP susceptibility-distortion-correction pipeline,
        using the reverse-phase-encoded acquisition to estimate the
        off-resonance field and unwarp the main series."""
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Topup distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Standardise the orientation of both acquisitions first
        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(),
            inputs={'in_file': ('series', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        reorient_epi_opposite = pipeline.add(
            'reorient_epi_opposite',
            fsl.utils.Reorient2Std(),
            inputs={'in_file': ('reverse_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Determine phase-encoding direction/polarity from the headers
        prep_dwi = pipeline.add('prepare_dwi',
                                PrepareDWI(topup=True),
                                inputs={
                                    'pe_dir': ('ped', str),
                                    'ped_polarity': ('pe_angle', str),
                                    'dwi': (reorient_epi_in, 'out_file'),
                                    'dwi1': (reorient_epi_opposite, 'out_file')
                                })

        # Generate the TOPUP acquisition-parameter/config files
        ped = pipeline.add('gen_config',
                           GenTopupConfigFiles(),
                           inputs={'ped': (prep_dwi, 'pe')})

        merge_outputs = pipeline.add('merge_files',
                                     merge_lists(2),
                                     inputs={
                                         'in1': (prep_dwi, 'main'),
                                         'in2': (prep_dwi, 'secondary')
                                     })

        # Concatenate the two phase-encoding directions along time
        merge = pipeline.add('fsl_merge',
                             fsl_merge(dimension='t', output_type='NIFTI_GZ'),
                             inputs={'in_files': (merge_outputs, 'out')},
                             requirements=[fsl_req.v('5.0.9')])

        # Estimate the susceptibility-induced off-resonance field
        topup = pipeline.add('topup',
                             TOPUP(output_type='NIFTI_GZ'),
                             inputs={
                                 'in_file': (merge, 'merged_file'),
                                 'encoding_file': (ped, 'config_file')
                             },
                             requirements=[fsl_req.v('5.0.9')])

        # ApplyTOPUP expects a list input, so wrap the single series
        in_apply_tp = pipeline.add(
            'in_apply_tp',
            merge_lists(1),
            inputs={'in1': (reorient_epi_in, 'out_file')})

        # Apply the estimated field to unwarp the main series
        pipeline.add(
            'applytopup',
            ApplyTOPUP(method='jac', in_index=[1], output_type='NIFTI_GZ'),
            inputs={
                'in_files': (in_apply_tp, 'out'),
                'encoding_file': (ped, 'apply_topup_config'),
                'in_topup_movpar': (topup, 'out_movpar'),
                'in_topup_fieldcoef': (topup, 'out_fieldcoef')
            },
            outputs={'series_preproc': ('out_corrected', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def _fugue_pipeline(self, **name_maps):
        """Build the FUGUE distortion-correction pipeline for the series:
        reorient inputs, brain-extract the field-map magnitude, prepare
        the field map and unwarp the whole EPI series."""
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Fugue distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Standardise the orientation of the series and field-map images
        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('series', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        fm_mag_reorient = pipeline.add(
            'reorient_fm_mag',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('field_map_mag', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        fm_phase_reorient = pipeline.add(
            'reorient_fm_phase',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('field_map_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Brain-extract the field-map magnitude (expected by PrepareFieldmap)
        bet = pipeline.add("bet",
                           BET(robust=True, output_type='NIFTI_GZ'),
                           inputs={'in_file': (fm_mag_reorient, 'out_file')},
                           wall_time=5,
                           requirements=[fsl_req.v('5.0.9')])

        # Convert the phase/magnitude pair into a field map; delta_TE is
        # taken from the extracted header field rather than hard-coded
        create_fmap = pipeline.add(
            "prepfmap",
            PrepareFieldmap(
                # delta_TE=2.46
            ),
            inputs={
                'delta_TE': ('field_map_delta_te', float),
                "in_magnitude": (bet, "out_file"),
                'in_phase': (fm_phase_reorient, 'out_file')
            },
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        # Unwarp every volume of the series with the prepared field map
        pipeline.add(
            'fugue',
            FUGUE(unwarp_direction='x',
                  dwell_time=self.parameter('fugue_echo_spacing'),
                  unwarped_file='example_func.nii.gz',
                  output_type='NIFTI_GZ'),
            inputs={
                'fmap_in_file': (create_fmap, 'out_fieldmap'),
                'in_file': (reorient_epi_in, 'out_file')
            },
            outputs={'series_preproc': ('unwarped_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def motion_mat_pipeline(self, **name_maps):
        """Combine coregistration, qform and (when available) intra-scan
        alignment matrices into per-volume motion matrices."""
        pipeline = self.new_pipeline(
            name='motion_mat_calculation',
            desc=("Motion matrices calculation"),
            citations=[fsl_cite],
            name_maps=name_maps)

        calc_node = pipeline.add(
            'motion_mats',
            MotionMatCalculation(),
            inputs={
                'reg_mat': ('coreg_fsl_mat', text_matrix_format),
                'qform_mat': ('qform_mat', text_matrix_format)},
            outputs={
                'motion_mats': ('motion_mats', motion_mats_format)})
        # TOPUP-corrected series do not produce per-volume align_mats
        if 'reverse_phase' not in self.input_names:
            pipeline.connect_input('align_mats', calc_node, 'align_mats',
                                   motion_mats_format)

        return pipeline