Example 1
class T2Study(MriStudy, metaclass=StudyMetaClass):

    desc = "T2-weighted MRI contrast"

    add_data_specs = [
        FilesetSpec('wm_seg', nifti_gz_format, 'segmentation_pipeline')]

    add_param_specs = [
        SwitchSpec('bet_robust', True),
        ParamSpec('bet_f_threshold', 0.5),
        ParamSpec('bet_reduce_bias', False)]

    default_bids_inputs = [
        BidsInputs(spec_name='magnitude', type='T2w',
                   valid_formats=(nifti_gz_x_format, nifti_gz_format))]

    def segmentation_pipeline(self, img_type=2, **name_maps):

        pipeline = self.new_pipeline(
            name='FAST_segmentation',
            name_maps=name_maps,
            desc="White matter segmentation of the reference image",
            citations=[fsl_cite])

        fast = pipeline.add(
            'fast',
            fsl.FAST(
                img_type=img_type,
                segments=True,
                out_basename='Reference_segmentation',
                output_type='NIFTI_GZ'),
            inputs={
                'in_files': ('brain', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Determine output field of split to use
        if img_type == 1:
            split_output = 'out3'
        elif img_type == 2:
            split_output = 'out2'
        else:
            raise BananaUsageError(
                "'img_type' parameter can either be 1 or 2 (not {})"
                .format(img_type))

        pipeline.add(
            'split',
            Split(
                splits=[1, 1, 1],
                squeeze=True),
            inputs={
                'inlist': (fast, 'tissue_class_files')},
            outputs={
                'wm_seg': (split_output, nifti_gz_format)})

        return pipeline
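
# Standalone usage sketch (not part of the original study class): the 'fast'
# node above wraps Nipype's FSL FAST interface. Run outside the pipeline it
# looks roughly like this, assuming FSL is installed and 'brain.nii.gz' is a
# hypothetical brain-extracted T2 image.
from nipype.interfaces import fsl

fast = fsl.FAST(in_files='brain.nii.gz',
                img_type=2,
                segments=True,
                out_basename='Reference_segmentation',
                output_type='NIFTI_GZ')
result = fast.run()
# The binary tissue-class maps land in result.outputs.tissue_class_files;
# for a T2 input (img_type=2) the pipeline above picks the second one
# ('out2' of the Split node) as the white-matter segmentation.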
Example 2
class T1Study(T2Study, metaclass=StudyMetaClass):

    desc = "T1-weighted MRI contrast"

    add_data_specs = [
        FilesetSpec('fs_recon_all', zip_format, 'freesurfer_pipeline'),
        InputFilesetSpec(
            't2_coreg',
            STD_IMAGE_FORMATS,
            optional=True,
            desc=("A coregistered T2 image to use in freesurfer to help "
                  "distinguish the peel surface")),
        # Templates
        InputFilesetSpec('suit_mask',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('SUIT', nifti_format)),
        FilesetSpec('five_tissue_type',
                    mrtrix_image_format,
                    'gen_5tt_pipeline',
                    desc=("A segmentation image taken from freesurfer output "
                          "and simplified into 5 tissue types. Used in ACT "
                          "streamlines tractography"))
    ] + [
        FilesetSpec('aparc_stats_{}_{}_table'.format(h, m),
                    text_format,
                    'aparc_stats_table_pipeline',
                    frequency='per_visit',
                    pipeline_args={
                        'hemisphere': h,
                        'measure': m
                    },
                    desc=("Table of {} of {} per parcellated segment".format(
                        m, h.upper())))
        for h, m in itertools.product(
            ('lh', 'rh'), ('volume', 'thickness', 'thicknessstd', 'meancurv',
                           'gauscurv', 'foldind', 'curvind'))
    ]

    add_param_specs = [
        # MriStudy.param_spec('bet_method').with_new_choices(default='opti_bet'),
        SwitchSpec('bet_robust', False),
        SwitchSpec('bet_reduce_bias', True),
        SwitchSpec('aparc_atlas',
                   'desikan-killiany',
                   choices=('desikan-killiany', 'destrieux', 'DKT')),
        ParamSpec('bet_f_threshold', 0.1),
        ParamSpec('bet_g_threshold', 0.0)
    ]

    default_bids_inputs = [
        BidsInputs(spec_name='magnitude',
                   type='T1w',
                   valid_formats=(nifti_gz_x_format, nifti_gz_format))
    ]

    def freesurfer_pipeline(self, **name_maps):
        """
        Segments grey matter, white matter and CSF from T1 images using
        SPM "NewSegment" function.

        NB: Default values come from the W2MHS toolbox
        """
        pipeline = self.new_pipeline(name='segmentation',
                                     name_maps=name_maps,
                                     desc="Segment white/grey matter and csf",
                                     citations=copy(freesurfer_cites))

        # FS ReconAll node
        recon_all = pipeline.add(
            'recon_all',
            interface=ReconAll(directive='all',
                               openmp=self.processor.num_processes),
            inputs={'T1_files': ('mag_preproc', nifti_gz_format)},
            requirements=[freesurfer_req.v('5.3')],
            wall_time=2000)

        if self.provided('t2_coreg'):
            pipeline.connect_input('t2_coreg', recon_all, 'T2_file',
                                   nifti_gz_format)
            recon_all.inputs.use_T2 = True

        # Wrapper around os.path.join
        pipeline.add('join',
                     JoinPath(),
                     inputs={
                         'dirname': (recon_all, 'subjects_dir'),
                         'filename': (recon_all, 'subject_id')
                     },
                     outputs={'fs_recon_all': ('path', directory_format)})

        return pipeline

    def segmentation_pipeline(self, **name_maps):
        pipeline = super(T1Study, self).segmentation_pipeline(img_type=1,
                                                              **name_maps)
        return pipeline

    def gen_5tt_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='gen5tt',
            name_maps=name_maps,
            desc=("Generate 5-tissue-type image used for Anatomically-"
                  "Constrained Tractography (ACT)"))

        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(sub_paths=['mri', 'aseg.mgz']),
            inputs={'base_path': ('fs_recon_all', directory_format)})

        pipeline.add(
            'gen5tt',
            mrtrix3.Generate5tt(algorithm='freesurfer', out_file='5tt.mif'),
            inputs={'in_file': (aseg_path, 'out_path')},
            outputs={'five_tissue_type': ('out_file', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3'),
                          freesurfer_req.v('6.0')])

        return pipeline

    def aparc_stats_table_pipeline(self, measure, hemisphere, **name_maps):

        pipeline = self.new_pipeline(
            name='aparc_stats_{}_{}'.format(hemisphere, measure),
            name_maps=name_maps,
            desc=("Extract statistics from freesurfer outputs"))

        copy_to_dir = pipeline.add('copy_to_subjects_dir',
                                   CopyToDir(),
                                   inputs={
                                       'in_files':
                                       ('fs_recon_all', directory_format),
                                       'file_names': (self.SUBJECT_ID, int)
                                   },
                                   joinsource=self.SUBJECT_ID,
                                   joinfield=['in_files', 'file_names'])

        if self.branch('aparc_atlas', 'desikan-killiany'):
            parc = 'aparc'
        elif self.branch('aparc_atlas', 'destrieux'):
            parc = 'aparc.a2009s'
        elif self.branch('aparc_atlas', 'DKT'):
            parc = 'aparc.DKTatlas40'
        else:
            self.unhandled_branch('aparc_atlas')

        pipeline.add('aparc_stats',
                     AparcStats(measure=measure,
                                hemisphere=hemisphere,
                                parc=parc),
                     inputs={
                         'subjects_dir': (copy_to_dir, 'out_dir'),
                         'subjects': (copy_to_dir, 'file_names')
                     },
                     outputs={
                         'aparc_stats_{}_{}_table'.format(hemisphere, measure):
                         ('tablefile', text_format)
                     },
                     requirements=[freesurfer_req.v('5.3')])

        return pipeline

    def bet_T1(self, **name_maps):

        pipeline = self.new_pipeline(
            name='BET_T1',
            name_maps=name_maps,
            desc=("Brain extraction pipeline using FSL's BET"),
            citations=[fsl_cite])

        bias = pipeline.add('n4_bias_correction',
                            ants.N4BiasFieldCorrection(),
                            inputs={'input_image': ('t1', nifti_gz_format)},
                            requirements=[ants_req.v('1.9')],
                            wall_time=60,
                            mem_gb=12)

        pipeline.add('bet',
                     fsl.BET(frac=0.15,
                             reduce_bias=True,
                             output_type='NIFTI_GZ'),
                     inputs={'in_file': (bias, 'output_image')},
                     outputs={
                         'betted_T1': ('out_file', nifti_gz_format),
                         'betted_T1_mask': ('mask_file', nifti_gz_format)
                     },
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=8,
                     wall_time=45)

        return pipeline

    def cet_T1(self, **name_maps):
        pipeline = self.new_pipeline(
            name='CET_T1',
            name_maps=name_maps,
            desc=("Construct cerebellum mask using SUIT template"),
            citations=[fsl_cite])

        # FIXME: Should convert to inputs
        nl = self._lookup_nl_tfm_inv_name('MNI')
        linear = self._lookup_l_tfm_to_name('MNI')

        # Initially use MNI space to warp SUIT into T1 and threshold to mask
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(2),
                                   inputs={
                                       'in2': (nl, nifti_gz_format),
                                       'in1': (linear, nifti_gz_format)
                                   })

        apply_trans = pipeline.add('ApplyTransform',
                                   ants.resampling.ApplyTransforms(
                                       interpolation='NearestNeighbor',
                                       input_image_type=3,
                                       invert_transform_flags=[True, False]),
                                   inputs={
                                       'reference_image':
                                       ('betted_T1', nifti_gz_format),
                                       'input_image':
                                       ('suit_mask', nifti_gz_format),
                                       'transforms': (merge_trans, 'out')
                                   },
                                   outputs={
                                       'cetted_T1_mask':
                                       ('output_image', nifti_gz_format)
                                   },
                                   requirements=[ants_req.v('1.9')],
                                   mem_gb=16,
                                   wall_time=120)

        pipeline.add('maths2',
                     fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                          op_string='-mas'),
                     inputs={
                         'in_file': ('betted_T1', nifti_gz_format),
                         'in_file2': (apply_trans, 'output_image')
                     },
                     outputs={
                         'cetted_T1': ('out_file', nifti_gz_format)
                     },
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=16,
                     wall_time=5)

        return pipeline
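
# Standalone usage sketch (not part of the original study class): the
# 'recon_all' node in freesurfer_pipeline wraps Nipype's FreeSurfer ReconAll
# interface. Outside the pipeline it looks roughly like this; the file names,
# subject ID and subjects directory are hypothetical.
from nipype.interfaces.freesurfer import ReconAll

recon = ReconAll(directive='all',
                 subject_id='sub-01',
                 T1_files='t1.nii.gz',
                 subjects_dir='/tmp/fs_subjects',
                 openmp=4)
# With a coregistered T2 provided, the pipeline additionally sets:
#     recon.inputs.T2_file = 't2_coreg.nii.gz'
#     recon.inputs.use_T2 = True
result = recon.run()
# result.outputs.subjects_dir / result.outputs.subject_id is the directory
# that the 'join' node packages up as the 'fs_recon_all' output.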
Example 3
class T1Study(T2Study, metaclass=StudyMetaClass):

    desc = "T1-weighted MRI contrast"

    add_data_specs = [
        FilesetSpec('fs_recon_all', zip_format, 'freesurfer_pipeline'),
        InputFilesetSpec(
            't2_coreg',
            STD_IMAGE_FORMATS,
            optional=True,
            desc=("A coregistered T2 image to use in freesurfer to help "
                  "distinguish the peel surface")),
        # Templates
        InputFilesetSpec('suit_mask',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('SUIT', nifti_format)),
        FilesetSpec('five_tissue_type',
                    mrtrix_image_format,
                    'gen_5tt_pipeline',
                    desc=("A segmentation image taken from freesurfer output "
                          "and simplified into 5 tissue types. Used in ACT "
                          "streamlines tractography"))
    ] + [
        FilesetSpec('aparc_stats_{}_{}_table'.format(h, m),
                    text_format,
                    'aparc_stats_table_pipeline',
                    frequency='per_visit',
                    pipeline_args={
                        'hemisphere': h,
                        'measure': m
                    },
                    desc=("Table of {} of {} per parcellated segment".format(
                        m, h.upper())))
        for h, m in itertools.product(
            ('lh', 'rh'), ('volume', 'thickness', 'thicknessstd', 'meancurv',
                           'gauscurv', 'foldind', 'curvind'))
    ]

    add_param_specs = [
        # MriStudy.param_spec('bet_method').with_new_choices(default='opti_bet'),
        SwitchSpec('bet_robust', False),
        SwitchSpec('bet_reduce_bias', True),
        SwitchSpec('aparc_atlas',
                   'desikan-killiany',
                   choices=('desikan-killiany', 'destrieux', 'DKT')),
        ParamSpec('bet_f_threshold', 0.1),
        ParamSpec('bet_g_threshold', 0.0)
    ]

    default_bids_inputs = [
        BidsInputs(spec_name='magnitude',
                   type='T1w',
                   valid_formats=(nifti_gz_x_format, nifti_gz_format))
    ]

    primary_scan_name = 'magnitude'

    def freesurfer_pipeline(self, **name_maps):
        """
        Segments grey matter, white matter and CSF from T1 images using
        SPM "NewSegment" function.

        NB: Default values come from the W2MHS toolbox
        """
        pipeline = self.new_pipeline(name='segmentation',
                                     name_maps=name_maps,
                                     desc="Segment white/grey matter and csf",
                                     citations=copy(freesurfer_cites))

        # FS ReconAll node
        recon_all = pipeline.add(
            'recon_all',
            interface=ReconAll(directive='all',
                               openmp=self.processor.num_processes),
            inputs={'T1_files': (self.preproc_spec_name, nifti_gz_format)},
            requirements=[freesurfer_req.v('5.3')],
            wall_time=2000)

        if self.provided('t2_coreg'):
            pipeline.connect_input('t2_coreg', recon_all, 'T2_file',
                                   nifti_gz_format)
            recon_all.inputs.use_T2 = True

        # Wrapper around os.path.join
        pipeline.add('join',
                     JoinPath(),
                     inputs={
                         'dirname': (recon_all, 'subjects_dir'),
                         'filename': (recon_all, 'subject_id')
                     },
                     outputs={'fs_recon_all': ('path', directory_format)})

        return pipeline

    def segmentation_pipeline(self, **name_maps):
        pipeline = super(T1Study, self).segmentation_pipeline(img_type=1,
                                                              **name_maps)
        return pipeline

    def gen_5tt_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='gen5tt',
            name_maps=name_maps,
            desc=("Generate 5-tissue-type image used for Anatomically-"
                  "Constrained Tractography (ACT)"))

        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(sub_paths=['mri', 'aseg.mgz']),
            inputs={'base_path': ('fs_recon_all', directory_format)})

        pipeline.add(
            'gen5tt',
            mrtrix3.Generate5tt(algorithm='freesurfer', out_file='5tt.mif'),
            inputs={'in_file': (aseg_path, 'out_path')},
            outputs={'five_tissue_type': ('out_file', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3'),
                          freesurfer_req.v('6.0')])

        return pipeline

    def aparc_stats_table_pipeline(self, measure, hemisphere, **name_maps):

        pipeline = self.new_pipeline(
            name='aparc_stats_{}_{}'.format(hemisphere, measure),
            name_maps=name_maps,
            desc=("Extract statistics from freesurfer outputs"))

        copy_to_dir = pipeline.add('copy_to_subjects_dir',
                                   CopyToDir(),
                                   inputs={
                                       'in_files':
                                       ('fs_recon_all', directory_format),
                                       'file_names': (self.SUBJECT_ID, int)
                                   },
                                   joinsource=self.SUBJECT_ID,
                                   joinfield=['in_files', 'file_names'])

        if self.branch('aparc_atlas', 'desikan-killiany'):
            parc = 'aparc'
        elif self.branch('aparc_atlas', 'destrieux'):
            parc = 'aparc.a2009s'
        elif self.branch('aparc_atlas', 'DKT'):
            parc = 'aparc.DKTatlas40'
        else:
            self.unhandled_branch('aparc_atlas')

        pipeline.add('aparc_stats',
                     AparcStats(measure=measure,
                                hemisphere=hemisphere,
                                parc=parc),
                     inputs={
                         'subjects_dir': (copy_to_dir, 'out_dir'),
                         'subjects': (copy_to_dir, 'file_names')
                     },
                     outputs={
                         'aparc_stats_{}_{}_table'.format(hemisphere, measure):
                         ('tablefile', text_format)
                     },
                     requirements=[freesurfer_req.v('5.3')])

        return pipeline
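
# Command-line sketch (an assumption, not taken from the original source): the
# custom AparcStats interface used in aparc_stats_table_pipeline appears to
# drive FreeSurfer's aparcstats2table for each (hemisphere, measure) pair,
# judging by its parameters. A rough equivalent with hypothetical subject IDs
# and paths:
import os
import subprocess

parc = {'desikan-killiany': 'aparc',
        'destrieux': 'aparc.a2009s',
        'DKT': 'aparc.DKTatlas40'}['desikan-killiany']
env = dict(os.environ, SUBJECTS_DIR='/tmp/fs_subjects')
subprocess.run(['aparcstats2table',
                '--subjects', 'sub-01', 'sub-02',
                '--hemi', 'lh',
                '--meas', 'thickness',
                '--parc', parc,
                '--tablefile', 'lh_thickness_table.txt'],
               env=env, check=True)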
Example 4
class BoldStudy(EpiSeriesStudy, metaclass=StudyMetaClass):

    desc = "Functional MRI BOLD MRI contrast"

    add_data_specs = [
        InputFilesetSpec('train_data',
                         rfile_format,
                         optional=True,
                         frequency='per_study'),
        FilesetSpec('hand_label_noise', text_format,
                    'fix_preparation_pipeline'),
        FilesetSpec('labelled_components', text_format,
                    'fix_classification_pipeline'),
        FilesetSpec('cleaned_file', nifti_gz_format,
                    'fix_regression_pipeline'),
        FilesetSpec('filtered_data', nifti_gz_format,
                    'rsfMRI_filtering_pipeline'),
        FilesetSpec('mc_par', par_format, 'rsfMRI_filtering_pipeline'),
        FilesetSpec('melodic_ica', zip_format,
                    'single_subject_melodic_pipeline'),
        FilesetSpec('fix_dir', zip_format, 'fix_preparation_pipeline'),
        FilesetSpec('normalized_ts', nifti_gz_format,
                    'timeseries_normalization_to_atlas_pipeline'),
        FilesetSpec('smoothed_ts', nifti_gz_format, 'smoothing_pipeline')
    ]

    add_param_specs = [
        ParamSpec('component_threshold', 20),
        ParamSpec('motion_reg', True),
        ParamSpec('highpass', 0.01),
        ParamSpec('brain_thresh_percent', 5),
        ParamSpec('group_ica_components', 15)
    ]

    primary_bids_selector = BidsInputs(spec_name='series',
                                       type='bold',
                                       valid_formats=(nifti_gz_x_format,
                                                      nifti_gz_format))

    default_bids_inputs = [
        primary_bids_selector,
        BidsAssocInput(spec_name='field_map_phase',
                       primary=primary_bids_selector,
                       association='phasediff',
                       format=nifti_gz_format,
                       drop_if_missing=True),
        BidsAssocInput(spec_name='field_map_mag',
                       primary=primary_bids_selector,
                       association='phasediff',
                       type='magnitude',
                       format=nifti_gz_format,
                       drop_if_missing=True)
    ]

    def rsfMRI_filtering_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='rsfMRI_filtering',
            desc=("Spatial and temporal rsfMRI filtering"),
            citations=[fsl_cite],
            name_maps=name_maps)

        afni_mc = pipeline.add(
            'AFNI_MC',
            Volreg(zpad=1,
                   out_file='rsfmri_mc.nii.gz',
                   oned_file='prefiltered_func_data_mcf.par'),
            inputs={'in_file': ('series_preproc', nifti_gz_format)},
            outputs={'mc_par': ('oned_file', par_format)},
            wall_time=5,
            requirements=[afni_req.v('16.2.10')])

        filt = pipeline.add('Tproject',
                            Tproject(stopband=(0, 0.01),
                                     polort=3,
                                     blur=3,
                                     out_file='filtered_func_data.nii.gz'),
                            inputs={
                                'delta_t': ('tr', float),
                                'mask':
                                (self.brain_mask_spec_name, nifti_gz_format),
                                'in_file': (afni_mc, 'out_file')
                            },
                            wall_time=5,
                            requirements=[afni_req.v('16.2.10')])

        meanfunc = pipeline.add('meanfunc',
                                ImageMaths(op_string='-Tmean',
                                           suffix='_mean',
                                           output_type='NIFTI_GZ'),
                                wall_time=5,
                                inputs={'in_file': (afni_mc, 'out_file')},
                                requirements=[fsl_req.v('5.0.10')])

        pipeline.add('add_mean',
                     ImageMaths(op_string='-add', output_type='NIFTI_GZ'),
                     inputs={
                         'in_file': (filt, 'out_file'),
                         'in_file2': (meanfunc, 'out_file')
                     },
                     outputs={'filtered_data': ('out_file', nifti_gz_format)},
                     wall_time=5,
                     requirements=[fsl_req.v('5.0.10')])

        return pipeline

    def single_subject_melodic_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='MelodicL1',
            desc=("Single subject ICA analysis using FSL MELODIC."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add('melodic_L1',
                     MELODIC(
                         no_bet=True,
                         bg_threshold=self.parameter('brain_thresh_percent'),
                         report=True,
                         out_stats=True,
                         mm_thresh=0.5,
                         out_dir='melodic_ica',
                         output_type='NIFTI_GZ'),
                     inputs={
                         'mask': (self.brain_mask_spec_name, nifti_gz_format),
                         'tr_sec': ('tr', float),
                         'in_files': ('filtered_data', nifti_gz_format)
                     },
                     outputs={'melodic_ica': ('out_dir', directory_format)},
                     wall_time=15,
                     requirements=[fsl_req.v('5.0.10')])

        return pipeline

    def fix_preparation_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='prepare_fix',
            desc=("Pipeline to create the right folder structure before "
                  "running FIX"),
            citations=[fsl_cite],
            name_maps=name_maps)

        if self.branch('coreg_to_tmpl_method', 'ants'):

            struct_ants2fsl = pipeline.add(
                'struct_ants2fsl',
                ANTs2FSLMatrixConversion(ras2fsl=True),
                inputs={
                    'reference_file': ('template_brain', nifti_gz_format),
                    'itk_file': ('coreg_to_tmpl_ants_mat', text_matrix_format),
                    'source_file': ('coreg_ref_brain', nifti_gz_format)
                },
                requirements=[c3d_req.v('1.0.0')])

            struct_matrix = (struct_ants2fsl, 'fsl_matrix')
        else:
            struct_matrix = ('coreg_to_tmpl_fsl_mat', text_matrix_format)


        # if self.branch('coreg_method', 'ants'):
        #     epi_ants2fsl = pipeline.add(
        #         'epi_ants2fsl',
        #         ANTs2FSLMatrixConversion(
        #             ras2fsl=True),
        #         inputs={
        #             'source_file': ('brain', nifti_gz_format),
        #             'itk_file': ('coreg_ants_mat', text_matrix_format),
        #             'reference_file': ('coreg_ref_brain', nifti_gz_format)},
        #         requirements=[c3d_req.v('1.0.0')])

        MNI2t1 = pipeline.add('MNI2t1',
                              ConvertXFM(invert_xfm=True),
                              inputs={'in_file': struct_matrix},
                              wall_time=5,
                              requirements=[fsl_req.v('5.0.9')])

        struct2epi = pipeline.add(
            'struct2epi',
            ConvertXFM(invert_xfm=True),
            inputs={'in_file': ('coreg_fsl_mat', text_matrix_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        meanfunc = pipeline.add(
            'meanfunc',
            ImageMaths(op_string='-Tmean',
                       suffix='_mean',
                       output_type='NIFTI_GZ'),
            inputs={'in_file': ('series_preproc', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        pipeline.add('prep_fix',
                     PrepareFIX(),
                     inputs={
                         'melodic_dir': ('melodic_ica', directory_format),
                         't1_brain': ('coreg_ref_brain', nifti_gz_format),
                         'mc_par': ('mc_par', par_format),
                         'epi_brain_mask': ('brain_mask', nifti_gz_format),
                         'epi_preproc': ('series_preproc', nifti_gz_format),
                         'filtered_epi': ('filtered_data', nifti_gz_format),
                         'epi2t1_mat': ('coreg_fsl_mat', text_matrix_format),
                         't12MNI_mat': struct_matrix,
                         'MNI2t1_mat': (MNI2t1, 'out_file'),
                         't12epi_mat': (struct2epi, 'out_file'),
                         'epi_mean': (meanfunc, 'out_file')
                     },
                     outputs={
                         'fix_dir': ('fix_dir', directory_format),
                         'hand_label_noise': ('hand_label_file', text_format)
                     })

        return pipeline

    def fix_classification_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='fix_classification',
            desc=("Automatic classification of noisy components from the "
                  "rsfMRI data using fsl FIX."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            "fix",
            FSLFIX(component_threshold=self.parameter('component_threshold'),
                   motion_reg=self.parameter('motion_reg'),
                   classification=True),
            inputs={
                "feat_dir": ("fix_dir", directory_format),
                "train_data": ("train_data", rfile_format)
            },
            outputs={'labelled_components': ('label_file', text_format)},
            wall_time=30,
            requirements=[fsl_req.v('5.0.9'),
                          fix_req.v('1.0')])

        return pipeline

    def fix_regression_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='signal_regression',
            desc=("Regression of the noisy components from the rsfMRI data "
                  "using a python implementation equivalent to that in FIX."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            "signal_reg",
            SignalRegression(motion_regression=self.parameter('motion_reg'),
                             highpass=self.parameter('highpass')),
            inputs={
                "fix_dir": ("fix_dir", directory_format),
                "labelled_components": ("labelled_components", text_format)
            },
            outputs={'cleaned_file': ('output', nifti_gz_format)},
            wall_time=30,
            requirements=[fsl_req.v('5.0.9'),
                          fix_req.v('1.0')])

        return pipeline

    def timeseries_normalization_to_atlas_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='timeseries_normalization_to_atlas_pipeline',
            desc=("Apply ANTs transformation to the fmri filtered file to "
                  "normalize it to MNI 2mm."),
            citations=[fsl_cite],
            name_maps=name_maps)

        merge_trans = pipeline.add('merge_transforms',
                                   NiPypeMerge(3),
                                   inputs={
                                       'in1': ('coreg_to_tmpl_ants_warp',
                                               nifti_gz_format),
                                       'in2': ('coreg_to_tmpl_ants_mat',
                                               text_matrix_format),
                                       'in3':
                                       ('coreg_matrix', text_matrix_format)
                                   },
                                   wall_time=1)

        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(interpolation='Linear', input_image_type=3),
            inputs={
                'reference_image': ('template_brain', nifti_gz_format),
                'input_image': ('cleaned_file', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            outputs={'normalized_ts': ('output_image', nifti_gz_format)},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2')])

        return pipeline

    def smoothing_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='smoothing_pipeline',
            desc=("Spatial smoothing of the normalized fmri file"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add('3dBlurToFWHM',
                     BlurToFWHM(fwhm=5, out_file='smoothed_ts.nii.gz'),
                     inputs={
                         'mask': ('template_mask', nifti_gz_format),
                         'in_file': ('normalized_ts', nifti_gz_format)
                     },
                     outputs={'smoothed_ts': ('out_file', nifti_gz_format)},
                     wall_time=5,
                     requirements=[afni_req.v('16.2.10')])

        return pipeline
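
# Standalone usage sketch (not part of the original study class): the
# 'melodic_L1' node in single_subject_melodic_pipeline wraps Nipype's FSL
# MELODIC interface. Outside the pipeline it looks roughly like this; the
# input file names and TR are hypothetical.
from nipype.interfaces.fsl import MELODIC

melodic = MELODIC(in_files='filtered_func_data.nii.gz',
                  mask='brain_mask.nii.gz',
                  tr_sec=2.0,
                  no_bet=True,
                  bg_threshold=5,
                  report=True,
                  out_stats=True,
                  mm_thresh=0.5,
                  out_dir='melodic_ica',
                  output_type='NIFTI_GZ')
result = melodic.run()
# result.outputs.out_dir is the ICA directory that FIX is later prepared on.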
Example 5
class DwiStudy(EpiSeriesStudy, metaclass=StudyMetaClass):

    desc = "Diffusion-weighted MRI contrast"

    add_data_specs = [
        InputFilesetSpec('anat_5tt', mrtrix_image_format,
                         desc=("A co-registered segmentation image taken from "
                               "freesurfer output and simplified into 5 tissue"
                               " types. Used in ACT streamlines tractography"),
                         optional=True),
        InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,
                         desc=("Co-registered freesurfer recon-all output. "
                               "Used in building the connectome")),
        InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
        FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),
        FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,
                    'series_coreg_pipeline',
                    desc=("The gradient directions coregistered to the "
                          "orientation of the coreg reference")),
        FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',
                    desc=("")),
        FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',
                    desc=("")),
        FilesetSpec('noise_residual', mrtrix_image_format,
                    'preprocess_pipeline',
                    desc=("")),
        FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',
                    desc=("")),
        FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',
                    desc=("")),
        FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',
                    desc=("")),
        FilesetSpec('wm_response', text_format, 'response_pipeline',
                    desc=("")),
        FilesetSpec('gm_response', text_format, 'response_pipeline',
                    desc=("")),
        FilesetSpec('csf_response', text_format, 'response_pipeline',
                    desc=("")),
        FilesetSpec('avg_response', text_format, 'average_response_pipeline',
                    desc=("")),
        FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',
                    desc=("")),
        FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',
                    desc=("")),
        FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',
                    desc=("")),
        FilesetSpec('norm_intensity', mrtrix_image_format,
                    'intensity_normalisation_pipeline',
                    desc=("")),
        FilesetSpec('norm_intens_fa_template', mrtrix_image_format,
                    'intensity_normalisation_pipeline', frequency='per_study',
                    desc=("")),
        FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,
                    'intensity_normalisation_pipeline', frequency='per_study',
                    desc=("")),
        FilesetSpec('global_tracks', mrtrix_track_format,
                    'global_tracking_pipeline',
                    desc=("")),
        FilesetSpec('wm_mask', mrtrix_image_format,
                    'global_tracking_pipeline',
                    desc=("")),
        FilesetSpec('connectome', csv_format, 'connectome_pipeline',
                    desc=(""))]

    add_param_specs = [
        ParamSpec('multi_tissue', True,
                  desc=("")),
        ParamSpec('preproc_pe_dir', None, dtype=str,
                  desc=("")),
        ParamSpec('tbss_skel_thresh', 0.2,
                  desc=("")),
        ParamSpec('fsl_mask_f', 0.25,
                  desc=("")),
        ParamSpec('bet_robust', True,
                  desc=("")),
        ParamSpec('bet_f_threshold', 0.2,
                  desc=("")),
        ParamSpec('bet_reduce_bias', False,
                  desc=("")),
        ParamSpec('num_global_tracks', int(1e9),
                  desc=("")),
        ParamSpec('global_tracks_cutoff', 0.05,
                  desc=("")),
        SwitchSpec('preproc_denoise', False,
                   desc=("")),
        SwitchSpec('response_algorithm', 'tax',
                   ('tax', 'dhollander', 'msmt_5tt'),
                   desc=("")),
        SwitchSpec('fod_algorithm', 'csd', ('csd', 'msmt_csd'),
                   desc=("")),
        MriStudy.param_spec('bet_method').with_new_choices('mrtrix'),
        SwitchSpec('reorient2std', False,
                   desc=(""))]

    primary_bids_input = BidsInputs(
        spec_name='series', type='dwi',
        valid_formats=(nifti_gz_x_format, nifti_gz_format))

    default_bids_inputs = [primary_bids_input,
                           BidsAssocInputs(
                               spec_name='bvalues',
                               primary=primary_bids_input,
                               association='grads',
                               type='bval',
                               format=fsl_bvals_format),
                           BidsAssocInputs(
                               spec_name='grad_dirs',
                               primary=primary_bids_input,
                               association='grads',
                               type='bvec',
                               format=fsl_bvecs_format),
                           BidsAssocInputs(
                               spec_name='reverse_phase',
                               primary=primary_bids_input,
                               association='epi',
                               format=nifti_gz_format,
                               drop_if_missing=True)]

    RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5

    primary_scan_name = 'series'

    @property
    def multi_tissue(self):
        return self.branch('response_algorithm',
                           ('msmt_5tt', 'dhollander'))

    def fsl_grads(self, pipeline, coregistered=True):
        "Adds and returns a node to the pipeline to merge the FSL grads and "
        "bvecs"
        try:
            fslgrad = pipeline.node('fslgrad')
        except ArcanaNameError:
            if self.is_coregistered and coregistered:
                grad_dirs = 'grad_dirs_coreg'
            else:
                grad_dirs = 'grad_dirs'
            # Gradient merge node
            fslgrad = pipeline.add(
                "fslgrad",
                MergeTuple(2),
                inputs={
                    'in1': (grad_dirs, fsl_bvecs_format),
                    'in2': ('bvalues', fsl_bvals_format)})
        return (fslgrad, 'out')

    def extract_magnitude_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts the first b==0 volume from the series",
            citations=[],
            name_maps=name_maps)

        dwiextract = pipeline.add(
            'dwiextract',
            ExtractDWIorB0(
                bzero=True,
                out_ext='.nii.gz'),
            inputs={
                'in_file': ('series', nifti_gz_format),
                'fslgrad': self.fsl_grads(pipeline, coregistered=False)},
            requirements=[mrtrix_req.v('3.0rc3')])

        pipeline.add(
            "extract_first_vol",
            MRConvert(
                coord=(3, 0)),
            inputs={
                'in_file': (dwiextract, 'out_file')},
            outputs={
                'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def preprocess_pipeline(self, **name_maps):
        """
        Performs a series of FSL preprocessing steps, including Eddy and Topup

        The phase-encode direction is taken from the 'preproc_pe_dir'
        parameter ('AP'|'LR'|'IS') when it is provided.
        """

        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite,
                      distort_correct_cite, n4_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)

        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            citations=references)

        # Create nodes to gradients to FSL format
        if self.input('series').format == dicom_format:
            extract_grad = pipeline.add(
                "extract_grad",
                ExtractFSLGradients(),
                inputs={
                    'in_file': ('series', dicom_format)},
                outputs={
                    'grad_dirs': ('bvecs_file', fsl_bvecs_format),
                    'bvalues': ('bvals_file', fsl_bvals_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
                               'in2': (extract_grad, 'bvals_file')}
        elif self.provided('grad_dirs') and self.provided('bvalues'):
            grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
                               'in2': ('bvalues', fsl_bvals_format)}
        else:
            raise BananaUsageError(
                "Either the 'series' input needs to be in DICOM format "
                "or gradient directions and b-values need to be explicitly "
                "provided to {}".format(self))

        # Gradient merge node
        grad_fsl = pipeline.add(
            "grad_fsl",
            MergeTuple(2),
            inputs=grad_fsl_inputs)

        gradients = (grad_fsl, 'out')

        # Create node to reorient preproc out_file
        if self.branch('reorient2std'):
            reorient = pipeline.add(
                'fslreorient2std',
                fsl.utils.Reorient2Std(
                    output_type='NIFTI_GZ'),
                inputs={
                    'in_file': ('series', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.9')])
            reoriented = (reorient, 'out_file')
        else:
            reoriented = ('series', nifti_gz_format)

        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(),
                inputs={
                    'in_file': reoriented},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Calculate residual noise
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': reoriented,
                    'in2': (denoise, 'noise')})

            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract'),
                inputs={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'noise_residual': ('out_file', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            denoised = (denoise, 'out_file')
        else:
            denoised = reoriented

        # Preproc kwargs
        preproc_kwargs = {}
        preproc_inputs = {'in_file': denoised,
                          'grad_fsl': gradients}

        if self.provided('reverse_phase'):

            if self.provided('magnitude', default_okay=False):
                dwi_reference = ('magnitude', mrtrix_image_format)
            else:
                # Extract b=0 volumes
                dwiextract = pipeline.add(
                    'dwiextract',
                    ExtractDWIorB0(
                        bzero=True,
                        out_ext='.nii.gz'),
                    inputs={
                        'in_file': denoised,
                        'fslgrad': gradients},
                    requirements=[mrtrix_req.v('3.0rc3')])

                # Get first b=0 from dwi b=0 volumes
                extract_first_b0 = pipeline.add(
                    "extract_first_vol",
                    MRConvert(
                        coord=(3, 0)),
                    inputs={
                        'in_file': (dwiextract, 'out_file')},
                    requirements=[mrtrix_req.v('3.0rc3')])

                dwi_reference = (extract_first_b0, 'out_file')

            # Concatenate extracted forward rpe with reverse rpe
            combined_images = pipeline.add(
                'combined_images',
                MRCat(),
                inputs={
                    'first_scan': dwi_reference,
                    'second_scan': ('reverse_phase', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Create node to assign the right PED to the diffusion
            prep_dwi = pipeline.add(
                'prepare_dwi',
                PrepareDWI(),
                inputs={
                    'pe_dir': ('ped', float),
                    'ped_polarity': ('pe_angle', float)})

            preproc_kwargs['rpe_pair'] = True

            distortion_correction = True
            preproc_inputs['se_epi'] = (combined_images, 'out_file')
        else:
            distortion_correction = False
            preproc_kwargs['rpe_none'] = True

        if self.parameter('preproc_pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')

        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.nii.gz',
                # FIXME: Need to determine this programmatically
                # eddy_parameters = '--data_is_shelled '
                temp_dir='dwipreproc_tempdir',
                **preproc_kwargs),
            inputs=preproc_inputs,
            outputs={
                'eddy_par': ('eddy_parameters', eddy_par_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
            wall_time=60)

        if distortion_correction:
            pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')

        mask = pipeline.add(
            'dwi2mask',
            BrainMask(
                out_file='brainmask.nii.gz'),
            inputs={
                'in_file': (preproc, 'out_file'),
                'grad_fsl': gradients},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Create bias correct node
        pipeline.add(
            "bias_correct",
            DWIBiasCorrect(
                method='ants'),
            inputs={
                'grad_fsl': gradients,  # internal
                'in_file': (preproc, 'out_file'),
                'mask': (mask, 'out_file')},
            outputs={
                'series_preproc': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])

        return pipeline

    def brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using MRtrix's 'dwi2mask' command

        The 'bet_method' switch selects between this MRtrix 'dwi2mask'-based
        masking and the brain-extraction method inherited from MriStudy.
        """

        if self.branch('bet_method', 'mrtrix'):
            pipeline = self.new_pipeline(
                'brain_extraction',
                desc="Generate brain mask from b0 images",
                citations=[mrtrix_cite],
                name_maps=name_maps)

            if self.provided('coreg_ref'):
                series = 'series_coreg'
            else:
                series = 'series_preproc'

            # Create mask node
            masker = pipeline.add(
                'dwi2mask',
                BrainMask(
                    out_file='brain_mask.nii.gz'),
                inputs={
                    'in_file': (series, nifti_gz_format),
                    'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
                outputs={
                    'brain_mask': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            merge = pipeline.add(
                'merge_operands',
                Merge(2),
                inputs={
                    'in1': ('mag_preproc', nifti_gz_format),
                    'in2': (masker, 'out_file')})

            pipeline.add(
                'apply_mask',
                MRCalc(
                    operation='multiply'),
                inputs={
                    'operands': (merge, 'out')},
                outputs={
                    'brain': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
        else:
            pipeline = super().brain_extraction_pipeline(**name_maps)
        return pipeline

    def series_coreg_pipeline(self, **name_maps):

        pipeline = super().series_coreg_pipeline(**name_maps)

        # Apply coregistration transform to gradients
        pipeline.add(
            'transform_grads',
            TransformGradients(),
            inputs={
                'gradients': ('grad_dirs', fsl_bvecs_format),
                'transform': ('coreg_fsl_mat', text_matrix_format)},
            outputs={
                'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})

        return pipeline

    def intensity_normalisation_pipeline(self, **name_maps):

        if self.num_sessions < 2:
            raise ArcanaMissingDataException(
                "Cannot normalise intensities of DWI images as study only "
                "contains a single session")
        elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
            logger.warning(
                "The number of sessions in the study ({}) is less than the "
                "recommended number for intensity normalisation ({}). The "
                "results may be unreliable".format(
                    self.num_sessions,
                    self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))

        pipeline = self.new_pipeline(
            name='intensity_normalization',
            desc=("Performs a group-wise intensity normalisation of the DWI "
                  "images"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        mrconvert = pipeline.add(
            'mrconvert',
            MRConvert(
                out_ext='.mif'),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Pair subject and visit ids together, expanding so they can be
        # joined and chained together
        session_ids = pipeline.add(
            'session_ids',
            utility.IdentityInterface(
                ['subject_id', 'visit_id']),
            inputs={
                'subject_id': (Study.SUBJECT_ID, int),
                'visit_id': (Study.VISIT_ID, int)})

        # Set up join nodes
        join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
        join_over_subjects = pipeline.add(
            'join_over_subjects',
            utility.IdentityInterface(
                join_fields),
            inputs={
                'masks': (self.brain_mask_spec_name, nifti_gz_format),
                'dwis': (mrconvert, 'out_file'),
                'subject_ids': (session_ids, 'subject_id'),
                'visit_ids': (session_ids, 'visit_id')},
            joinsource=self.SUBJECT_ID,
            joinfield=join_fields)

        join_over_visits = pipeline.add(
            'join_over_visits',
            Chain(
                join_fields),
            inputs={
                'dwis': (join_over_subjects, 'dwis'),
                'masks': (join_over_subjects, 'masks'),
                'subject_ids': (join_over_subjects, 'subject_ids'),
                'visit_ids': (join_over_subjects, 'visit_ids')},
            joinsource=self.VISIT_ID,
            joinfield=join_fields)

        # Intensity normalization
        intensity_norm = pipeline.add(
            'dwiintensitynorm',
            DWIIntensityNorm(),
            inputs={
                'in_files': (join_over_visits, 'dwis'),
                'masks': (join_over_visits, 'masks')},
            outputs={
                'norm_intens_fa_template': ('fa_template',
                                            mrtrix_image_format),
                'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Set up expand nodes
        pipeline.add(
            'expand', SelectSession(),
            inputs={
                'subject_ids': (join_over_visits, 'subject_ids'),
                'visit_ids': (join_over_visits, 'visit_ids'),
                'inlist': (intensity_norm, 'out_files'),
                'subject_id': (Study.SUBJECT_ID, int),
                'visit_id': (Study.VISIT_ID, int)},
            outputs={
                'norm_intensity': ('item', mrtrix_image_format)})

        # Connect inputs
        return pipeline

    def tensor_pipeline(self, **name_maps):
        """
        Fits the apparent diffusion tensor (DT) to each voxel of the image
        """

        pipeline = self.new_pipeline(
            name='tensor',
            desc=("Estimates the apparent diffusion tensor in each "
                  "voxel"),
            citations=[],
            name_maps=name_maps)

        # Create tensor fit node
        pipeline.add(
            'dwi2tensor',
            FitTensor(
                out_file='dti.nii.gz'),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'tensor': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def tensor_metrics_pipeline(self, **name_maps):
        """
        Calculates the FA and ADC maps from the fitted diffusion tensor
        """

        pipeline = self.new_pipeline(
            name='fa',
            desc=("Calculates the FA and ADC from a tensor image"),
            citations=[],
            name_maps=name_maps)

        # Create tensor fit node
        pipeline.add(
            'metrics',
            TensorMetrics(
                out_fa='fa.nii.gz',
                out_adc='adc.nii.gz'),
            inputs={
                'in_file': ('tensor', nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'fa': ('out_fa', nifti_gz_format),
                'adc': ('out_adc', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def response_pipeline(self, **name_maps):
        """
        Estimates the fibre response function(s) used for constrained
        spherical deconvolution

        Parameters
        ----------
        response_algorithm : str
            Algorithm used to estimate the response
        """

        pipeline = self.new_pipeline(
            name='response',
            desc=("Estimates the fibre response function"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Create fod fit node
        response = pipeline.add(
            'response',
            ResponseSD(
                algorithm=self.parameter('response_algorithm')),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'wm_response': ('wm_file', text_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Connect to outputs
        if self.multi_tissue:
            response.inputs.gm_file = 'gm.txt'
            response.inputs.csf_file = 'csf.txt'
            pipeline.connect_output('gm_response', response, 'gm_file',
                                    text_format)
            pipeline.connect_output('csf_response', response, 'csf_file',
                                    text_format)

        return pipeline

    def average_response_pipeline(self, **name_maps):
        """
        Averages the estimate response function over all subjects in the
        project
        """

        pipeline = self.new_pipeline(
            name='average_response',
            desc=(
                "Averages the fibre response function over the project"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        join_subjects = pipeline.add(
            'join_subjects',
            utility.IdentityInterface(['responses']),
            inputs={
                'responses': ('wm_response', text_format)},
            outputs={},
            joinsource=self.SUBJECT_ID,
            joinfield=['responses'])

        join_visits = pipeline.add(
            'join_visits',
            Chain(['responses']),
            inputs={
                'responses': (join_subjects, 'responses')},
            joinsource=self.VISIT_ID,
            joinfield=['responses'])

        pipeline.add(
            'avg_response',
            AverageResponse(),
            inputs={
                'in_files': (join_visits, 'responses')},
            outputs={
                'avg_response': ('out_file', text_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def fod_pipeline(self, **name_maps):
        """
        Estimates the fibre orientation distribution (FOD) using constrained
        spherical deconvolution

        """

        pipeline = self.new_pipeline(
            name='fod',
            desc=("Estimates the fibre orientation distribution in each"
                  " voxel"),
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Create fod fit node
        dwi2fod = pipeline.add(
            'dwi2fod',
            EstimateFOD(
                algorithm=self.parameter('fod_algorithm')),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'wm_txt': ('wm_response', text_format),
                'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)},
            outputs={
                'wm_odf': ('wm_odf', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        if self.multi_tissue:
            dwi2fod.inputs.gm_odf = 'gm.mif'
            dwi2fod.inputs.csf_odf = 'csf.mif'
            pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
                                   text_format)
            pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
                                   text_format)
            pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
                                    nifti_gz_format)
            pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
                                    nifti_gz_format)
        # Check inputs/output are connected
        return pipeline

    def extract_b0_pipeline(self, **name_maps):
        """
        Extracts the b0 images from a DWI study and takes their mean
        """

        pipeline = self.new_pipeline(
            name='extract_b0',
            desc="Extract b0 image from a DWI study",
            citations=[mrtrix_cite],
            name_maps=name_maps)

        # Extraction node
        extract_b0s = pipeline.add(
            'extract_b0s',
            ExtractDWIorB0(
                bzero=True,
                quiet=True),
            inputs={
                'fslgrad': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # FIXME: Need a registration step before the mean
        # Mean calculation node
        mean = pipeline.add(
            "mean",
            MRMath(
                axis=3,
                operation='mean',
                quiet=True),
            inputs={
                'in_files': (extract_b0s, 'out_file')},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Convert to Nifti
        pipeline.add(
            "output_conversion",
            MRConvert(
                out_ext='.nii.gz',
                quiet=True),
            inputs={
                'in_file': (mean, 'out_file')},
            outputs={
                'b0': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def global_tracking_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='global_tracking',
            desc="Extract b0 image from a DWI study",
            citations=[mrtrix_cite],
            name_maps=name_maps)

        mask = pipeline.add(
            'mask',
            DWI2Mask(),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        tracking = pipeline.add(
            'tracking',
            Tractography(
                select=self.parameter('num_global_tracks'),
                cutoff=self.parameter('global_tracks_cutoff')),
            inputs={
                'seed_image': (mask, 'out_file'),
                'in_file': ('wm_odf', mrtrix_image_format)},
            outputs={
                'global_tracks': ('out_file', mrtrix_track_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        if self.provided('anat_5tt'):
            pipeline.connect_input('anat_5tt', tracking, 'act_file',
                                   mrtrix_image_format)

        return pipeline

    def intrascan_alignment_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='affine_mat_generation',
            desc=("Generation of the affine matrices for the main dwi "
                  "sequence starting from eddy motion parameters"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'gen_aff_mats',
            AffineMatrixGeneration(),
            inputs={
                'reference_image': ('mag_preproc', nifti_gz_format),
                'motion_parameters': ('eddy_par', eddy_par_format)},
            outputs={
                'align_mats': ('affine_matrices', motion_mats_format)})

        return pipeline

    def connectome_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='connectome',
            desc=("Generate a connectome from whole brain connectivity"),
            citations=[],
            name_maps=name_maps)

        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(
                sub_paths=['mri', 'aparc+aseg.mgz']),
            inputs={
                'base_path': ('anat_fs_recon_all', directory_format)})

        pipeline.add(
            'connectome',
            mrtrix3.BuildConnectome(),
            inputs={
                'in_file': ('global_tracks', mrtrix_track_format),
                'in_parc': (aseg_path, 'out_path')},
            outputs={
                'connectome': ('out_file', csv_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
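
# Standalone usage sketch (not part of the original study class): the
# 'dwi2tensor' and 'metrics' nodes above roughly correspond to Nipype's
# MRtrix3 FitTensor and TensorMetrics interfaces (an assumption about the
# wrapped interfaces). Outside the pipeline they look roughly like this; the
# file names are hypothetical and MRtrix3 must be installed.
from nipype.interfaces import mrtrix3

tensor = mrtrix3.FitTensor(in_file='dwi_preproc.nii.gz',
                           in_mask='brain_mask.nii.gz',
                           grad_fsl=('dwi.bvec', 'dwi.bval'),
                           out_file='dti.nii.gz')
tensor.run()

metrics = mrtrix3.TensorMetrics(in_file='dti.nii.gz',
                                in_mask='brain_mask.nii.gz',
                                out_fa='fa.nii.gz',
                                out_adc='adc.nii.gz')
metrics.run()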