Example #1
    def connectome_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='connectome',
            desc=("Generate a connectome from whole brain connectivity"),
            citations=[],
            name_maps=name_maps)

        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(
                sub_paths=['mri', 'aparc+aseg.mgz']),
            inputs={
                'base_path': ('anat_fs_recon_all', directory_format)})

        pipeline.add(
            'connectome',
            mrtrix3.BuildConnectome(),
            inputs={
                'in_file': ('global_tracks', mrtrix_track_format),
                'in_parc': (aseg_path, 'out_path')},
            outputs={
                'connectome': ('out_file', csv_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
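
The pipeline above depends on project-specific helpers (new_pipeline, AppendPath, the *_format objects), so it is not runnable on its own. Below is a minimal standalone sketch of the final step only, using nipype's mrtrix3 wrapper for tck2connectome; the file names are hypothetical placeholders.

import nipype.interfaces.mrtrix3 as mrtrix3

# Build a connectivity matrix from a whole-brain tractogram and a parcellation.
conn = mrtrix3.BuildConnectome()
conn.inputs.in_file = 'global_tracks.tck'      # whole-brain streamlines (placeholder path)
conn.inputs.in_parc = 'aparc+aseg_labels.mif'  # parcellation image (placeholder path)
conn.inputs.out_file = 'connectome.csv'        # output connectivity matrix
conn.run()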
Example #2
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Notes:
            - If `FSLOUTPUTTYPE` environment variable is not set, `nipype` takes
            NIFTI by default.

        Todo:
            - [x] Detect space automatically.
            - [ ] Allow for custom parcellations (See TODOs in utils).

        """
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as npe
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.freesurfer as fs
        import nipype.interfaces.mrtrix3 as mrtrix3
        from clinica.lib.nipype.interfaces.mrtrix.preprocess import MRTransform
        from clinica.lib.nipype.interfaces.mrtrix3.reconst import EstimateFOD
        from clinica.lib.nipype.interfaces.mrtrix3.tracking import Tractography
        from clinica.utils.exceptions import ClinicaException, ClinicaCAPSError
        from clinica.utils.stream import cprint
        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
        from clinica.utils.mri_registration import convert_flirt_transformation_to_mrtrix_transformation

        # cprint('Building the pipeline...')

        # Nodes
        # =====

        # B0 Extraction (only if space=b0)
        # -------------
        split_node = npe.Node(name="Reg-0-DWI-B0Extraction",
                              interface=fsl.Split())
        split_node.inputs.output_type = "NIFTI_GZ"
        split_node.inputs.dimension = 't'
        select_node = npe.Node(name="Reg-0-DWI-B0Selection",
                               interface=niu.Select())
        select_node.inputs.index = 0

        # B0 Brain Extraction (only if space=b0)
        # -------------------
        mask_node = npe.Node(name="Reg-0-DWI-BrainMasking",
                             interface=fsl.ApplyMask())
        mask_node.inputs.output_type = "NIFTI_GZ"

        # T1-to-B0 Registration (only if space=b0)
        # ---------------------
        t12b0_reg_node = npe.Node(name="Reg-1-T12B0Registration",
                                  interface=fsl.FLIRT(
                                      dof=6,
                                      interp='spline',
                                      cost='normmi',
                                      cost_func='normmi',
                                  ))
        t12b0_reg_node.inputs.output_type = "NIFTI_GZ"

        # MGZ File Conversion (only if space=b0)
        # -------------------
        t1_brain_conv_node = npe.Node(name="Reg-0-T1-T1BrainConvertion",
                                      interface=fs.MRIConvert())
        wm_mask_conv_node = npe.Node(name="Reg-0-T1-WMMaskConvertion",
                                     interface=fs.MRIConvert())

        # WM Transformation (only if space=b0)
        # -----------------
        wm_transform_node = npe.Node(name="Reg-2-WMTransformation",
                                     interface=fsl.ApplyXFM())
        wm_transform_node.inputs.apply_xfm = True

        # Nodes Generation
        # ----------------
        label_convert_node = npe.MapNode(
            name="0-LabelsConversion",
            iterfield=['in_file', 'in_config', 'in_lut', 'out_file'],
            interface=mrtrix3.LabelConvert())
        label_convert_node.inputs.in_config = utils.get_conversion_luts()
        label_convert_node.inputs.in_lut = utils.get_luts()

        # FSL flirt matrix to MRtrix matrix Conversion (only if space=b0)
        # --------------------------------------------
        fsl2mrtrix_conv_node = npe.Node(
            name='Reg-2-FSL2MrtrixConversion',
            interface=niu.Function(
                input_names=[
                    'in_source_image', 'in_reference_image', 'in_flirt_matrix',
                    'name_output_matrix'
                ],
                output_names=['out_mrtrix_matrix'],
                function=convert_flirt_transformation_to_mrtrix_transformation)
        )

        # Parc. Transformation (only if space=b0)
        # --------------------
        parc_transform_node = npe.MapNode(
            name="Reg-2-ParcTransformation",
            iterfield=["in_files", "out_filename"],
            interface=MRTransform())

        # Response Estimation
        # -------------------
        resp_estim_node = npe.Node(name="1a-ResponseEstimation",
                                   interface=mrtrix3.ResponseSD())
        resp_estim_node.inputs.algorithm = 'tournier'

        # FOD Estimation
        # --------------
        fod_estim_node = npe.Node(name="1b-FODEstimation",
                                  interface=EstimateFOD())
        fod_estim_node.inputs.algorithm = 'csd'

        # Tracts Generation
        # -----------------
        tck_gen_node = npe.Node(name="2-TractsGeneration",
                                interface=Tractography())
        tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']
        tck_gen_node.inputs.algorithm = 'iFOD2'

        # BUG: Info package does not exist
        # from nipype.interfaces.mrtrix3.base import Info
        # from distutils.version import LooseVersion
        #
        # if Info.looseversion() >= LooseVersion("3.0"):
        #     tck_gen_node.inputs.select = self.parameters['n_tracks']
        # elif Info.looseversion() <= LooseVersion("0.4"):
        #     tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']
        # else:
        #     from clinica.utils.exceptions import ClinicaException
        #     raise ClinicaException("Your MRtrix version is not supported.")
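        # A possible workaround (untested sketch, not part of the original
        # pipeline): ask the tckgen binary for its version instead of the
        # missing nipype Info helper, then choose 'select' vs 'n_tracks'.
        # Assumes tckgen is on PATH and that `-version` prints a line such
        # as "== tckgen 3.0.3 ==".
        #
        # import re
        # import subprocess
        # from distutils.version import LooseVersion
        #
        # out = subprocess.run(['tckgen', '-version'],
        #                      capture_output=True, text=True).stdout
        # match = re.search(r'tckgen (\d[\w.]*)', out)
        # if match and LooseVersion(match.group(1)) >= LooseVersion('3.0'):
        #     tck_gen_node.inputs.select = self.parameters['n_tracks']
        # else:
        #     tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']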

        # Connectome Generation
        # ---------------------
        # only the parcellation and output filename should be iterable, the tck
        # file stays the same.
        conn_gen_node = npe.MapNode(name="3-ConnectomeGeneration",
                                    iterfield=['in_parc', 'out_file'],
                                    interface=mrtrix3.BuildConnectome())

        # Print begin message
        # -------------------
        print_begin_message = npe.MapNode(interface=niu.Function(
            input_names=['in_bids_or_caps_file'],
            function=utils.print_begin_pipeline),
                                          iterfield='in_bids_or_caps_file',
                                          name='WriteBeginMessage')

        # Print end message
        # -----------------
        print_end_message = npe.MapNode(interface=niu.Function(
            input_names=['in_bids_or_caps_file', 'final_file'],
            function=utils.print_end_pipeline),
                                        iterfield=['in_bids_or_caps_file'],
                                        name='WriteEndMessage')

        # CAPS File names Generation
        # --------------------------
        caps_filenames_node = npe.Node(
            name='CAPSFilenamesGeneration',
            interface=niu.Function(input_names='dwi_file',
                                   output_names=self.get_output_fields(),
                                   function=utils.get_caps_filenames))

        # Connections
        # ===========
        # Computation of the diffusion model, tractography & connectome
        # -------------------------------------------------------------
        self.connect([
            (self.input_node, print_begin_message,
             [('dwi_file', 'in_bids_or_caps_file')]),  # noqa
            (self.input_node, caps_filenames_node, [('dwi_file', 'dwi_file')]),
            # Response Estimation
            (self.input_node, resp_estim_node, [('dwi_file', 'in_file')]
             ),  # Preproc. DWI # noqa
            (self.input_node, resp_estim_node,
             [('dwi_brainmask_file', 'in_mask')]),  # B0 brain mask # noqa
            (self.input_node, resp_estim_node, [('grad_fsl', 'grad_fsl')
                                                ]),  # bvecs and bvals # noqa
            (caps_filenames_node, resp_estim_node,
             [('response', 'wm_file')]),  # output response filename # noqa
            # FOD Estimation
            (self.input_node, fod_estim_node, [('dwi_file', 'in_file')]
             ),  # Preproc. DWI # noqa
            (resp_estim_node, fod_estim_node,
             [('wm_file', 'wm_txt')]),  # Response (txt file) # noqa
            (self.input_node, fod_estim_node,
             [('dwi_brainmask_file', 'mask_file')]),  # B0 brain mask # noqa
            (self.input_node, fod_estim_node,
             [('grad_fsl', 'grad_fsl')]),  # bvecs and bvals # noqa
            (caps_filenames_node, fod_estim_node,
             [('fod', 'wm_odf')]),  # output odf filename # noqa
            # Tracts Generation
            (fod_estim_node, tck_gen_node, [('wm_odf', 'in_file')]
             ),  # ODF file # noqa
            (caps_filenames_node, tck_gen_node,
             [('tracts', 'out_file')]),  # output tck filename # noqa
            # Label Conversion
            (self.input_node, label_convert_node, [('atlas_files', 'in_file')]
             ),  # atlas image files # noqa
            (caps_filenames_node, label_convert_node, [
                ('nodes', 'out_file')
            ]),  # converted atlas image filenames # noqa
            # Connectomes Generation
            (tck_gen_node, conn_gen_node, [('out_file', 'in_file')]),  # noqa
            (caps_filenames_node, conn_gen_node, [('connectomes', 'out_file')
                                                  ]),  # noqa
        ])
        # Registration T1-DWI (only if space=b0)
        # -------------------
        if self.parameters['dwi_space'] == 'b0':
            self.connect([
                # MGZ Files Conversion
                (self.input_node, t1_brain_conv_node, [('t1_brain_file',
                                                        'in_file')]),  # noqa
                (self.input_node, wm_mask_conv_node, [('wm_mask_file',
                                                       'in_file')]),  # noqa
                # B0 Extraction
                (self.input_node, split_node, [('dwi_file', 'in_file')]
                 ),  # noqa
                (split_node, select_node, [('out_files', 'inlist')]),  # noqa
                # Masking
                (select_node, mask_node, [('out', 'in_file')]),  # B0 # noqa
                (self.input_node, mask_node,
                 [('dwi_brainmask_file', 'mask_file')]),  # Brain mask # noqa
                # T1-to-B0 Registration
                (t1_brain_conv_node, t12b0_reg_node, [('out_file', 'in_file')]
                 ),  # Brain # noqa
                (mask_node, t12b0_reg_node, [('out_file', 'reference')
                                             ]),  # B0 brain-masked # noqa
                # WM Transformation
                (wm_mask_conv_node, wm_transform_node,
                 [('out_file', 'in_file')]),  # WM mask # noqa
                (mask_node, wm_transform_node, [('out_file', 'reference')
                                                ]),  # B0 brain-masked # noqa
                (t12b0_reg_node, wm_transform_node, [
                    ('out_matrix_file', 'in_matrix_file')
                ]),  # T1-to-B0 matrix file # noqa
                # FSL flirt matrix to MRtrix matrix Conversion
                (t1_brain_conv_node, fsl2mrtrix_conv_node,
                 [('out_file', 'in_source_image')]),  # noqa
                (mask_node, fsl2mrtrix_conv_node,
                 [('out_file', 'in_reference_image')]),  # noqa
                (t12b0_reg_node, fsl2mrtrix_conv_node,
                 [('out_matrix_file', 'in_flirt_matrix')]),  # noqa
                # Apply registration without resampling on parcellations
                (label_convert_node, parc_transform_node,
                 [('out_file', 'in_files')]),  # noqa
                (fsl2mrtrix_conv_node, parc_transform_node,
                 [('out_mrtrix_matrix', 'linear_transform')]),  # noqa
                (caps_filenames_node, parc_transform_node,
                 [('nodes', 'out_filename')]),  # noqa
            ])
        # Special care for Parcellation & WM mask
        # ---------------------------------------
        if self.parameters['dwi_space'] == 'b0':
            self.connect([
                (wm_transform_node, tck_gen_node, [('out_file', 'seed_image')
                                                   ]),  # noqa
                (parc_transform_node, conn_gen_node, [('out_file', 'in_parc')
                                                      ]),  # noqa
                (parc_transform_node, self.output_node, [('out_file', 'nodes')
                                                         ]),  # noqa
            ])
        elif self.parameters['dwi_space'] == 'T1w':
            self.connect([
                (self.input_node, tck_gen_node, [('wm_mask_file', 'seed_image')
                                                 ]),  # noqa
                (label_convert_node, conn_gen_node, [('out_file', 'in_parc')
                                                     ]),  # noqa
                (label_convert_node, self.output_node, [('out_file', 'nodes')
                                                        ]),  # noqa
            ])
        else:
            raise ClinicaCAPSError(
                'Bad preprocessed DWI space. Please check your CAPS '
                'folder.')
        # Outputs
        # -------
        self.connect([
            (resp_estim_node, self.output_node, [('wm_file', 'response')]),
            (fod_estim_node, self.output_node, [('wm_odf', 'fod')]),
            (tck_gen_node, self.output_node, [('out_file', 'tracts')]),
            (conn_gen_node, self.output_node, [('out_file', 'connectomes')]),
            (self.input_node, print_end_message, [('dwi_file',
                                                   'in_bids_or_caps_file')]),
            (conn_gen_node, print_end_message, [('out_file', 'final_file')]),
        ])
Example #3
    def preprocess_dwi_data(self,
                            data,
                            index,
                            acqp,
                            atlas2use,
                            ResponseSD_algorithm='tournier',
                            fod_algorithm='csd',
                            tract_algorithm='iFOD2',
                            streamlines_number='10M'):
        '''
        Preprocess DWI data and extract the structural connectome.

        Parameters
        ----------
        data: tuple |
            a tuple with the paths to the dwi, bvecs and bvals files. It is
            obtained using the function grab_data()
        index: str |
            Name of the text file specifying the relationship between the
            images in --imain and the information in --acqp and --topup.
            E.g. index.txt
        acqp: str |
            Name of the text file with information about the acquisition of
            the images in --imain
        atlas2use: str |
            The input node parcellation image
        ResponseSD_algorithm (optional): str |
            Algorithm used to estimate the response function;
            options are: dhollander, fa, manual, msmt_5tt, tax, tournier
            (Default is 'tournier')
        fod_algorithm (optional): str |
            The algorithm to use for FOD estimation (options are: csd, msmt_csd)
            (Default is 'csd')
        tract_algorithm (optional): str |
            The tractography algorithm to use. Valid choices are: FACT,
            iFOD1, iFOD2, Nulldist1, Nulldist2, SD_Stream, Seedtest, Tensor_Det,
            Tensor_Prob (Default is 'iFOD2')
        streamlines_number (optional): str |
            The desired number of streamlines (Default is '10M')
        '''

        if len(data[0]) != len(data[1]):
            raise ValueError(
                'the number of dwi files does not match the number of bvec files')
        if len(data[0]) != len(data[2]):
            raise ValueError(
                'the number of dwi files does not match the number of bval files')
        if len(data[1]) != len(data[2]):
            raise ValueError(
                'the number of bvec files does not match the number of bval files')

        for subj in range(len(data[0])):
            print('Extracting B0 volume for subject', subj)
            self.roi = fsl.ExtractROI(
                in_file=data[0][subj],
                roi_file=os.path.join(
                    os.path.split(data[0][subj])[0] + '/' +
                    os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                    '_nodiff.nii.gz'),
                t_min=0,
                t_size=1)
            self.roi.run()

            print('Converting into .mif for subject', subj)
            self.mrconvert = mrt.MRConvert()
            self.mrconvert.inputs.in_file = data[0][subj]
            self.mrconvert.inputs.grad_fsl = (data[1][subj], data[2][subj])
            self.mrconvert.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + '_dwi.mif')
            self.mrconvert.run()

            print('Denoising data for subject', subj)
            self.denoise = mrt.DWIDenoise()
            self.denoise.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + '_dwi.mif')
            self.denoise.inputs.noise = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_noise.mif')
            self.denoise.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised.mif')
            self.denoise.run()

            self.denoise_convert = mrt.MRConvert()
            self.denoise_convert.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised.mif')
            self.denoise_convert.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised.nii.gz')
            self.denoise_convert.run()

            print('Skull stripping for subject', subj)
            self.mybet = fsl.BET()
            self.mybet.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_nodiff.nii.gz')
            self.mybet.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain.nii.gz')
            self.mybet.inputs.frac = 0.1
            self.mybet.inputs.robust = True
            self.mybet.inputs.mask = True
            self.mybet.run()

            print('Running Eddy for subject', subj)
            self.eddy = Eddy()
            self.eddy.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised.nii.gz')
            # brain mask computed by BET above
            self.eddy.inputs.in_mask = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain_mask.nii.gz')
            self.eddy.inputs.in_index = index
            self.eddy.inputs.in_acqp = acqp
            self.eddy.inputs.in_bvec = data[1][subj]
            self.eddy.inputs.in_bval = data[2][subj]
            self.eddy.inputs.out_base = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.nii.gz')
            self.eddy.run()

            print('Running Bias Correction for subject', subj)
            self.bias_correct = mrt.DWIBiasCorrect()
            self.bias_correct.inputs.use_ants = True
            self.bias_correct.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.nii.gz')
            self.bias_correct.inputs.grad_fsl = (os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.eddy_rotated_bvecs.bvec'), data[2][subj])
            self.bias_correct.inputs.bias = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + '_bias.mif')
            self.bias_correct.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy_unbiased.mif')
            self.bias_correct.run()

            print('Calculating Response function for subject', subj)
            self.resp = mrt.ResponseSD()
            self.resp.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy_unbiased.mif')
            self.resp.inputs.algorithm = ResponseSD_algorithm
            self.resp.inputs.grad_fsl = (os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.eddy_rotated_bvecs.bvec'), data[2][subj])
            self.resp.inputs.wm_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_response.txt')
            self.resp.run()

            print('Estimating FOD for subject', subj)
            self.fod = mrt.EstimateFOD()
            self.fod.inputs.algorithm = fod_algorithm
            self.fod.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy_unbiased.mif')
            self.fod.inputs.wm_txt = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_response.txt')
            self.fod.inputs.mask_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain_mask.nii.gz')
            # gradient table: rotated bvecs from eddy plus the original bvals
            self.fod.inputs.grad_fsl = (os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.eddy_rotated_bvecs.bvec'), data[2][subj])
            # name the output FOD image so the tractography step below can find it
            self.fod.inputs.wm_odf = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + 'fods.mif')
            self.fod.run()

            print('Extracting whole brain tract for subject', subj)
            self.tk = mrt.Tractography()
            self.tk.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + 'fods.mif')
            self.tk.inputs.roi_mask = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain_mask.nii.gz')
            self.tk.inputs.algorithm = tract_algorithm
            self.tk.inputs.seed_image = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain_mask.nii.gz')
            self.tk.inputs.select = streamlines_number
            self.tk.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_whole_brain_' + streamlines_number + '.tck')
            self.tk.run()

            print('Extracting connectome for subject', subj)
            self.mat = mrt.BuildConnectome()
            self.mat.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_whole_brain_' + streamlines_number + '.tck')
            self.mat.inputs.in_parc = atlas2use
            self.mat.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_connectome.csv')
            self.mat.run()
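
A hedged usage sketch for the method above; the instance holding preprocess_dwi_data(), the grab_data() helper, and all file paths are hypothetical placeholders meant only to show the expected argument shapes.

# 'pipeline' is assumed to be an instance of the class defining
# preprocess_dwi_data(); grab_data() is assumed to return three parallel
# lists of paths: dwi images, bvec files and bval files.
data = grab_data('/path/to/subjects')
pipeline.preprocess_dwi_data(
    data,
    index='index.txt',                      # volume-to-acqp mapping for eddy
    acqp='acqp.txt',                        # acquisition parameters for eddy
    atlas2use='parcellation_atlas.nii.gz',  # node parcellation image
    streamlines_number='10M')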
Example #4
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Notes:
            - If `FSLOUTPUTTYPE` environment variable is not set, `nipype` takes
            NIFTI by default.

        Todo:
            - [x] Detect space automatically.
            - [ ] Allow for custom parcellations (See TODOs in utils).

        """
        import nipype.interfaces.freesurfer as fs
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.mrtrix3 as mrtrix3
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as npe
        from nipype.interfaces.mrtrix3.tracking import Tractography
        from nipype.interfaces.mrtrix.preprocess import MRTransform

        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
        from clinica.lib.nipype.interfaces.mrtrix3.reconst import EstimateFOD
        from clinica.utils.exceptions import ClinicaCAPSError
        from clinica.utils.mri_registration import (
            convert_flirt_transformation_to_mrtrix_transformation,
        )

        # Nodes
        # =====
        # B0 Extraction (only if space=b0)
        # -------------
        split_node = npe.Node(name="Reg-0-DWI-B0Extraction", interface=fsl.Split())
        split_node.inputs.output_type = "NIFTI_GZ"
        split_node.inputs.dimension = "t"
        select_node = npe.Node(name="Reg-0-DWI-B0Selection", interface=niu.Select())
        select_node.inputs.index = 0

        # B0 Brain Extraction (only if space=b0)
        # -------------------
        mask_node = npe.Node(name="Reg-0-DWI-BrainMasking", interface=fsl.ApplyMask())
        mask_node.inputs.output_type = "NIFTI_GZ"

        # T1-to-B0 Registration (only if space=b0)
        # ---------------------
        t12b0_reg_node = npe.Node(
            name="Reg-1-T12B0Registration",
            interface=fsl.FLIRT(
                dof=6,
                interp="spline",
                cost="normmi",
                cost_func="normmi",
            ),
        )
        t12b0_reg_node.inputs.output_type = "NIFTI_GZ"

        # MGZ File Conversion (only if space=b0)
        # -------------------
        t1_brain_conv_node = npe.Node(
            name="Reg-0-T1-T1BrainConvertion", interface=fs.MRIConvert()
        )
        wm_mask_conv_node = npe.Node(
            name="Reg-0-T1-WMMaskConvertion", interface=fs.MRIConvert()
        )

        # WM Transformation (only if space=b0)
        # -----------------
        wm_transform_node = npe.Node(
            name="Reg-2-WMTransformation", interface=fsl.ApplyXFM()
        )
        wm_transform_node.inputs.apply_xfm = True

        # Nodes Generation
        # ----------------
        label_convert_node = npe.MapNode(
            name="0-LabelsConversion",
            iterfield=["in_file", "in_config", "in_lut", "out_file"],
            interface=mrtrix3.LabelConvert(),
        )
        label_convert_node.inputs.in_config = utils.get_conversion_luts()
        label_convert_node.inputs.in_lut = utils.get_luts()

        # FSL flirt matrix to MRtrix matrix Conversion (only if space=b0)
        # --------------------------------------------
        fsl2mrtrix_conv_node = npe.Node(
            name="Reg-2-FSL2MrtrixConversion",
            interface=niu.Function(
                input_names=[
                    "in_source_image",
                    "in_reference_image",
                    "in_flirt_matrix",
                    "name_output_matrix",
                ],
                output_names=["out_mrtrix_matrix"],
                function=convert_flirt_transformation_to_mrtrix_transformation,
            ),
        )

        # Parc. Transformation (only if space=b0)
        # --------------------
        parc_transform_node = npe.MapNode(
            name="Reg-2-ParcTransformation",
            iterfield=["in_files", "out_filename"],
            interface=MRTransform(),
        )

        # Response Estimation
        # -------------------
        resp_estim_node = npe.Node(
            name="1a-ResponseEstimation", interface=mrtrix3.ResponseSD()
        )
        resp_estim_node.inputs.algorithm = "tournier"

        # FOD Estimation
        # --------------
        fod_estim_node = npe.Node(name="1b-FODEstimation", interface=EstimateFOD())
        fod_estim_node.inputs.algorithm = "csd"

        # Tracts Generation
        # -----------------
        tck_gen_node = npe.Node(name="2-TractsGeneration", interface=Tractography())
        tck_gen_node.inputs.select = self.parameters["n_tracks"]
        tck_gen_node.inputs.algorithm = "iFOD2"

        # Connectome Generation
        # ---------------------
        # only the parcellation and output filename should be iterable, the tck
        # file stays the same.
        conn_gen_node = npe.MapNode(
            name="3-ConnectomeGeneration",
            iterfield=["in_parc", "out_file"],
            interface=mrtrix3.BuildConnectome(),
        )

        # Print begin message
        # -------------------
        print_begin_message = npe.MapNode(
            interface=niu.Function(
                input_names=["in_bids_or_caps_file"],
                function=utils.print_begin_pipeline,
            ),
            iterfield="in_bids_or_caps_file",
            name="WriteBeginMessage",
        )

        # Print end message
        # -----------------
        print_end_message = npe.MapNode(
            interface=niu.Function(
                input_names=["in_bids_or_caps_file", "final_file"],
                function=utils.print_end_pipeline,
            ),
            iterfield=["in_bids_or_caps_file"],
            name="WriteEndMessage",
        )

        # CAPS File names Generation
        # --------------------------
        caps_filenames_node = npe.Node(
            name="CAPSFilenamesGeneration",
            interface=niu.Function(
                input_names="dwi_file",
                output_names=self.get_output_fields(),
                function=utils.get_caps_filenames,
            ),
        )

        # Connections
        # ===========
        # Computation of the diffusion model, tractography & connectome
        # -------------------------------------------------------------
        # fmt: off
        self.connect(
            [
                (self.input_node, print_begin_message, [("dwi_file", "in_bids_or_caps_file")]),
                (self.input_node, caps_filenames_node, [("dwi_file", "dwi_file")]),
                # Response Estimation
                (self.input_node, resp_estim_node, [("dwi_file", "in_file")]),  # Preproc. DWI
                (self.input_node, resp_estim_node, [("dwi_brainmask_file", "in_mask")]),  # B0 brain mask
                (self.input_node, resp_estim_node, [("grad_fsl", "grad_fsl")]),  # bvecs and bvals
                (caps_filenames_node, resp_estim_node, [("response", "wm_file")]),  # output response filename
                # FOD Estimation
                (self.input_node, fod_estim_node, [("dwi_file", "in_file")]),  # Preproc. DWI
                (resp_estim_node, fod_estim_node, [("wm_file", "wm_txt")]),  # Response (txt file)
                (self.input_node, fod_estim_node, [("dwi_brainmask_file", "mask_file")]),  # B0 brain mask
                (self.input_node, fod_estim_node, [("grad_fsl", "grad_fsl")]),  # T1-to-B0 matrix file
                (caps_filenames_node, fod_estim_node, [("fod", "wm_odf")]),  # output odf filename
                # Tracts Generation
                (fod_estim_node, tck_gen_node, [("wm_odf", "in_file")]),  # ODF file
                (caps_filenames_node, tck_gen_node, [("tracts", "out_file")]),  # output tck filename
                # Label Conversion
                (self.input_node, label_convert_node, [("atlas_files", "in_file")]),  # atlas image files
                (caps_filenames_node, label_convert_node, [("nodes", "out_file")]),  # converted atlas image filenames
                # Connectomes Generation
                (tck_gen_node, conn_gen_node, [("out_file", "in_file")]),
                (caps_filenames_node, conn_gen_node, [("connectomes", "out_file")]),
            ]
        )
        # Registration T1-DWI (only if space=b0)
        # -------------------
        if self.parameters["dwi_space"] == "b0":
            self.connect(
                [
                    # MGZ Files Conversion
                    (self.input_node, t1_brain_conv_node, [("t1_brain_file", "in_file")]),
                    (self.input_node, wm_mask_conv_node, [("wm_mask_file", "in_file")]),
                    # B0 Extraction
                    (self.input_node, split_node, [("dwi_file", "in_file")]),
                    (split_node, select_node, [("out_files", "inlist")]),
                    # Masking
                    (select_node, mask_node, [("out", "in_file")]),  # B0
                    (self.input_node, mask_node, [("dwi_brainmask_file", "mask_file")]),  # Brain mask
                    # T1-to-B0 Registration
                    (t1_brain_conv_node, t12b0_reg_node, [("out_file", "in_file")]),  # Brain
                    (mask_node, t12b0_reg_node, [("out_file", "reference")]),  # B0 brain-masked
                    # WM Transformation
                    (wm_mask_conv_node, wm_transform_node, [("out_file", "in_file")]),  # WM mask
                    (mask_node, wm_transform_node, [("out_file", "reference")]),  # B0 brain-masked
                    (t12b0_reg_node, wm_transform_node, [("out_matrix_file", "in_matrix_file")]),  # T1-to-B0 matrix file
                    # FSL flirt matrix to MRtrix matrix Conversion
                    (t1_brain_conv_node, fsl2mrtrix_conv_node, [("out_file", "in_source_image")]),
                    (mask_node, fsl2mrtrix_conv_node, [("out_file", "in_reference_image")]),
                    (t12b0_reg_node, fsl2mrtrix_conv_node, [("out_matrix_file", "in_flirt_matrix")]),
                    # Apply registration without resampling on parcellations
                    (label_convert_node, parc_transform_node, [("out_file", "in_files")]),
                    (fsl2mrtrix_conv_node, parc_transform_node, [("out_mrtrix_matrix", "linear_transform")]),
                    (caps_filenames_node, parc_transform_node, [("nodes", "out_filename")]),
                ]
            )
        # Special care for Parcellation & WM mask
        # ---------------------------------------
        if self.parameters["dwi_space"] == "b0":
            self.connect(
                [
                    (wm_transform_node, tck_gen_node, [("out_file", "seed_image")]),
                    (parc_transform_node, conn_gen_node, [("out_file", "in_parc")]),
                    (parc_transform_node, self.output_node, [("out_file", "nodes")]),
                ]
            )
        elif self.parameters["dwi_space"] == "T1w":
            self.connect(
                [
                    (self.input_node, tck_gen_node, [("wm_mask_file", "seed_image")]),
                    (label_convert_node, conn_gen_node, [("out_file", "in_parc")]),
                    (label_convert_node, self.output_node, [("out_file", "nodes")]),
                ]
            )
        else:
            raise ClinicaCAPSError(
                "Bad preprocessed DWI space. Please check your CAPS folder."
            )
        # Outputs
        # -------
        self.connect(
            [
                (resp_estim_node, self.output_node, [("wm_file", "response")]),
                (fod_estim_node, self.output_node, [("wm_odf", "fod")]),
                (tck_gen_node, self.output_node, [("out_file", "tracts")]),
                (conn_gen_node, self.output_node, [("out_file", "connectomes")]),
                (self.input_node, print_end_message, [("dwi_file", "in_bids_or_caps_file")]),
                (conn_gen_node, print_end_message, [("out_file", "final_file")]),
            ]
        )
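
Both Clinica examples wrap plain Python helpers in niu.Function nodes (the CAPS filename generation and the FLIRT-to-MRtrix matrix conversion). A minimal self-contained sketch of that pattern follows; the helper function and the input path are illustrative only and not part of Clinica.

import nipype.interfaces.utility as niu
import nipype.pipeline.engine as npe


def make_connectome_name(dwi_file):
    # Functions wrapped by niu.Function must be self-contained,
    # so imports go inside the body.
    import os

    return os.path.basename(dwi_file).replace(".nii.gz", "_connectome.csv")


name_node = npe.Node(
    name="ConnectomeNameGeneration",
    interface=niu.Function(
        input_names=["dwi_file"],
        output_names=["connectome_file"],
        function=make_connectome_name,
    ),
)
name_node.inputs.dwi_file = "/path/to/sub-01_dwi.nii.gz"  # hypothetical input
result = name_node.run()
print(result.outputs.connectome_file)  # -> sub-01_dwi_connectome.csv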