Example #1
import nipype.interfaces.mrtrix3 as mrt


def gen_response(dwi_file: str, dwi_mask: str, working_dir: str):
    resp = mrt.ResponseSD()
    resp.inputs.in_file = dwi_file
    resp.inputs.algorithm = "dhollander"
    resp.inputs.csf_file = f"{working_dir}/response_csf.txt"
    resp.inputs.wm_file = f"{working_dir}/response_wm.txt"
    resp.inputs.gm_file = f"{working_dir}/response_gm.txt"
    resp.inputs.in_mask = dwi_mask
    print(resp.cmdline)
    resp.run()
    return resp.inputs.wm_file, resp.inputs.gm_file, resp.inputs.csf_file
Example #2
from pathlib import Path

import nipype.interfaces.mrtrix3 as mrt


def generate_response(dwi_file: Path, dwi_mask: Path, working_dir: Path):
    resp = mrt.ResponseSD()
    resp.inputs.in_file = dwi_file
    resp.inputs.algorithm = "dhollander"
    resp.inputs.csf_file = working_dir / "response_csf.txt"
    resp.inputs.wm_file = working_dir / "response_wm.txt"
    resp.inputs.gm_file = working_dir / "response_gm.txt"
    resp.inputs.in_mask = dwi_mask
    print(resp.cmdline)
    resp.run()
    return resp.inputs.wm_file, resp.inputs.gm_file, resp.inputs.csf_file
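
# A hypothetical usage sketch (not part of the original snippet): the paths are
# placeholders, and the DWI is assumed to be a .mif file that already carries
# its gradient table, so no bvec/bval inputs are set here.
if __name__ == "__main__":
    wm_txt, gm_txt, csf_txt = generate_response(
        dwi_file=Path("sub-01_dwi.mif"),
        dwi_mask=Path("sub-01_brainmask.mif"),
        working_dir=Path("/tmp/dwi_work"),
    )
    print(wm_txt, gm_txt, csf_txt)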
Example #3
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Notes:
            - If `FSLOUTPUTTYPE` environment variable is not set, `nipype` takes
            NIFTI by default.

        Todo:
            - [x] Detect space automatically.
            - [ ] Allow for custom parcellations (See TODOs in utils).

        """
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as npe
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.freesurfer as fs
        import nipype.interfaces.mrtrix3 as mrtrix3
        from clinica.lib.nipype.interfaces.mrtrix.preprocess import MRTransform
        from clinica.lib.nipype.interfaces.mrtrix3.reconst import EstimateFOD
        from clinica.lib.nipype.interfaces.mrtrix3.tracking import Tractography
        from clinica.utils.exceptions import ClinicaException, ClinicaCAPSError
        from clinica.utils.stream import cprint
        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
        from clinica.utils.mri_registration import convert_flirt_transformation_to_mrtrix_transformation

        # cprint('Building the pipeline...')

        # Nodes
        # =====

        # B0 Extraction (only if space=b0)
        # -------------
        split_node = npe.Node(name="Reg-0-DWI-B0Extraction",
                              interface=fsl.Split())
        split_node.inputs.output_type = "NIFTI_GZ"
        split_node.inputs.dimension = 't'
        select_node = npe.Node(name="Reg-0-DWI-B0Selection",
                               interface=niu.Select())
        select_node.inputs.index = 0

        # B0 Brain Extraction (only if space=b0)
        # -------------------
        mask_node = npe.Node(name="Reg-0-DWI-BrainMasking",
                             interface=fsl.ApplyMask())
        mask_node.inputs.output_type = "NIFTI_GZ"

        # T1-to-B0 Registration (only if space=b0)
        # ---------------------
        t12b0_reg_node = npe.Node(name="Reg-1-T12B0Registration",
                                  interface=fsl.FLIRT(
                                      dof=6,
                                      interp='spline',
                                      cost='normmi',
                                      cost_func='normmi',
                                  ))
        t12b0_reg_node.inputs.output_type = "NIFTI_GZ"

        # MGZ File Conversion (only if space=b0)
        # -------------------
        t1_brain_conv_node = npe.Node(name="Reg-0-T1-T1BrainConvertion",
                                      interface=fs.MRIConvert())
        wm_mask_conv_node = npe.Node(name="Reg-0-T1-WMMaskConvertion",
                                     interface=fs.MRIConvert())

        # WM Transformation (only if space=b0)
        # -----------------
        wm_transform_node = npe.Node(name="Reg-2-WMTransformation",
                                     interface=fsl.ApplyXFM())
        wm_transform_node.inputs.apply_xfm = True

        # Nodes Generation
        # ----------------
        label_convert_node = npe.MapNode(
            name="0-LabelsConversion",
            iterfield=['in_file', 'in_config', 'in_lut', 'out_file'],
            interface=mrtrix3.LabelConvert())
        label_convert_node.inputs.in_config = utils.get_conversion_luts()
        label_convert_node.inputs.in_lut = utils.get_luts()

        # FSL flirt matrix to MRtrix matrix Conversion (only if space=b0)
        # --------------------------------------------
        fsl2mrtrix_conv_node = npe.Node(
            name='Reg-2-FSL2MrtrixConversion',
            interface=niu.Function(
                input_names=[
                    'in_source_image', 'in_reference_image', 'in_flirt_matrix',
                    'name_output_matrix'
                ],
                output_names=['out_mrtrix_matrix'],
                function=convert_flirt_transformation_to_mrtrix_transformation)
        )

        # Parc. Transformation (only if space=b0)
        # --------------------
        parc_transform_node = npe.MapNode(
            name="Reg-2-ParcTransformation",
            iterfield=["in_files", "out_filename"],
            interface=MRTransform())

        # Response Estimation
        # -------------------
        resp_estim_node = npe.Node(name="1a-ResponseEstimation",
                                   interface=mrtrix3.ResponseSD())
        resp_estim_node.inputs.algorithm = 'tournier'

        # FOD Estimation
        # --------------
        fod_estim_node = npe.Node(name="1b-FODEstimation",
                                  interface=EstimateFOD())
        fod_estim_node.inputs.algorithm = 'csd'

        # Tracts Generation
        # -----------------
        tck_gen_node = npe.Node(name="2-TractsGeneration",
                                interface=Tractography())
        tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']
        tck_gen_node.inputs.algorithm = 'iFOD2'

        # BUG: Info package does not exist
        # from nipype.interfaces.mrtrix3.base import Info
        # from distutils.version import LooseVersion
        #
        # if Info.looseversion() >= LooseVersion("3.0"):
        #     tck_gen_node.inputs.select = self.parameters['n_tracks']
        # elif Info.looseversion() <= LooseVersion("0.4"):
        #     tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']
        # else:
        #     from clinica.utils.exceptions import ClinicaException
        #     raise ClinicaException("Your MRtrix version is not supported.")

        # Connectome Generation
        # ---------------------
        # only the parcellation and output filename should be iterable, the tck
        # file stays the same.
        conn_gen_node = npe.MapNode(name="3-ConnectomeGeneration",
                                    iterfield=['in_parc', 'out_file'],
                                    interface=mrtrix3.BuildConnectome())

        # Print begin message
        # -------------------
        print_begin_message = npe.MapNode(interface=niu.Function(
            input_names=['in_bids_or_caps_file'],
            function=utils.print_begin_pipeline),
                                          iterfield='in_bids_or_caps_file',
                                          name='WriteBeginMessage')

        # Print end message
        # -----------------
        print_end_message = npe.MapNode(interface=niu.Function(
            input_names=['in_bids_or_caps_file', 'final_file'],
            function=utils.print_end_pipeline),
                                        iterfield=['in_bids_or_caps_file'],
                                        name='WriteEndMessage')

        # CAPS File names Generation
        # --------------------------
        caps_filenames_node = npe.Node(
            name='CAPSFilenamesGeneration',
            interface=niu.Function(input_names='dwi_file',
                                   output_names=self.get_output_fields(),
                                   function=utils.get_caps_filenames))

        # Connections
        # ===========
        # Computation of the diffusion model, tractography & connectome
        # -------------------------------------------------------------
        self.connect([
            (self.input_node, print_begin_message,
             [('dwi_file', 'in_bids_or_caps_file')]),  # noqa
            (self.input_node, caps_filenames_node, [('dwi_file', 'dwi_file')]),
            # Response Estimation
            (self.input_node, resp_estim_node, [('dwi_file', 'in_file')]
             ),  # Preproc. DWI # noqa
            (self.input_node, resp_estim_node,
             [('dwi_brainmask_file', 'in_mask')]),  # B0 brain mask # noqa
            (self.input_node, resp_estim_node, [('grad_fsl', 'grad_fsl')
                                                ]),  # bvecs and bvals # noqa
            (caps_filenames_node, resp_estim_node,
             [('response', 'wm_file')]),  # output response filename # noqa
            # FOD Estimation
            (self.input_node, fod_estim_node, [('dwi_file', 'in_file')]
             ),  # Preproc. DWI # noqa
            (resp_estim_node, fod_estim_node,
             [('wm_file', 'wm_txt')]),  # Response (txt file) # noqa
            (self.input_node, fod_estim_node,
             [('dwi_brainmask_file', 'mask_file')]),  # B0 brain mask # noqa
            (self.input_node, fod_estim_node,
             [('grad_fsl', 'grad_fsl')]),  # bvecs and bvals # noqa
            (caps_filenames_node, fod_estim_node,
             [('fod', 'wm_odf')]),  # output odf filename # noqa
            # Tracts Generation
            (fod_estim_node, tck_gen_node, [('wm_odf', 'in_file')]
             ),  # ODF file # noqa
            (caps_filenames_node, tck_gen_node,
             [('tracts', 'out_file')]),  # output tck filename # noqa
            # Label Conversion
            (self.input_node, label_convert_node, [('atlas_files', 'in_file')]
             ),  # atlas image files # noqa
            (caps_filenames_node, label_convert_node, [
                ('nodes', 'out_file')
            ]),  # converted atlas image filenames # noqa
            # Connectomes Generation
            (tck_gen_node, conn_gen_node, [('out_file', 'in_file')]),  # noqa
            (caps_filenames_node, conn_gen_node, [('connectomes', 'out_file')
                                                  ]),  # noqa
        ])
        # Registration T1-DWI (only if space=b0)
        # -------------------
        if self.parameters['dwi_space'] == 'b0':
            self.connect([
                # MGZ Files Conversion
                (self.input_node, t1_brain_conv_node, [('t1_brain_file',
                                                        'in_file')]),  # noqa
                (self.input_node, wm_mask_conv_node, [('wm_mask_file',
                                                       'in_file')]),  # noqa
                # B0 Extraction
                (self.input_node, split_node, [('dwi_file', 'in_file')]
                 ),  # noqa
                (split_node, select_node, [('out_files', 'inlist')]),  # noqa
                # Masking
                (select_node, mask_node, [('out', 'in_file')]),  # B0 # noqa
                (self.input_node, mask_node,
                 [('dwi_brainmask_file', 'mask_file')]),  # Brain mask # noqa
                # T1-to-B0 Registration
                (t1_brain_conv_node, t12b0_reg_node, [('out_file', 'in_file')]
                 ),  # Brain # noqa
                (mask_node, t12b0_reg_node, [('out_file', 'reference')
                                             ]),  # B0 brain-masked # noqa
                # WM Transformation
                (wm_mask_conv_node, wm_transform_node,
                 [('out_file', 'in_file')]),  # Brain mask # noqa
                (mask_node, wm_transform_node, [('out_file', 'reference')
                                                ]),  # B0 brain-masked # noqa
                (t12b0_reg_node, wm_transform_node, [
                    ('out_matrix_file', 'in_matrix_file')
                ]),  # T1-to-B0 matrix file # noqa
                # FSL flirt matrix to MRtrix matrix Conversion
                (t1_brain_conv_node, fsl2mrtrix_conv_node,
                 [('out_file', 'in_source_image')]),  # noqa
                (mask_node, fsl2mrtrix_conv_node,
                 [('out_file', 'in_reference_image')]),  # noqa
                (t12b0_reg_node, fsl2mrtrix_conv_node,
                 [('out_matrix_file', 'in_flirt_matrix')]),  # noqa
                # Apply registration without resampling on parcellations
                (label_convert_node, parc_transform_node,
                 [('out_file', 'in_files')]),  # noqa
                (fsl2mrtrix_conv_node, parc_transform_node,
                 [('out_mrtrix_matrix', 'linear_transform')]),  # noqa
                (caps_filenames_node, parc_transform_node,
                 [('nodes', 'out_filename')]),  # noqa
            ])
        # Special care for Parcellation & WM mask
        # ---------------------------------------
        if self.parameters['dwi_space'] == 'b0':
            self.connect([
                (wm_transform_node, tck_gen_node, [('out_file', 'seed_image')
                                                   ]),  # noqa
                (parc_transform_node, conn_gen_node, [('out_file', 'in_parc')
                                                      ]),  # noqa
                (parc_transform_node, self.output_node, [('out_file', 'nodes')
                                                         ]),  # noqa
            ])
        elif self.parameters['dwi_space'] == 'T1w':
            self.connect([
                (self.input_node, tck_gen_node, [('wm_mask_file', 'seed_image')
                                                 ]),  # noqa
                (label_convert_node, conn_gen_node, [('out_file', 'in_parc')
                                                     ]),  # noqa
                (label_convert_node, self.output_node, [('out_file', 'nodes')
                                                        ]),  # noqa
            ])
        else:
            raise ClinicaCAPSError(
                'Bad preprocessed DWI space. Please check your CAPS '
                'folder.')
        # Outputs
        # -------
        self.connect([
            (resp_estim_node, self.output_node, [('wm_file', 'response')]),
            (fod_estim_node, self.output_node, [('wm_odf', 'fod')]),
            (tck_gen_node, self.output_node, [('out_file', 'tracts')]),
            (conn_gen_node, self.output_node, [('out_file', 'connectomes')]),
            (self.input_node, print_end_message, [('dwi_file',
                                                   'in_bids_or_caps_file')]),
            (conn_gen_node, print_end_message, [('out_file', 'final_file')]),
        ])
Example #4
    def preprocess_dwi_data(self,
                            data,
                            index,
                            acqp,
                            atlas2use,
                            ResponseSD_algorithm='tournier',
                            fod_algorithm='csd',
                            tract_algorithm='iFOD2',
                            streamlines_number='10M'):
        '''
        preprocessing of dwi data and connectome extraction

        Parameters
        ----------

        data: tuple |
            a tuple having the path to dwi, bvecs and bvals files. It is obtained
            using the function grab_data()
        index: str |
            Name of text file specifying the relationship between the images in
            --imain and the information in --acqp and --topup. E.g. index.txt
        acqp: str |
            Name of text file with information about the acquisition of the images
            in --imain
        atlas2use: str |
             The input node parcellation image
        ResponseSD_algorithm (optional): str |
             Select the algorithm to be used to complete the script operation;
             Options are: dhollander, fa, manual, msmt_5tt, tax, tournier
             (Default is 'tournier')
        fod_algorithm (optional): str |
             The algorithm to use for FOD estimation. (options are: csd,msmt_csd)
             (Default is 'csd')
        tract_algorithm (optional): str |
            specify the tractography algorithm to use. Valid choices are: FACT,
            iFOD1, iFOD2, Nulldist1, Nulldist2, SD_Stream, Seedtest, Tensor_Det,
            Tensor_Prob (Default is 'iFOD2')
        streamlines_number (optional): str |
            set the desired number of streamlines (Default is '10M')
        '''

        if len(data[0]) != len(data[1]):
            raise ValueError(
                'the number of dwi files does not match the number of bvec files')
        if len(data[0]) != len(data[2]):
            raise ValueError(
                'the number of dwi files does not match the number of bval files')
        if len(data[1]) != len(data[2]):
            raise ValueError(
                'the number of bvec files does not match the number of bval files')

        for subj in range(len(data[0])):
            print('Extracting B0 volume for subject', subj)
            self.roi = fsl.ExtractROI(
                in_file=data[0][subj],
                roi_file=os.path.join(
                    os.path.split(data[0][subj])[0] + '/' +
                    os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                    '_nodiff.nii.gz'),
                t_min=0,
                t_size=1)
            self.roi.run()

            print('Converting into .mif for subject', subj)
            self.mrconvert = mrt.MRConvert()
            self.mrconvert.inputs.in_file = data[0][subj]
            self.mrconvert.inputs.grad_fsl = (data[1][subj], data[2][subj])
            self.mrconvert.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + '_dwi.mif')
            self.mrconvert.run()

            print('Denoising data for subject', subj)
            self.denoise = mrt.DWIDenoise()
            self.denoise.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + '_dwi.mif')
            self.denoise.inputs.noise = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_noise.mif')
            self.denoise.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised.mif')
            self.denoise.run()

            self.denoise_convert = mrt.MRConvert()
            self.denoise_convert.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised.mif')
            self.denoise_convert.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised.nii.gz')
            self.denoise_convert.run()

            print('Skull stripping for subject', subj)
            self.mybet = fsl.BET()
            self.mybet.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_nodiff.nii.gz')
            self.mybet.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain.nii.gz')
            self.mybet.inputs.frac = 0.1
            self.mybet.inputs.robust = True
            self.mybet.inputs.mask = True
            self.mybet.run()

            print('Running Eddy for subject', subj)
            self.eddy = Eddy()
            self.eddy.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised.nii.gz')
            self.eddy.inputs.in_mask = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain_mask.nii.gz')
            self.eddy.inputs.in_index = index
            self.eddy.inputs.in_acqp = acqp
            self.eddy.inputs.in_bvec = data[1][subj]
            self.eddy.inputs.in_bval = data[2][subj]
            self.eddy.inputs.out_base = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.nii.gz')
            self.eddy.run()

            print('Running Bias Correction for subject', subj)
            self.bias_correct = mrt.DWIBiasCorrect()
            self.bias_correct.inputs.use_ants = True
            self.bias_correct.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.nii.gz')
            self.bias_correct.inputs.grad_fsl = (os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.eddy_rotated_bvecs.bvec'), data[2][subj])
            self.bias_correct.inputs.bias = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + '_bias.mif')
            self.bias_correct.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy_unbiased.mif')
            self.bias_correct.run()

            print('Calculating Response function for subject', subj)
            self.resp = mrt.ResponseSD()
            self.resp.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy_unbiased.mif')
            self.resp.inputs.algorithm = ResponseSD_algorithm
            self.resp.inputs.grad_fsl = (os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.eddy_rotated_bvecs.bvec'), data[2][subj])
            self.resp.inputs.wm_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_response.txt')
            self.resp.run()

            print('Estimating FOD for subject', subj)
            self.fod = mrt.EstimateFOD()
            self.fod.inputs.algorithm = fod_algorithm
            self.fod.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy_unbiased.mif')
            self.fod.inputs.wm_txt = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_response.txt')
            self.fod.inputs.mask_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain_mask.nii.gz')
            self.fod.inputs.grad_fsl = (os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_dwi_denoised_eddy.eddy_rotated_bvecs.bvec'), data[2][subj])
            self.fod.inputs.wm_odf = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + 'fods.mif')
            self.fod.run()

            print('Extracting whole brain tract for subject', subj)
            self.tk = mrt.Tractography()
            self.tk.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] + 'fods.mif')
            self.tk.inputs.roi_mask = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain_mask.nii.gz')
            self.tk.inputs.algorithm = tract_algorithm
            self.tk.inputs.seed_image = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_denoised_brain_mask.nii.gz')
            self.tk.inputs.select = streamlines_number
            self.tk.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_whole_brain_' + streamlines_number + '.tck')
            self.tk.run()

            print('Extracting connectome for subject', subj)
            self.mat = mrt.BuildConnectome()
            self.mat.inputs.in_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_whole_brain_' + streamlines_number + '.tck')
            self.mat.inputs.in_parc = atlas2use
            self.mat.inputs.out_file = os.path.join(
                os.path.split(data[0][subj])[0] + '/' +
                os.path.split(data[0][0])[1].split(".nii.gz")[0] +
                '_connectome.csv')
            self.mat.run()
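
    # Hypothetical usage sketch (not part of the original): `DwiPipeline` and
    # `grab_data()` stand in for whatever class and loader this method belongs
    # to, and the file names are placeholders.
    #
    #     pipeline = DwiPipeline(subjects_dir='/data/subjects')
    #     data = pipeline.grab_data()   # -> (dwi_files, bvec_files, bval_files)
    #     pipeline.preprocess_dwi_data(data,
    #                                  index='index.txt',
    #                                  acqp='acqparams.txt',
    #                                  atlas2use='atlas_parcellation.nii.gz',
    #                                  ResponseSD_algorithm='dhollander')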
Example #5
import os
from os.path import abspath
from datetime import datetime
from IPython.display import Image
import pydot
from nipype import Workflow, Node, MapNode, Function, config
from nipype.interfaces.fsl import TOPUP, ApplyTOPUP, BET, ExtractROI,  Eddy, FLIRT, FUGUE
from nipype.interfaces.fsl.maths import MathsCommand
import nipype.interfaces.utility as util 
import nipype.interfaces.mrtrix3 as mrt
#Requirements for the workflow to run smoothly: all files are in NIfTI format and named according to the following standard:
#Images are from the tonotopy DKI sequences on the 7T Philips Achieva scanner in Lund. It should work with any DKI sequence, and possibly also a standard DTI, but the settings for B0 corrections, EPI-distortion corrections and eddy-current corrections will be wrong.
#DKI file has a base name shared with bvec and bval in FSL format. E.g. "DKI.nii.gz" "DKI.bvec" and "DKI.bval". 
#There is one b0-volume with reversed (P->A) phase encoding called DKIbase+_revenc. E.g. "DKI_revenc.nii.gz". 
#Philips B0-map magnitude and phase offset (in Hz) images. 
#One input file for topup describing the images as specified by topup. 
#Set nbrOfThreads to number of available CPU threads to run the analyses. 
### Need to make better revenc for the 15 version if we choose to use it (i.e. same TE and TR)
#Set to relevant directory/parameters
datadir=os.path.abspath("/Users/ling-men/Documents/MRData/testDKI")
rawDKI_base='DKI_15' 
B0map_base = 'B0map'
nbrOfThreads=6
print_graph = True 
acqparam_file = os.path.join(datadir,'acqparams.txt')
index_file = os.path.join(datadir,'index.txt')
####
#config.enable_debug_mode()
DKI_nii=os.path.join(datadir, rawDKI_base+'.nii.gz')
DKI_bval=os.path.join(datadir, rawDKI_base+'.bval')
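
# A minimal sketch (not part of the original script) of how these paths could
# feed the first workflow nodes; the derived file names below are assumptions
# based on the naming convention described in the comments above.
DKI_bvec = os.path.join(datadir, rawDKI_base + '.bvec')
DKI_revenc = os.path.join(datadir, rawDKI_base + '_revenc.nii.gz')

# Hypothetical node extracting the first b0 volume from the DKI series.
n_extract_b0 = Node(ExtractROI(in_file=DKI_nii, t_min=0, t_size=1),
                    name='extract_b0')

wf = Workflow(name='DKI_preproc', base_dir=datadir)
wf.add_nodes([n_extract_b0])
# Further nodes (TOPUP, ApplyTOPUP, Eddy, ...) would be added and connected here.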
Example #6
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Notes:
            - If `FSLOUTPUTTYPE` environment variable is not set, `nipype` takes
            NIFTI by default.

        Todo:
            - [x] Detect space automatically.
            - [ ] Allow for custom parcellations (See TODOs in utils).

        """
        import nipype.interfaces.freesurfer as fs
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.mrtrix3 as mrtrix3
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as npe
        from nipype.interfaces.mrtrix3.tracking import Tractography
        from nipype.interfaces.mrtrix.preprocess import MRTransform

        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
        from clinica.lib.nipype.interfaces.mrtrix3.reconst import EstimateFOD
        from clinica.utils.exceptions import ClinicaCAPSError
        from clinica.utils.mri_registration import (
            convert_flirt_transformation_to_mrtrix_transformation,
        )

        # Nodes
        # =====
        # B0 Extraction (only if space=b0)
        # -------------
        split_node = npe.Node(name="Reg-0-DWI-B0Extraction", interface=fsl.Split())
        split_node.inputs.output_type = "NIFTI_GZ"
        split_node.inputs.dimension = "t"
        select_node = npe.Node(name="Reg-0-DWI-B0Selection", interface=niu.Select())
        select_node.inputs.index = 0

        # B0 Brain Extraction (only if space=b0)
        # -------------------
        mask_node = npe.Node(name="Reg-0-DWI-BrainMasking", interface=fsl.ApplyMask())
        mask_node.inputs.output_type = "NIFTI_GZ"

        # T1-to-B0 Registration (only if space=b0)
        # ---------------------
        t12b0_reg_node = npe.Node(
            name="Reg-1-T12B0Registration",
            interface=fsl.FLIRT(
                dof=6,
                interp="spline",
                cost="normmi",
                cost_func="normmi",
            ),
        )
        t12b0_reg_node.inputs.output_type = "NIFTI_GZ"

        # MGZ File Conversion (only if space=b0)
        # -------------------
        t1_brain_conv_node = npe.Node(
            name="Reg-0-T1-T1BrainConvertion", interface=fs.MRIConvert()
        )
        wm_mask_conv_node = npe.Node(
            name="Reg-0-T1-WMMaskConvertion", interface=fs.MRIConvert()
        )

        # WM Transformation (only if space=b0)
        # -----------------
        wm_transform_node = npe.Node(
            name="Reg-2-WMTransformation", interface=fsl.ApplyXFM()
        )
        wm_transform_node.inputs.apply_xfm = True

        # Nodes Generation
        # ----------------
        label_convert_node = npe.MapNode(
            name="0-LabelsConversion",
            iterfield=["in_file", "in_config", "in_lut", "out_file"],
            interface=mrtrix3.LabelConvert(),
        )
        label_convert_node.inputs.in_config = utils.get_conversion_luts()
        label_convert_node.inputs.in_lut = utils.get_luts()

        # FSL flirt matrix to MRtrix matrix Conversion (only if space=b0)
        # --------------------------------------------
        fsl2mrtrix_conv_node = npe.Node(
            name="Reg-2-FSL2MrtrixConversion",
            interface=niu.Function(
                input_names=[
                    "in_source_image",
                    "in_reference_image",
                    "in_flirt_matrix",
                    "name_output_matrix",
                ],
                output_names=["out_mrtrix_matrix"],
                function=convert_flirt_transformation_to_mrtrix_transformation,
            ),
        )

        # Parc. Transformation (only if space=b0)
        # --------------------
        parc_transform_node = npe.MapNode(
            name="Reg-2-ParcTransformation",
            iterfield=["in_files", "out_filename"],
            interface=MRTransform(),
        )

        # Response Estimation
        # -------------------
        resp_estim_node = npe.Node(
            name="1a-ResponseEstimation", interface=mrtrix3.ResponseSD()
        )
        resp_estim_node.inputs.algorithm = "tournier"

        # FOD Estimation
        # --------------
        fod_estim_node = npe.Node(name="1b-FODEstimation", interface=EstimateFOD())
        fod_estim_node.inputs.algorithm = "csd"

        # Tracts Generation
        # -----------------
        tck_gen_node = npe.Node(name="2-TractsGeneration", interface=Tractography())
        tck_gen_node.inputs.select = self.parameters["n_tracks"]
        tck_gen_node.inputs.algorithm = "iFOD2"

        # Connectome Generation
        # ---------------------
        # only the parcellation and output filename should be iterable, the tck
        # file stays the same.
        conn_gen_node = npe.MapNode(
            name="3-ConnectomeGeneration",
            iterfield=["in_parc", "out_file"],
            interface=mrtrix3.BuildConnectome(),
        )

        # Print begin message
        # -------------------
        print_begin_message = npe.MapNode(
            interface=niu.Function(
                input_names=["in_bids_or_caps_file"],
                function=utils.print_begin_pipeline,
            ),
            iterfield="in_bids_or_caps_file",
            name="WriteBeginMessage",
        )

        # Print end message
        # -----------------
        print_end_message = npe.MapNode(
            interface=niu.Function(
                input_names=["in_bids_or_caps_file", "final_file"],
                function=utils.print_end_pipeline,
            ),
            iterfield=["in_bids_or_caps_file"],
            name="WriteEndMessage",
        )

        # CAPS File names Generation
        # --------------------------
        caps_filenames_node = npe.Node(
            name="CAPSFilenamesGeneration",
            interface=niu.Function(
                input_names="dwi_file",
                output_names=self.get_output_fields(),
                function=utils.get_caps_filenames,
            ),
        )

        # Connections
        # ===========
        # Computation of the diffusion model, tractography & connectome
        # -------------------------------------------------------------
        # fmt: off
        self.connect(
            [
                (self.input_node, print_begin_message, [("dwi_file", "in_bids_or_caps_file")]),
                (self.input_node, caps_filenames_node, [("dwi_file", "dwi_file")]),
                # Response Estimation
                (self.input_node, resp_estim_node, [("dwi_file", "in_file")]),  # Preproc. DWI
                (self.input_node, resp_estim_node, [("dwi_brainmask_file", "in_mask")]),  # B0 brain mask
                (self.input_node, resp_estim_node, [("grad_fsl", "grad_fsl")]),  # bvecs and bvals
                (caps_filenames_node, resp_estim_node, [("response", "wm_file")]),  # output response filename
                # FOD Estimation
                (self.input_node, fod_estim_node, [("dwi_file", "in_file")]),  # Preproc. DWI
                (resp_estim_node, fod_estim_node, [("wm_file", "wm_txt")]),  # Response (txt file)
                (self.input_node, fod_estim_node, [("dwi_brainmask_file", "mask_file")]),  # B0 brain mask
                (self.input_node, fod_estim_node, [("grad_fsl", "grad_fsl")]),  # T1-to-B0 matrix file
                (caps_filenames_node, fod_estim_node, [("fod", "wm_odf")]),  # output odf filename
                # Tracts Generation
                (fod_estim_node, tck_gen_node, [("wm_odf", "in_file")]),  # ODF file
                (caps_filenames_node, tck_gen_node, [("tracts", "out_file")]),  # output tck filename
                # Label Conversion
                (self.input_node, label_convert_node, [("atlas_files", "in_file")]),  # atlas image files
                (caps_filenames_node, label_convert_node, [("nodes", "out_file")]),  # converted atlas image filenames
                # Connectomes Generation
                (tck_gen_node, conn_gen_node, [("out_file", "in_file")]),
                (caps_filenames_node, conn_gen_node, [("connectomes", "out_file")]),
            ]
        )
        # Registration T1-DWI (only if space=b0)
        # -------------------
        if self.parameters["dwi_space"] == "b0":
            self.connect(
                [
                    # MGZ Files Conversion
                    (self.input_node, t1_brain_conv_node, [("t1_brain_file", "in_file")]),
                    (self.input_node, wm_mask_conv_node, [("wm_mask_file", "in_file")]),
                    # B0 Extraction
                    (self.input_node, split_node, [("dwi_file", "in_file")]),
                    (split_node, select_node, [("out_files", "inlist")]),
                    # Masking
                    (select_node, mask_node, [("out", "in_file")]),  # B0
                    (self.input_node, mask_node, [("dwi_brainmask_file", "mask_file")]),  # Brain mask
                    # T1-to-B0 Registration
                    (t1_brain_conv_node, t12b0_reg_node, [("out_file", "in_file")]),  # Brain
                    (mask_node, t12b0_reg_node, [("out_file", "reference")]),  # B0 brain-masked
                    # WM Transformation
                    (wm_mask_conv_node, wm_transform_node, [("out_file", "in_file")]),  # Brain mask
                    (mask_node, wm_transform_node, [("out_file", "reference")]),  # B0 brain-masked
                    (t12b0_reg_node, wm_transform_node, [("out_matrix_file", "in_matrix_file")]),  # T1-to-B0 matrix file
                    # FSL flirt matrix to MRtrix matrix Conversion
                    (t1_brain_conv_node, fsl2mrtrix_conv_node, [("out_file", "in_source_image")]),
                    (mask_node, fsl2mrtrix_conv_node, [("out_file", "in_reference_image")]),
                    (t12b0_reg_node, fsl2mrtrix_conv_node, [("out_matrix_file", "in_flirt_matrix")]),
                    # Apply registration without resampling on parcellations
                    (label_convert_node, parc_transform_node, [("out_file", "in_files")]),
                    (fsl2mrtrix_conv_node, parc_transform_node, [("out_mrtrix_matrix", "linear_transform")]),
                    (caps_filenames_node, parc_transform_node, [("nodes", "out_filename")]),
                ]
            )
        # Special care for Parcellation & WM mask
        # ---------------------------------------
        if self.parameters["dwi_space"] == "b0":
            self.connect(
                [
                    (wm_transform_node, tck_gen_node, [("out_file", "seed_image")]),
                    (parc_transform_node, conn_gen_node, [("out_file", "in_parc")]),
                    (parc_transform_node, self.output_node, [("out_file", "nodes")]),
                ]
            )
        elif self.parameters["dwi_space"] == "T1w":
            self.connect(
                [
                    (self.input_node, tck_gen_node, [("wm_mask_file", "seed_image")]),
                    (label_convert_node, conn_gen_node, [("out_file", "in_parc")]),
                    (label_convert_node, self.output_node, [("out_file", "nodes")]),
                ]
            )
        else:
            raise ClinicaCAPSError(
                "Bad preprocessed DWI space. Please check your CAPS folder."
            )
        # Outputs
        # -------
        self.connect(
            [
                (resp_estim_node, self.output_node, [("wm_file", "response")]),
                (fod_estim_node, self.output_node, [("wm_odf", "fod")]),
                (tck_gen_node, self.output_node, [("out_file", "tracts")]),
                (conn_gen_node, self.output_node, [("out_file", "connectomes")]),
                (self.input_node, print_end_message, [("dwi_file", "in_bids_or_caps_file")]),
                (conn_gen_node, print_end_message, [("out_file", "final_file")]),
            ]
        )
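
        # A minimal sketch (assumption, not taken from the original) of the
        # parameters this pipeline expects before build_core_nodes() runs:
        #
        #     self.parameters = {
        #         "n_tracks": 1000000,   # passed to tckgen via the `select` input
        #         "dwi_space": "b0",     # "b0" or "T1w"; other values raise ClinicaCAPSError
        #     }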
Example #7
import nipype.pipeline.engine as pe
import nipype.interfaces.io as io
import nipype.interfaces.mrtrix3 as mrtrix3
from nipype.interfaces.base import Undefined

mrtrix3_mrconvert = pe.Node(interface=mrtrix3.MRConvert(),
                            name='mrtrix3_mrconvert')
mrtrix3_mrconvert.inputs.grad_fsl = ("/Users/bsms9gep/data/bvecs",
                                     "/Users/bsms9gep/data/bvals")

#Wraps the executable command ``dwi2mask``.
mrtrix3_brain_mask = pe.Node(interface=mrtrix3.BrainMask(),
                             name='mrtrix3_brain_mask')

#Wraps the executable command ``dwibiascorrect``.
mrtrix3_dwibias_correct = pe.Node(interface=mrtrix3.DWIBiasCorrect(),
                                  name='mrtrix3_dwibias_correct')
mrtrix3_dwibias_correct.inputs.out_file = 'dwi_unbiased.mif'

#Wraps the executable command ``dwi2response``.
mrtrix3_response_sd = pe.Node(interface=mrtrix3.ResponseSD(),
                              name='mrtrix3_response_sd')
mrtrix3_response_sd.inputs.algorithm = 'dhollander'
mrtrix3_response_sd.inputs.gm_file = 'gm.txt'
mrtrix3_response_sd.inputs.csf_file = 'csf.txt'
mrtrix3_response_sd.inputs.max_sh = (8, 8, 8)

#Wraps the executable command ``dwi2fod``.
mrtrix3_estimate_fod = pe.Node(interface=mrtrix3.EstimateFOD(),
                               name='mrtrix3_estimate_fod')
mrtrix3_estimate_fod.inputs.algorithm = 'msmt_csd'
mrtrix3_estimate_fod.inputs.max_sh = Undefined

#Generic datasink module to store structured outputs
io_data_sink = pe.Node(interface=io.DataSink(), name='io_data_sink')
io_data_sink.inputs.base_directory = "/Users/bsms9gep/results"
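
# Hypothetical wiring sketch (not in the original snippet): connect the nodes
# above into a workflow. The input DWI path is a placeholder, and the DWI is
# assumed to be converted to .mif with the gradient table embedded.
mrtrix3_mrconvert.inputs.in_file = "/Users/bsms9gep/data/dwi.nii.gz"

wf = pe.Workflow(name='mrtrix3_csd_prep')
wf.connect([
    (mrtrix3_mrconvert, mrtrix3_brain_mask, [('out_file', 'in_file')]),
    (mrtrix3_mrconvert, mrtrix3_dwibias_correct, [('out_file', 'in_file')]),
    (mrtrix3_dwibias_correct, mrtrix3_response_sd, [('out_file', 'in_file')]),
    (mrtrix3_brain_mask, mrtrix3_response_sd, [('out_file', 'in_mask')]),
    (mrtrix3_dwibias_correct, mrtrix3_estimate_fod, [('out_file', 'in_file')]),
    (mrtrix3_response_sd, mrtrix3_estimate_fod, [('wm_file', 'wm_txt')]),
    (mrtrix3_brain_mask, mrtrix3_estimate_fod, [('out_file', 'mask_file')]),
    (mrtrix3_estimate_fod, io_data_sink, [('wm_odf', 'fod')]),
])
# wf.run()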
Example #8
from nipype import Workflow, Node
from nipype.interfaces.io import DataSink, SelectFiles
from nipype.interfaces.utility import IdentityInterface
import nipype.interfaces.mrtrix3 as mrt
import nipype.interfaces.mrtrix3.preprocess as preprocess
import nipype.interfaces.mrtrix3.utils as utils
# mrcatfunc, preprocfunc, fod2fixelfunc, fixel2peaksfunc, mrcalcfunc and
# tensor2metricfunc are assumed to be project-local wrappers around the
# corresponding MRtrix3 commands and are not imported here.


def create_DWI_workflow(
    subject_list,
    bids_dir,
    work_dir,
    out_dir,
    bids_templates,
):

    # create initial workflow
    wf = Workflow(name='DWI', base_dir=work_dir)

    # use infosource to iterate workflow across subject list
    n_infosource = Node(interface=IdentityInterface(fields=['subject_id']),
                        name="subject_source"
                        # input: 'subject_id'
                        # output: 'subject_id'
                        )
    # runs the node with subject_id = each element in subject_list
    n_infosource.iterables = ('subject_id', subject_list)

    # select matching files from bids_dir
    n_selectfiles = Node(interface=SelectFiles(templates=bids_templates,
                                               base_directory=bids_dir),
                         name='get_subject_data')
    wf.connect([(n_infosource, n_selectfiles, [('subject_id', 'subject_id_p')])
                ])

    # DWIDenoise
    # https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html
    n_denoise = Node(interface=mrt.DWIDenoise(), name='n_denoise')
    wf.connect([(n_selectfiles, n_denoise, [('DWI_all', 'in_file')])])

    # datasink
    n_datasink = Node(interface=DataSink(base_directory=out_dir),
                      name='datasink')

    wf.connect([(n_selectfiles, n_datasink, [('all_b0_PA',
                                              'all_b0_PA_unchanged')]),
                (n_denoise, n_datasink, [('out_file', 'DWI_all_denoised')])])

    ########## I'VE ADDED IN ##########################################################################
    # MRDeGibbs
    # https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html
    n_degibbs = Node(
        interface=mrt.MRDeGibbs(out_file='DWI_all_denoised_degibbs.mif'),
        name='n_degibbs')
    wf.connect([(n_denoise, n_degibbs, [('out_file', 'in_file')])])

    wf.connect([(n_degibbs, n_datasink, [('out_file',
                                          'DWI_all_denoised_degibbs.mif')])])

    # DWI Extract
    n_dwiextract = Node(interface=mrt.DWIExtract(bzero=True,
                                                 out_file='b0vols.mif'),
                        name='n_dwiextract')

    wf.connect([(n_degibbs, n_dwiextract, [('out_file', 'in_file')])])

    wf.connect([(n_dwiextract, n_datasink, [('out_file', 'noddi_b0_degibbs')])
                ])

    # MRcat
    n_mrcat = Node(
        interface=mrcatfunc.MRCat(
            #axis=3,
            out_file='b0s.mif'),
        name='n_mrcat')

    # Connect DTI_B0_PA to mrcat node
    wf.connect([(n_selectfiles, n_mrcat, [('DTI_B0_PA', 'in_file1')])])

    wf.connect([(n_dwiextract, n_mrcat, [('out_file', 'in_file2')])])

    # Output the mrcat file into file 'noddi_and_PA_b0s.mif'
    wf.connect([(n_mrcat, n_datasink, [('out_file', 'noddi_and_PA_b0s.mif')])])

    # DWIfslpreproc
    n_dwifslpreproc = Node(interface=preprocfunc.DWIFslPreProc(
        out_file='preprocessedDWIs.mif', use_header=True),
                           name='n_dwifslpreproc')

    # Connect output of degibbs to dwifslpreproc node
    wf.connect([(n_degibbs, n_dwifslpreproc, [('out_file', 'in_file')])])
    # Connect output of mrcat to se_epi input
    wf.connect([(n_mrcat, n_dwifslpreproc, [('out_file', 'se_epi_file')])])
    # Put output of dwifslpreproc into 'preprocessedDWIs.mif'
    wf.connect([(n_dwifslpreproc, n_datasink, [('out_file',
                                                'preprocessedDWIs.mif')])])

    # DWI bias correct
    n_dwibiascorrect = Node(
        interface=preprocess.DWIBiasCorrect(use_ants=True),
        name='n_dwibiascorrect',
    )

    wf.connect([(n_dwifslpreproc, n_dwibiascorrect, [('out_file', 'in_file')])
                ])
    wf.connect([(n_dwibiascorrect, n_datasink,
                 [('out_file', 'ANTSpreprocessedDWIs.mif')])])

    #DWI2mask
    n_dwi2mask = Node(interface=mrt.BrainMask(out_file='mask.mif'),
                      name='n_dwi2mask')
    wf.connect([(n_dwibiascorrect, n_dwi2mask, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2mask, n_datasink, [('out_file', 'mask.mif')])])

    ## A) Fixel-based analysis
    #DWI2response
    n_dwi2response = Node(interface=mrt.ResponseSD(algorithm='dhollander',
                                                   wm_file='wm_res.txt',
                                                   gm_file='gm_res.txt',
                                                   csf_file='csf_res.txt'),
                          name='n_dwi2response')

    wf.connect([(n_dwibiascorrect, n_dwi2response, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2response, n_datasink, [('wm_file', 'wm_res.txt')])])
    wf.connect([(n_dwi2response, n_datasink, [('gm_file', 'gm_res.txt')])])
    wf.connect([(n_dwi2response, n_datasink, [('csf_file', 'csf_res.txt')])])

    #DWI2fod
    n_dwi2fod = Node(interface=mrt.ConstrainedSphericalDeconvolution(
        algorithm='msmt_csd',
        wm_odf='wmfod.mif',
        gm_odf='gmfod.mif',
        csf_odf='csffod.mif'),
                     name='n_dwi2fod')
    # connect outputs of dwi2response into dwi2fod
    wf.connect([(n_dwibiascorrect, n_dwi2fod, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('wm_file', 'wm_txt')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('gm_file', 'gm_txt')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('csf_file', 'csf_txt')])])
    # output wmfod file from dwi2fod
    wf.connect([(n_dwi2fod, n_datasink, [('wm_odf', 'wmfod.mif')])])
    wf.connect([(n_dwi2fod, n_datasink, [('gm_odf', 'gmfod.mif')])])
    wf.connect([(n_dwi2fod, n_datasink, [('csf_odf', 'csffod.mif')])])

    #mrconvert to extract Z component of wmfod
    n_mrconvert_fod = Node(interface=utils.MRConvert(out_file='Zwmfod.mif',
                                                     coord=[3, 0]),
                           name='n_mrconvert_fod')

    wf.connect([(n_dwi2fod, n_mrconvert_fod, [('wm_odf', 'in_file')])])

    wf.connect([(n_mrconvert_fod, n_datasink, [('out_file', 'Zwmfod.mif')])])

    # Concatenate the wm, gm and csf fod files to see their distribution throughout the brain
    n_mrcat_fod = Node(interface=mrcatfunc.MRCat(out_file='vf.mif'),
                       name='n_mrcat_fod')
    # Connect Zwmfod, gmfod and csffod as inputs
    wf.connect([(n_mrconvert_fod, n_mrcat_fod, [('out_file', 'in_file1')])])
    wf.connect([(n_dwi2fod, n_mrcat_fod, [('gm_odf', 'in_file2')])])
    wf.connect([(n_dwi2fod, n_mrcat_fod, [('csf_odf', 'in_file3')])])
    # Output the mrcat file into 'vf.mif'
    wf.connect([(n_mrcat_fod, n_datasink, [('out_file', 'vf.mif')])])

    #fod2fixel wmfod.mif wmfixels -fmls_peak_value 0 -fmls_integral 0.10 -afd afd.mif -peak peak.mif -disp disp.mif
    # OUTPUTS: -afd afd.mif -peak peak.mif -disp disp.mif
    n_fod2fixel = Node(
        interface=fod2fixelfunc.fod2fixel(
            out_file='wmfixels',
            #afd_file = 'afd.mif',
            peak_file='peak.mif',
            disp_file='disp.mif'),
        name='n_fod2fixel')
    # trial the peak value and integral thresholds over multiple values
    n_fod2fixel.iterables = [('fmls_peak_value', [0, 0.10, 0.50]),
                             ('fmls_integral', [0, 0.10, 0.50])]

    # obtain wm fibre image as input
    wf.connect([(n_dwi2fod, n_fod2fixel, [('wm_odf', 'in_file')])])
    # outputs of fod2fixel
    wf.connect([(n_fod2fixel, n_datasink, [('out_file', 'wmfixels')])])
    wf.connect([(n_fod2fixel, n_datasink, [('afd_file', 'afd.mif')])])
    wf.connect([(n_fod2fixel, n_datasink, [('peak_file', 'peak.mif')])])
    wf.connect([(n_fod2fixel, n_datasink, [('disp_file', 'disp.mif')])])

    ## Fixel2peaks
    n_fixel2peaks = Node(interface=fixel2peaksfunc.fixel2peaks(
        out_file='peaks_wmdirections.mif'),
                         name='n_fixel2peaks')

    n_fixel2peaks.iterables = ('number', [1, 2, 3])

    # obtain directions file in output folder of fod2fixel, as input
    wf.connect([(n_fod2fixel, n_fixel2peaks, [('out_file', 'in_file')])])
    # outputs of fixel2peaks
    wf.connect([(n_fixel2peaks, n_datasink, [('out_file',
                                              'peaks_wmdirections.mif')])])

    #mrmath to find normalised value of peak WM directions
    n_mrmath = Node(interface=mrt.MRMath(
        axis=3, operation='norm', out_file='norm_peaks_wmdirections.mif'),
                    name='n_mrmath')

    wf.connect([(n_fixel2peaks, n_mrmath, [('out_file', 'in_file')])])

    wf.connect([(n_mrmath, n_datasink, [('out_file',
                                         'norm_peaks_wmdirections.mif')])])

    # mrcalc to divide peak WM direction by normalised value
    n_mrcalc = Node(interface=mrcalcfunc.MRCalc(operation='divide',
                                                out_file='wm_peak_dir.mif'),
                    name='n_mrcalc')

    wf.connect([(n_fixel2peaks, n_mrcalc, [('out_file', 'in_file1')])])

    wf.connect([(n_mrmath, n_mrcalc, [('out_file', 'in_file2')])])

    wf.connect([(n_mrcalc, n_datasink, [('out_file', 'WM_peak_dir.mif')])])

    #mrconvert to extract Z component of peak directions
    n_mrconvert2 = Node(interface=utils.MRConvert(
        out_file='Zpeak_WM_Directions.mif', coord=[3, 2]),
                        name='n_mrconvert2')

    wf.connect([(n_mrcalc, n_mrconvert2, [('out_file', 'in_file')])])

    wf.connect([(n_mrconvert2, n_datasink, [('out_file',
                                             'Zpeak_WM_Directions.mif')])])

    # mrcalc to find absolute value
    n_mrcalc2 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='absZpeak_WM_Directions.mif'),
                     name='n_mrcalc2')

    wf.connect([(n_mrconvert2, n_mrcalc2, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc2, n_datasink, [('out_file',
                                          'absZpeak_WM_Directions.mif')])])

    # mrcalc to get angle by doing inverse cosine
    n_mrcalc3 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acosZpeak_WM_Directions.mif'),
                     name='n_mrcalc3')

    wf.connect([(n_mrcalc2, n_mrcalc3, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc3, n_datasink, [('out_file',
                                          'acosZpeak_WM_Directions.mif')])])

    # mrcalc to convert angle to degrees
    n_mrcalc4 = Node(interface=mrcalcfunc.MRCalc(
        operation='multiply', operand=180, out_file='Fixel1_Z_angle.mif'),
                     name='n_mrcalc4')

    wf.connect([(n_mrcalc3, n_mrcalc4, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc4, n_datasink, [('out_file', 'Fixel1_Z_angle.mif')])])

    n_mrcalc5 = Node(interface=mrcalcfunc.MRCalc(
        operation='divide',
        operand=3.14159265,
        out_file='Fixel1_Z_cos_deg.mif'),
                     name='n_mrcalc5')

    wf.connect([(n_mrcalc4, n_mrcalc5, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc5, n_datasink, [('out_file', 'Fixel1_Z_cos_deg.mif')])
                ])

    ## B) Tensor-based analysis
    #dwi2tensor
    n_dwi2tensor = Node(interface=mrt.FitTensor(out_file='dti.mif'),
                        name='n_dwi2tensor')

    wf.connect([(n_dwibiascorrect, n_dwi2tensor, [('out_file', 'in_file')])])

    wf.connect([(n_dwi2mask, n_dwi2tensor, [('out_file', 'in_mask')])])

    wf.connect([(n_dwi2tensor, n_datasink, [('out_file', 'dt.mif')])])

    #tensor2metric
    n_tensor2metric = Node(interface=tensor2metricfunc.tensor2metric(
        modulate='none', num=1, vector_file='eigenvector.mif'),
                           name='n_tensor2metric')

    wf.connect([(n_dwi2tensor, n_tensor2metric, [('out_file', 'input_file')])])

    wf.connect([(n_tensor2metric, n_datasink, [('vector_file',
                                                'eigenvector.mif')])])

    #mrconvert to get Z eigenvector
    n_mrconvert3 = Node(interface=utils.MRConvert(coord=[3, 2],
                                                  out_file='eigenvectorZ.mif'),
                        name='n_mrconvert3')

    wf.connect([(n_tensor2metric, n_mrconvert3, [('vector_file', 'in_file')])])

    wf.connect([(n_mrconvert3, n_datasink, [('out_file', 'eigenvectorZ.mif')])
                ])

    #ALL SUBSEQUENT STEPS GET ANGLE IN DEGREES
    # mrcalc to find absolute value
    n_mrcalc6 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='abs_eigenvectorZ.mif'),
                     name='n_mrcalc6')

    wf.connect([(n_mrconvert3, n_mrcalc6, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc6, n_datasink, [('out_file', 'abs_eigenvectorZ.mif')])
                ])

    # mrcalc to get angle by doing inverse cosine
    n_mrcalc7 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acos_eigenvectorZ.mif'),
                     name='n_mrcalc7')

    wf.connect([(n_mrcalc6, n_mrcalc7, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc7, n_datasink, [('out_file', 'acos_eigenvectorZ.mif')
                                         ])])

    # mrcalc to convert angle to degrees
    n_mrcalc8 = Node(
        interface=mrcalcfunc.MRCalc(operation='multiply',
                                    operand=180,
                                    out_file='degrees_eigenvectorZ.mif'),
        name='n_mrcalc8')

    wf.connect([(n_mrcalc7, n_mrcalc8, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc8, n_datasink, [('out_file',
                                          'degrees_eigenvectorZ.mif')])])

    n_mrcalc9 = Node(interface=mrcalcfunc.MRCalc(operation='divide',
                                                 operand=3.14159265,
                                                 out_file='dti_z_cos_deg.mif'),
                     name='n_mrcalc9')

    wf.connect([(n_mrcalc8, n_mrcalc9, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc9, n_datasink, [('out_file', 'dti_z_cos_deg.mif')])])

    # Difference image between fixel based and tensor based outputs
    n_mrcalc10 = Node(interface=mrcalcfunc.MRCalc(
        operation='subtract', out_file='diff_imag_tensor_minus_fixel.mif'),
                      name='n_mrcalc10')

    wf.connect([(n_mrcalc9, n_mrcalc10, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc5, n_mrcalc10, [('out_file', 'in_file2')])])

    wf.connect([(n_mrcalc10, n_datasink,
                 [('out_file', 'diff_imag_tensor_minus_fixel.mif')])])

    #################################################################################
    return wf
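
# Hypothetical usage sketch (not part of the original): the subject IDs, paths
# and BIDS templates below are placeholders.
if __name__ == '__main__':
    wf = create_DWI_workflow(
        subject_list=['sub-01'],
        bids_dir='/data/bids',
        work_dir='/data/work',
        out_dir='/data/derivatives',
        bids_templates={
            'DWI_all': '{subject_id_p}/dwi/*_dwi.nii.gz',
            'all_b0_PA': '{subject_id_p}/fmap/*_dir-PA_epi.nii.gz',
            'DTI_B0_PA': '{subject_id_p}/fmap/*_dir-PA_epi.nii.gz',
        },
    )
    wf.run(plugin='MultiProc')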
Example #9
def dholl_preproc_wf(shells=[0, 1000, 2000],
                     lmax=[0, 8, 8],
                     sshell=False,
                     noreorient=False,
                     template_dir=None,
                     template_label=None,
                     wdir=None,
                     nthreads=1,
                     name='dholl_preproc_wf'):
    """
    Set up Dhollander response preproc workflow
    No assumption of registration to T1w space is made
    """

    if template_dir is None or template_label is None:
        raise IOError("Missing template info: template_dir and template_label are both required")

    # Grab template data
    templateGrabber = io.getTemplate(template_dir=template_dir,
                                     template_label=template_label,
                                     wdir=wdir)

    # Convert nii to mif
    dwiConvert = pe.Node(mrt.MRConvert(), name='dwiConvert')
    dwiConvert.base_dir = wdir
    dwiConvert.inputs.nthreads = nthreads
    dwiConvert.interface.num_threads = nthreads

    # dwi2response - included but not used
    dwi2response = pe.Node(mrt.ResponseSD(), name='dwi2response')
    dwi2response.base_dir = wdir
    dwi2response.inputs.algorithm = 'dhollander'
    dwi2response.inputs.wm_file = 'space-dwi_model-CSD_WMResp.txt'
    dwi2response.inputs.gm_file = 'space-dwi_model-CSD_GMResp.txt'
    dwi2response.inputs.csf_file = 'space-dwi_model-CSD_CSFResp.txt'
    dwi2response.inputs.max_sh = lmax
    dwi2response.inputs.shell = shells
    dwi2response.inputs.nthreads = nthreads
    dwi2response.interface.num_threads = nthreads
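
    # Note: shell and max_sh are paired element-wise per b-value shell
    # (b=0 -> lmax 0, b=1000 and b=2000 -> lmax 8), assuming the wrapped
    # dwi2response follows the usual MRtrix per-shell -lmax convention.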

    # Convert mask (nii) to mif
    maskConvert = pe.Node(mrt.MRConvert(), name='maskConvert')
    maskConvert.base_dir = wdir
    maskConvert.inputs.nthreads = nthreads
    maskConvert.interface.num_threads = nthreads

    # dwi2fod
    dwi2fod = pe.Node(mrt.EstimateFOD(), name='dwi2fod')
    dwi2fod.base_dir = wdir
    dwi2fod.inputs.algorithm = 'msmt_csd'
    dwi2fod.inputs.shell = shells
    dwi2fod.inputs.wm_odf = 'space-dwi_model-CSD_WMFOD.mif'
    if sshell is False:
        dwi2fod.inputs.gm_odf = 'space-dwi_model-CSD_GMFOD.mif'
    dwi2fod.inputs.csf_odf = 'space-dwi_model-CSD_CSFFOD.mif'
    dwi2fod.inputs.nthreads = nthreads
    dwi2fod.interface.num_threads = nthreads

    # mtnormalise
    mtnormalise = pe.Node(mrt.MTNormalise(), name='mtnormalise')
    mtnormalise.base_dir = wdir
    mtnormalise.inputs.out_wm = 'space-dwi_model-CSD_WMFODNorm.mif'
    if sshell is False:
        mtnormalise.inputs.out_gm = 'space-dwi_model-CSD_GMFODNorm.mif'
    mtnormalise.inputs.out_csf = 'space-dwi_model-CSD_CSFFODNorm.mif'
    mtnormalise.inputs.nthreads = nthreads
    mtnormalise.interface.num_threads = nthreads

    # Registration
    MRRegister = pe.Node(mrt.MRRegister(), name='MRRegister')
    MRRegister.base_dir = wdir
    # MRRegister.inputs.ref_file = template
    MRRegister.inputs.nl_warp = [
        'from-dwi_to-Template_xfm.mif', 'from-Template_to-dwi_xfm.mif'
    ]
    if noreorient is not False:
        MRRegister.inputs.noreorientation = noreorient
    MRRegister.inputs.nthreads = nthreads
    MRRegister.interface.num_threads = nthreads

    # Transforms
    WarpSelect1 = pe.Node(niu.Select(), name='WarpSelect1')
    WarpSelect1.base_dir = wdir
    WarpSelect1.inputs.index = [0]
    WarpSelect1.interface.num_threads = nthreads

    WarpSelect2 = pe.Node(niu.Select(), name='WarpSelect2')
    WarpSelect2.base_dir = wdir
    WarpSelect2.inputs.index = [1]
    WarpSelect2.interface.num_threads = nthreads
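
    # MRRegister's nl_warp output is a two-element list: index 0 is the
    # dwi->Template warp, index 1 the Template->dwi inverse. Only WarpSelect1
    # (index 0) feeds the transform nodes below; WarpSelect2 is defined but
    # not connected further in this workflow.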

    # Warp data
    MaskTransform = pe.Node(mrt.MRTransform(), name='MaskTransform')
    MaskTransform.base_dir = wdir
    MaskTransform.inputs.out_file = 'space-Template_brainmask.mif'
    MaskTransform.inputs.nthreads = nthreads
    MaskTransform.interface.num_threads = nthreads

    FODTransform = pe.Node(mrt.MRTransform(), name='FODTransform')
    FODTransform.base_dir = wdir
    FODTransform.inputs.out_file = 'space-Template_model-CSD_WMFODNorm.mif'
    FODTransform.inputs.nthreads = nthreads
    FODTransform.interface.num_threads = nthreads

    # Tensor processing
    DWINormalise = pe.Node(mrt.DWINormalise(), name='DWINormalise')
    DWINormalise.base_dir = wdir
    DWINormalise.inputs.out_file = 'space-dwi_dwiNorm.mif'
    DWINormalise.inputs.nthreads = nthreads
    DWINormalise.interface.num_threads = nthreads

    DWITransform = pe.Node(mrt.MRTransform(), name='DWITransform')
    DWITransform.base_dir = wdir
    DWITransform.inputs.out_file = 'space-Template_dwiNorm.mif'
    DWITransform.inputs.nthreads = nthreads
    DWITransform.interface.num_threads = nthreads

    FitTensor = pe.Node(mrt.FitTensor(), name='FitTensor')
    FitTensor.base_dir = wdir
    FitTensor.inputs.out_file = 'space-Template_desc-WLS_model-DTI_tensor.mif'
    FitTensor.inputs.nthreads = nthreads
    FitTensor.interface.num_threads = nthreads

    TensorMetrics = pe.Node(mrt.TensorMetrics(), name='TensorMetrics')
    TensorMetrics.base_dir = wdir
    TensorMetrics.inputs.out_fa = 'space-Template_model-DTI_FA.mif'
    TensorMetrics.inputs.out_adc = 'space-Template_model-DTI_MD.mif'
    TensorMetrics.inputs.out_ad = 'space-Template_model-DTI_AD.mif'
    TensorMetrics.inputs.out_rd = 'space-Template_model-DTI_RD.mif'
    TensorMetrics.inputs.nthreads = nthreads
    TensorMetrics.interface.num_threads = nthreads
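
    # The tensor branch fits the tensor to the intensity-normalised DWI after
    # warping it (and the brain mask) into template space, so the FA/MD/AD/RD
    # maps named above are all defined in template space.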

    # Build workflow
    workflow = pe.Workflow(name=name)

    # Single shell
    if sshell is True:
        workflow.connect([
            # Compute FOD
            (dwiConvert, dwi2response, [('out_file', 'in_file')]),
            (dwiConvert, dwi2fod, [('out_file', 'in_file')]),
            (dwi2response, dwi2fod, [('wm_file', 'wm_txt'),
                                     ('csf_file', 'csf_txt')]),
            (dwi2fod, mtnormalise, [('wm_odf', 'in_wm'),
                                    ('csf_odf', 'in_csf')]),
            (maskConvert, dwi2response, [('out_file', 'in_mask')]),
            (maskConvert, dwi2fod, [('out_file', 'mask_file')]),
            (maskConvert, mtnormalise, [('out_file', 'mask')]),
            (maskConvert, MRRegister, [('out_file', 'mask1')]),
            (templateGrabber, MRRegister, [('wm_fod', 'ref_file'),
                                           ('mask', 'mask2')]),
            (mtnormalise, MRRegister, [('out_wm', 'in_file')]),
            (MRRegister, WarpSelect1, [('nl_warp', 'inlist')]),
            (MRRegister, WarpSelect2, [('nl_warp', 'inlist')]),
            (maskConvert, MaskTransform, [('out_file', 'in_file')]),
            (WarpSelect1, MaskTransform, [('out', 'warp')]),
            (mtnormalise, FODTransform, [('out_wm', 'in_file')]),
            (WarpSelect1, FODTransform, [('out', 'warp')]),
            # Compute tensors
            (dwiConvert, DWINormalise, [('out_file', 'in_file')]),
            (maskConvert, DWINormalise, [('out_file', 'in_mask')]),
            (DWINormalise, DWITransform, [('out_file', 'in_file')]),
            (WarpSelect1, DWITransform, [('out', 'warp')]),
            (DWITransform, FitTensor, [('out_file', 'in_file')]),
            (MaskTransform, FitTensor, [('out_file', 'in_mask')]),
            (FitTensor, TensorMetrics, [('out_file', 'in_file')]),
            (MaskTransform, TensorMetrics, [('out_file', 'in_mask')])
        ])

    # For multi-shell
    else:
        workflow.connect([
            # Compute FOD
            (dwiConvert, dwi2response, [('out_file', 'in_file')]),
            (dwiConvert, dwi2fod, [('out_file', 'in_file')]),
            (dwi2response, dwi2fod, [('wm_file', 'wm_txt'),
                                     ('gm_file', 'gm_txt'),
                                     ('csf_file', 'csf_txt')]),
            (dwi2fod, mtnormalise, [('wm_odf', 'in_wm'), ('gm_odf', 'in_gm'),
                                    ('csf_odf', 'in_csf')]),
            (maskConvert, dwi2response, [('out_file', 'in_mask')]),
            (maskConvert, dwi2fod, [('out_file', 'mask_file')]),
            (maskConvert, mtnormalise, [('out_file', 'mask')]),
            (maskConvert, MRRegister, [('out_file', 'mask1')]),
            (templateGrabber, MRRegister, [('wm_fod', 'ref_file'),
                                           ('mask', 'mask2')]),
            (mtnormalise, MRRegister, [('out_wm', 'in_file')]),
            (MRRegister, WarpSelect1, [('nl_warp', 'inlist')]),
            (MRRegister, WarpSelect2, [('nl_warp', 'inlist')]),
            (maskConvert, MaskTransform, [('out_file', 'in_file')]),
            (WarpSelect1, MaskTransform, [('out', 'warp')]),
            (mtnormalise, FODTransform, [('out_wm', 'in_file')]),
            (WarpSelect1, FODTransform, [('out', 'warp')]),
            # Compute tensors
            (dwiConvert, DWINormalise, [('out_file', 'in_file')]),
            (maskConvert, DWINormalise, [('out_file', 'in_mask')]),
            (DWINormalise, DWITransform, [('out_file', 'in_file')]),
            (WarpSelect1, DWITransform, [('out', 'warp')]),
            (DWITransform, FitTensor, [('out_file', 'in_file')]),
            (MaskTransform, FitTensor, [('out_file', 'in_mask')]),
            (FitTensor, TensorMetrics, [('out_file', 'in_file')]),
            (MaskTransform, TensorMetrics, [('out_file', 'in_mask')])
        ])

    return workflow
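
# A minimal usage sketch for dholl_preproc_wf (an illustration, not from the
# original source; the paths and template label below are placeholders). The
# dwiConvert and maskConvert nodes still need their in_file inputs connected,
# e.g. from a SelectFiles/BIDSDataGrabber node in a parent workflow, before
# anything can run.
#
#   preproc = dholl_preproc_wf(shells=[0, 1000, 2000], lmax=[0, 8, 8],
#                              template_dir='/path/to/template',
#                              template_label='template01',
#                              wdir='/scratch/work', nthreads=4)
#   preproc.write_graph(graph2use='colored')  # inspect the node graph

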
def create_DWI_workflow(
    subject_list,
    bids_dir,
    work_dir,
    out_dir,
    bids_templates,
):

    # create initial workflow
    wf = Workflow(name='DWI', base_dir=work_dir)

    # use infosource to iterate workflow across subject list
    n_infosource = Node(interface=IdentityInterface(fields=['subject_id']),
                        name="subject_source"
                        # input: 'subject_id'
                        # output: 'subject_id'
                        )
    # runs the node with subject_id = each element in subject_list
    n_infosource.iterables = ('subject_id', subject_list)

    # select matching files from bids_dir
    n_selectfiles = Node(interface=SelectFiles(templates=bids_templates,
                                               base_directory=bids_dir),
                         name='get_subject_data')
    wf.connect([(n_infosource, n_selectfiles, [('subject_id', 'subject_id_p')])
                ])

    ########## IMPLEMENTING MRTRIX COMMANDS FOR IMAGE ANALYSIS #######################################

    ## 1) Preprocessing of Data
    # https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html
    # DWIDenoise to remove Gaussian noise
    n_denoise = Node(interface=mrt.DWIDenoise(), name='n_denoise')
    wf.connect([(n_selectfiles, n_denoise, [('DWI_all', 'in_file')])])
    # datasink
    n_datasink = Node(interface=DataSink(base_directory=out_dir),
                      name='datasink')
    # output denoised data into 'DWI_all_denoised'
    wf.connect([(n_selectfiles, n_datasink, [('all_b0_PA',
                                              'all_b0_PA_unchanged')]),
                (n_denoise, n_datasink, [('out_file', 'DWI_all_denoised')])])

    # MRDeGibbs to remove Gibbs ringing artifact
    n_degibbs = Node(
        interface=mrt.MRDeGibbs(out_file='DWI_all_denoised_degibbs.mif'),
        name='n_degibbs')
    # input denoised data into degibbs function
    wf.connect([(n_denoise, n_degibbs, [('out_file', 'in_file')])])
    # output degibbs data into 'DWI_all_denoised_degibbs.mif'
    wf.connect([(n_degibbs, n_datasink, [('out_file',
                                          'DWI_all_denoised_degibbs.mif')])])

    # DWI Extract to extract b0 volumes from multi-b image data
    n_dwiextract = Node(interface=mrt.DWIExtract(bzero=True,
                                                 out_file='b0vols.mif'),
                        name='n_dwiextract')
    # input degibbs data into dwiextract function
    wf.connect([(n_degibbs, n_dwiextract, [('out_file', 'in_file')])])
    # output extracted b0 volume from degibbs data (contains multiple b values)
    wf.connect([(n_dwiextract, n_datasink, [('out_file', 'noddi_b0_degibbs')])
                ])

    # MRcat to combine b0 volumes from input image and reverse phase encoded data
    n_mrcat = Node(
        interface=mrcatfunc.MRCat(
            #axis=3,
            out_file='b0s.mif'),
        name='n_mrcat')
    # input DTI images (all b0 volumes; reverse phase encoded) for concatenating
    wf.connect([(n_selectfiles, n_mrcat, [('DTI_B0_PA', 'in_file1')])])
    # input b0 volumes from NODDI data for concatenating
    wf.connect([(n_dwiextract, n_mrcat, [('out_file', 'in_file2')])])
    # output the mrcat file into 'noddi_and_PA_b0s.mif'
    wf.connect([(n_mrcat, n_datasink, [('out_file', 'noddi_and_PA_b0s.mif')])])

    # DWIfslpreproc for image pre-processing using FSL's eddy tool
    n_dwifslpreproc = Node(interface=preprocfunc.DWIFslPreProc(
        out_file='preprocessedDWIs.mif', use_header=True),
                           name='n_dwifslpreproc')
    # output of degibbs as input for preprocessing
    wf.connect([(n_degibbs, n_dwifslpreproc, [('out_file', 'in_file')])])
    # output of mrcat (extracted b0 volumes) as se_epi input
    wf.connect([(n_mrcat, n_dwifslpreproc, [('out_file', 'se_epi_file')])])
    # output of dwifslpreproc into 'preprocessedDWIs.mif'
    wf.connect([(n_dwifslpreproc, n_datasink, [('out_file',
                                                'preprocessedDWIs.mif')])])

    # DWI bias correct for B1 field inhomogeneity correction
    n_dwibiascorrect = Node(
        interface=preprocess.DWIBiasCorrect(use_ants=True),
        name='n_dwibiascorrect',
    )
    # input preprocessed data
    wf.connect([(n_dwifslpreproc, n_dwibiascorrect, [('out_file', 'in_file')])
                ])
    # output biascorrect data into 'ANTSpreprocessedDWIs.mif'
    wf.connect([(n_dwibiascorrect, n_datasink,
                 [('out_file', 'ANTSpreprocessedDWIs.mif')])])

    # DWI2mask to compute whole brain mask from bias corrected data
    n_dwi2mask = Node(interface=mrt.BrainMask(out_file='mask.mif'),
                      name='n_dwi2mask')
    wf.connect([(n_dwibiascorrect, n_dwi2mask, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2mask, n_datasink, [('out_file', 'mask.mif')])])
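
    # Preprocessing so far: denoise -> degibbs -> dwifslpreproc (eddy/topup,
    # driven by the main-series b=0s concatenated with the reverse
    # phase-encoded b=0s as se_epi) -> ANTs bias field correction -> brain
    # mask. Downstream nodes consume the bias-corrected series and this mask.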

    ##################################################################################
    ## 2) Fixel-based analysis
    # DWI2response to estimate response functions for spherical deconvolution
    n_dwi2response = Node(interface=mrt.ResponseSD(algorithm='dhollander',
                                                   wm_file='wm_res.txt',
                                                   gm_file='gm_res.txt',
                                                   csf_file='csf_res.txt'),
                          name='n_dwi2response')
    # input bias corrected data for response function estimation
    wf.connect([(n_dwibiascorrect, n_dwi2response, [('out_file', 'in_file')])])
    # output WM, GM, CSF response text files
    wf.connect([(n_dwi2response, n_datasink, [('wm_file', 'wm_res.txt')])])
    wf.connect([(n_dwi2response, n_datasink, [('gm_file', 'gm_res.txt')])])
    wf.connect([(n_dwi2response, n_datasink, [('csf_file', 'csf_res.txt')])])

    # DWI2fod for fibre orientation distribution estimation (FOD)
    n_dwi2fod = Node(interface=mrt.ConstrainedSphericalDeconvolution(
        algorithm='msmt_csd',
        wm_odf='wmfod.mif',
        gm_odf='gmfod.mif',
        csf_odf='csffod.mif'),
                     name='n_dwi2fod')
    # utilise dwi2fod response files as input
    wf.connect([(n_dwibiascorrect, n_dwi2fod, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('wm_file', 'wm_txt')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('gm_file', 'gm_txt')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('csf_file', 'csf_txt')])])
    # output WM, GM and CSF FODs for saving
    wf.connect([(n_dwi2fod, n_datasink, [('wm_odf', 'wmfod.mif')])])
    wf.connect([(n_dwi2fod, n_datasink, [('gm_odf', 'gmfod.mif')])])
    wf.connect([(n_dwi2fod, n_datasink, [('csf_odf', 'csffod.mif')])])

    # MRconvert to select the first volume of the WM FOD (the first of its 45 SH coefficient volumes)
    n_mrconvert_fod = Node(interface=utils.MRConvert(out_file='Zwmfod.mif',
                                                     coord=[3, 0]),
                           name='n_mrconvert_fod')
    # utilise WM FOD as input
    wf.connect([(n_dwi2fod, n_mrconvert_fod, [('wm_odf', 'in_file')])])
    # output z component of WM FOD
    wf.connect([(n_mrconvert_fod, n_datasink, [('out_file', 'Zwmfod.mif')])])

    # MRcat to concatenate the WM, GM and CSF FOD files to visualise their distributions throughout the brain
    n_mrcat_fod = Node(interface=mrcatfunc.MRCat(out_file='vf.mif'),
                       name='n_mrcat_fod')
    # connect Zwmfod, gmfod and csffod as inputs
    wf.connect([(n_mrconvert_fod, n_mrcat_fod, [('out_file', 'in_file1')])])
    wf.connect([(n_dwi2fod, n_mrcat_fod, [('gm_odf', 'in_file2')])])
    wf.connect([(n_dwi2fod, n_mrcat_fod, [('csf_odf', 'in_file3')])])
    # output the mrcat file into file 'vf.mif'
    wf.connect([(n_mrcat_fod, n_datasink, [('out_file', 'vf.mif')])])

    # fod2fixel
    # Perform segmentation of continuous FODs to produce discrete fixels
    # OUTPUTS: -afd afd.mif -peak peak.mif -disp disp.mif
    n_fod2fixel = Node(
        interface=fod2fixelfunc.fod2fixel(
            out_file='wmfixels',
            #afd_file = 'afd.mif',
            peak_file='peak.mif',
            disp_file='disp.mif'),
        name='n_fod2fixel')
    # trial multiple peak-value and integral thresholds for fixel segmentation
    n_fod2fixel.iterables = [('fmls_peak_value', [0, 0.10, 0.50]),
                             ('fmls_integral', [0, 0.10, 0.50])]

    # obtain WM fibre image as input
    wf.connect([(n_dwi2fod, n_fod2fixel, [('wm_odf', 'in_file')])])
    # outputs of fod2fixel saved
    wf.connect([(n_fod2fixel, n_datasink, [('out_file', 'wmfixels')])])
    wf.connect([(n_fod2fixel, n_datasink, [('afd_file', 'afd.mif')])])
    wf.connect([(n_fod2fixel, n_datasink, [('peak_file', 'peak.mif')])])
    wf.connect([(n_fod2fixel, n_datasink, [('disp_file', 'disp.mif')])])

    # fixel2peaks to convert data in the fixel directory format into 4D image of 3-vectors
    n_fixel2peaks = Node(interface=fixel2peaksfunc.fixel2peaks(
        out_file='peaks_wmdirections.mif'),
                         name='n_fixel2peaks')
    # look at multiple values for maximum number of fixels in each voxel
    n_fixel2peaks.iterables = ('number', [1, 2, 3])

    # obtain directions file in output folder of fod2fixel, as input
    wf.connect([(n_fod2fixel, n_fixel2peaks, [('out_file', 'in_file')])])
    # output of fixel2peaks saved as 'peaks_wmdirections.mif'
    wf.connect([(n_fixel2peaks, n_datasink, [('out_file',
                                              'peaks_wmdirections.mif')])])
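
    # Because n_fod2fixel and n_fixel2peaks both define iterables, nipype
    # expands every downstream node once per parameter combination, and the
    # DataSink output folders are parameterised accordingly (by default).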

    # MRmath to find normalised value of peak WM directions
    n_mrmath = Node(interface=mrt.MRMath(
        axis=3, operation='norm', out_file='norm_peaks_wmdirections.mif'),
                    name='n_mrmath')
    # input peak fixel data
    wf.connect([(n_fixel2peaks, n_mrmath, [('out_file', 'in_file')])])
    # output saved into 'norm_peaks_wmdirections.mif'
    wf.connect([(n_mrmath, n_datasink, [('out_file',
                                         'norm_peaks_wmdirections.mif')])])

    # MRcalc to divide peak WM direction by normalised value
    n_mrcalc = Node(interface=mrcalcfunc.MRCalc(operation='divide',
                                                out_file='wm_peak_dir.mif'),
                    name='n_mrcalc')
    # fixel2peaks image as input 1
    wf.connect([(n_fixel2peaks, n_mrcalc, [('out_file', 'in_file1')])])
    # normalised fixel2peak image as input 2
    wf.connect([(n_mrmath, n_mrcalc, [('out_file', 'in_file2')])])
    # save output image as 'WM_peak_dir.mif'
    wf.connect([(n_mrcalc, n_datasink, [('out_file', 'WM_peak_dir.mif')])])

    # MRconvert to extract Z component of peak directions
    n_mrconvert2 = Node(interface=utils.MRConvert(
        out_file='Zpeak_WM_Directions.mif', coord=[3, 2]),
                        name='n_mrconvert2')
    # input normalised peak direction file
    wf.connect([(n_mrcalc, n_mrconvert2, [('out_file', 'in_file')])])
    # save output as 'Zpeak_WM_Directions.mif'
    wf.connect([(n_mrconvert2, n_datasink, [('out_file',
                                             'Zpeak_WM_Directions.mif')])])

    # MRcalc to find absolute value of peak fibre directions
    n_mrcalc2 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='absZpeak_WM_Directions.mif'),
                     name='n_mrcalc2')
    # input z peaks image
    wf.connect([(n_mrconvert2, n_mrcalc2, [('out_file', 'in_file1')])])
    # save output as 'absZpeak_WM_Directions.mif'
    wf.connect([(n_mrcalc2, n_datasink, [('out_file',
                                          'absZpeak_WM_Directions.mif')])])

    # MRcalc to get angle by doing inverse cosine
    n_mrcalc3 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acosZpeak_WM_Directions.mif'),
                     name='n_mrcalc3')
    # input normalised z component of peaks image
    wf.connect([(n_mrcalc2, n_mrcalc3, [('out_file', 'in_file1')])])
    # save output as 'acosZpeak_WM_Directions.mif'
    wf.connect([(n_mrcalc3, n_datasink, [('out_file',
                                          'acosZpeak_WM_Directions.mif')])])

    # MRcalc to convert the angle of the peak fibre (w.r.t. the z axis) to degrees
    n_mrcalc4 = Node(interface=mrcalcfunc.MRCalc(
        operation='multiply', operand=180, out_file='Fixel1_Z_angle.mif'),
                     name='n_mrcalc4')
    # input inverse cosine image of peak fibre
    wf.connect([(n_mrcalc3, n_mrcalc4, [('out_file', 'in_file1')])])
    # output image as 'Fixel1_Z_angle.mif'
    wf.connect([(n_mrcalc4, n_datasink, [('out_file', 'Fixel1_Z_angle.mif')])])

    # MRcalc to divide by pi to finish converting from radians to degrees
    n_mrcalc5 = Node(interface=mrcalcfunc.MRCalc(
        operation='divide',
        operand=3.14159265,
        out_file='Fixel1_Z_cos_deg.mif'),
                     name='n_mrcalc5')
    # input image multiplied by 180
    wf.connect([(n_mrcalc4, n_mrcalc5, [('out_file', 'in_file1')])])
    # save output as 'Fixel1_Z_cos_deg.mif'
    wf.connect([(n_mrcalc5, n_datasink, [('out_file', 'Fixel1_Z_cos_deg.mif')])
                ])
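
    # The n_mrcalc2 -> n_mrcalc5 chain above computes, per voxel,
    #     angle_deg = acos(|peak_z|) * 180 / pi
    # i.e. the inclination of the dominant fixel relative to the scanner z
    # axis, restricted to [0, 90] degrees by the abs(). It is split across
    # nodes because this MRCalc wrapper appears to take a single operation and
    # operand per call (an assumption about the local mrcalcfunc interface).
    # Quick sanity check in plain Python: math.degrees(math.acos(abs(0.5)))
    # is ~60.0.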

    ##################################################################################
    ## 3) Tensor-based analysis
    # dwi2tensor to compute tensor from biascorrected DWI image
    n_dwi2tensor = Node(interface=mrt.FitTensor(out_file='dti.mif'),
                        name='n_dwi2tensor')
    # input bias corrected image
    wf.connect([(n_dwibiascorrect, n_dwi2tensor, [('out_file', 'in_file')])])
    # use the brain mask so tensors are only computed within the brain
    wf.connect([(n_dwi2mask, n_dwi2tensor, [('out_file', 'in_mask')])])
    # output data into 'dt.mif'
    wf.connect([(n_dwi2tensor, n_datasink, [('out_file', 'dt.mif')])])

    # tensor2metric to convert tensors to generate maps of tensor-derived parameters
    n_tensor2metric = Node(interface=tensor2metricfunc.tensor2metric(
        modulate='none', num=1, vector_file='eigenvector.mif'),
                           name='n_tensor2metric')
    # input tensor image
    wf.connect([(n_dwi2tensor, n_tensor2metric, [('out_file', 'input_file')])])
    # save output eigenvectors of the diffusion tensor
    wf.connect([(n_tensor2metric, n_datasink, [('vector_file',
                                                'eigenvector.mif')])])

    # MRconvert to get eigenvector w.r.t z direction (main field)
    n_mrconvert3 = Node(interface=utils.MRConvert(coord=[3, 2],
                                                  out_file='eigenvectorZ.mif'),
                        name='n_mrconvert3')
    # input eigenvector file from tensor2metric
    wf.connect([(n_tensor2metric, n_mrconvert3, [('vector_file', 'in_file')])])
    # save output as 'eigenvectorZ.mif'
    wf.connect([(n_mrconvert3, n_datasink, [('out_file', 'eigenvectorZ.mif')])
                ])

    # ALL SUBSEQUENT STEPS GET ANGLE IN DEGREES
    # MRcalc to find absolute value of z eigenvector file
    n_mrcalc6 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='abs_eigenvectorZ.mif'),
                     name='n_mrcalc6')
    # z eigenvector image as input
    wf.connect([(n_mrconvert3, n_mrcalc6, [('out_file', 'in_file1')])])
    # save output as 'abs_eigenvectorZ.mif'
    wf.connect([(n_mrcalc6, n_datasink, [('out_file', 'abs_eigenvectorZ.mif')])
                ])

    # MRcalc to get angle by doing inverse cosine
    n_mrcalc7 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acos_eigenvectorZ.mif'),
                     name='n_mrcalc7')
    # input absolute value of z eigenvector image
    wf.connect([(n_mrcalc6, n_mrcalc7, [('out_file', 'in_file1')])])
    # save output as 'acos_eigenvectorZ.mif'
    wf.connect([(n_mrcalc7, n_datasink, [('out_file', 'acos_eigenvectorZ.mif')
                                         ])])

    # MRcalc to convert angle to degrees
    n_mrcalc8 = Node(
        interface=mrcalcfunc.MRCalc(operation='multiply',
                                    operand=180,
                                    out_file='degrees_eigenvectorZ.mif'),
        name='n_mrcalc8')
    # input inverse cosine image of z eigenvector
    wf.connect([(n_mrcalc7, n_mrcalc8, [('out_file', 'in_file1')])])
    # save output as 'degrees_eigenvectorZ.mif'
    wf.connect([(n_mrcalc8, n_datasink, [('out_file',
                                          'degrees_eigenvectorZ.mif')])])

    # MRcalc to divide by pi to finish converting from radians to degrees
    n_mrcalc9 = Node(interface=mrcalcfunc.MRCalc(operation='divide',
                                                 operand=3.14159265,
                                                 out_file='dti_z_cos_deg.mif'),
                     name='n_mrcalc9')
    # input z eigenvector image multiplied by 180
    wf.connect([(n_mrcalc8, n_mrcalc9, [('out_file', 'in_file1')])])
    # save output as 'dti_z_cos_deg.mif'
    wf.connect([(n_mrcalc9, n_datasink, [('out_file', 'dti_z_cos_deg.mif')])])

    # MRcalc to give difference image between fixel based and tensor based outputs
    n_mrcalc10 = Node(interface=mrcalcfunc.MRCalc(
        operation='subtract', out_file='diff_imag_tensor_minus_fixel.mif'),
                      name='n_mrcalc10')
    # input tensor-based angle image of the whole brain
    wf.connect([(n_mrcalc9, n_mrcalc10, [('out_file', 'in_file1')])])
    # input fixel-based angle image
    wf.connect([(n_mrcalc5, n_mrcalc10, [('out_file', 'in_file2')])])
    # output difference image as 'diff_imag_tensor_minus_fixel.mif'
    wf.connect([(n_mrcalc10, n_datasink,
                 [('out_file', 'diff_imag_tensor_minus_fixel.mif')])])

    #####################################################################################
    ## 4) Tensor based analysis on WM fibres only (NOT WHOLE BRAIN TENSORS)

    # MRthreshold to create WM mask from WM FOD (created earlier)
    n_mrthreshold = Node(interface=mrthresholdfunc.MRThreshold(
        out_file='thresholded_wmfod.mif'),
                         name='n_mrthreshold')
    # input WM FOD
    wf.connect([(n_dwi2fod, n_mrthreshold, [('wm_odf', 'in_file')])])
    # output thresholded WM FOD
    wf.connect([(n_mrthreshold, n_datasink, [('out_file',
                                              'thresholded_wmfod.mif')])])

    # MRconvert to extract 1st volume of thresholded WM FOD
    n_mrconvert4 = Node(interface=utils.MRConvert(coord=[3, 0],
                                                  out_file='WMmask.mif'),
                        name='n_mrconvert4')
    # input thresholded wmfod
    wf.connect([(n_mrthreshold, n_mrconvert4, [('out_file', 'in_file')])])
    # save output as 'WMmask.mif'
    wf.connect([(n_mrconvert4, n_datasink, [('out_file', 'WMmask.mif')])])

    # MRcalc to multiply the WM mask with the dti image to get tensors only in WM regions
    n_mrcalc11 = Node(interface=mrcalcfunc.MRCalc(operation='multiply',
                                                  out_file='WM_dt.mif'),
                      name='n_mrcalc11')
    # WM mask as input 1
    wf.connect([(n_mrconvert4, n_mrcalc11, [('out_file', 'in_file1')])])
    # dti image as input 2
    wf.connect([(n_dwi2tensor, n_mrcalc11, [('out_file', 'in_file2')])])
    # save output as 'WM_dt.mif'
    wf.connect([(n_mrcalc11, n_datasink, [('out_file', 'WM_dt.mif')])])

    # tensor2metric to convert tensors to generate maps of tensor-derived parameters
    n_tensor2metric2 = Node(interface=tensor2metricfunc.tensor2metric(
        modulate='none', num=1, vector_file='WMeigenvector.mif'),
                            name='n_tensor2metric2')
    # input tensor image
    wf.connect([(n_mrcalc11, n_tensor2metric2, [('out_file', 'input_file')])])
    # save output eigenvectors of the diffusion tensor
    wf.connect([(n_tensor2metric2, n_datasink, [('vector_file',
                                                 'WMeigenvector.mif')])])

    # MRconvert to get eigenvector w.r.t z direction (main field)
    n_mrconvert5 = Node(interface=utils.MRConvert(
        coord=[3, 2], out_file='WMeigenvectorZ.mif'),
                        name='n_mrconvert5')
    # input eigenvector file from tensor2metric
    wf.connect([(n_tensor2metric2, n_mrconvert5, [('vector_file', 'in_file')])
                ])
    # save output as 'WMeigenvectorZ.mif'
    wf.connect([(n_mrconvert5, n_datasink, [('out_file', 'WMeigenvectorZ.mif')
                                            ])])

    # ALL SUBSEQUENT STEPS GET ANGLE IN DEGREES
    # MRcalc to find absolute value of z eigenvector file
    n_mrcalc12 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='WM_abs_eigenvectorZ.mif'),
                      name='n_mrcalc12')
    # z eigenvector image as input
    wf.connect([(n_mrconvert5, n_mrcalc12, [('out_file', 'in_file1')])])
    # save output as 'WM_abs_eigenvectorZ.mif'
    wf.connect([(n_mrcalc12, n_datasink, [('out_file',
                                           'WM_abs_eigenvectorZ.mif')])])

    # MRcalc to get angle by doing inverse cosine
    n_mrcalc13 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acos_WMeigenvectorZ.mif'),
                      name='n_mrcalc13')
    # input absolute value of z eigenvector image
    wf.connect([(n_mrcalc12, n_mrcalc13, [('out_file', 'in_file1')])])
    # save output as 'acos_WMeigenvectorZ.mif'
    wf.connect([(n_mrcalc13, n_datasink, [('out_file',
                                           'acos_WMeigenvectorZ.mif')])])

    # MRcalc to convert angle to degrees
    n_mrcalc14 = Node(interface=mrcalcfunc.MRCalc(
        operation='multiply',
        operand=180,
        out_file='degrees_WMeigenvectorZ.mif'),
                      name='n_mrcalc14')
    # input inverse cosine image of WM z eigenvector
    wf.connect([(n_mrcalc13, n_mrcalc14, [('out_file', 'in_file1')])])
    # save output as 'degrees_WMeigenvectorZ.mif'
    wf.connect([(n_mrcalc14, n_datasink, [('out_file',
                                           'degrees_WMeigenvectorZ.mif')])])

    # MRcalc to divide by pi to finish converting from radians to degrees
    n_mrcalc15 = Node(
        interface=mrcalcfunc.MRCalc(operation='divide',
                                    operand=3.14159265,
                                    out_file='WMdti_z_cos_deg.mif'),
        name='n_mrcalc15')
    # input WM z eigenvector image multiplied by 180
    wf.connect([(n_mrcalc14, n_mrcalc15, [('out_file', 'in_file1')])])
    # save output as 'WMdti_z_cos_deg.mif'
    wf.connect([(n_mrcalc15, n_datasink, [('out_file', 'WMdti_z_cos_deg.mif')])
                ])

    # MRcalc to give difference image between fixel based and WM tensor based outputs
    n_mrcalc16 = Node(interface=mrcalcfunc.MRCalc(
        operation='subtract', out_file='diffImage_WMtensor_minus_fixel.mif'),
                      name='n_mrcalc16')
    # input WM tensor-based angle image
    wf.connect([(n_mrcalc15, n_mrcalc16, [('out_file', 'in_file1')])])
    # input fixel-based angle image
    wf.connect([(n_mrcalc5, n_mrcalc16, [('out_file', 'in_file2')])])
    # output difference image as 'diffImage_WMtensor_minus_fixel.mif'
    wf.connect([(n_mrcalc16, n_datasink,
                 [('out_file', 'diffImage_WMtensor_minus_fixel.mif')])])
    ######################################################################################
    return wf
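
# Hypothetical driver for create_DWI_workflow (an illustration, not from the
# original source). The BIDS template keys must match what the SelectFiles
# node above expects ('DWI_all', 'all_b0_PA', 'DTI_B0_PA'); the glob patterns
# and paths are placeholders only.
#
#   bids_templates = {
#       'DWI_all':   '{subject_id_p}/dwi/*dwi*.nii.gz',
#       'all_b0_PA': '{subject_id_p}/fmap/*_dir-PA*.nii.gz',
#       'DTI_B0_PA': '{subject_id_p}/fmap/*_dir-PA*.nii.gz',
#   }
#   wf = create_DWI_workflow(subject_list=['sub-01', 'sub-02'],
#                            bids_dir='/data/bids',
#                            work_dir='/data/work',
#                            out_dir='/data/derivatives',
#                            bids_templates=bids_templates)
#   wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})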