from nipype.testing import assert_equal
from nipype.interfaces.mrtrix3.tracking import Tractography


def test_Tractography_outputs():
    output_map = dict(
        out_file=dict(),
        out_seeds=dict(),
    )
    outputs = Tractography.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
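
# The yield-style check above targets the old nose runner. Under pytest, which
# dropped yield tests, the same verification can be written with plain asserts.
# A minimal sketch, reusing the Tractography import at the top of this listing:
def test_Tractography_outputs_pytest():
    output_map = dict(out_file=dict(), out_seeds=dict())
    outputs = Tractography.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            assert getattr(outputs.traits()[key], metakey) == value
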
def test_Tractography_inputs():
    input_map = dict(
        act_file=dict(argstr='-act %s', ),
        algorithm=dict(
            argstr='-algorithm %s',
            usedefault=True,
        ),
        angle=dict(argstr='-angle %f', ),
        args=dict(argstr='%s', ),
        backtrack=dict(argstr='-backtrack', ),
        bval_scale=dict(argstr='-bvalue_scaling %s', ),
        crop_at_gmwmi=dict(argstr='-crop_at_gmwmi', ),
        cutoff=dict(argstr='-cutoff %f', ),
        cutoff_init=dict(argstr='-initcutoff %f', ),
        downsample=dict(argstr='-downsample %f', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        grad_file=dict(argstr='-grad %s', ),
        grad_fsl=dict(argstr='-fslgrad %s %s', ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        in_bval=dict(),
        in_bvec=dict(argstr='-fslgrad %s %s', ),
        in_file=dict(
            argstr='%s',
            mandatory=True,
            position=-2,
        ),
        init_dir=dict(argstr='-initdirection %f,%f,%f', ),
        max_length=dict(argstr='-maxlength %f', ),
        max_seed_attempts=dict(argstr='-max_seed_attempts %d', ),
        max_tracks=dict(argstr='-maxnum %d', ),
        min_length=dict(argstr='-minlength %f', ),
        n_samples=dict(argstr='-samples %d', ),
        n_tracks=dict(argstr='-number %d', ),
        n_trials=dict(argstr='-trials %d', ),
        noprecompt=dict(argstr='-noprecomputed', ),
        nthreads=dict(
            argstr='-nthreads %d',
            nohash=True,
        ),
        out_file=dict(
            argstr='%s',
            mandatory=True,
            position=-1,
            usedefault=True,
        ),
        out_seeds=dict(argstr='-output_seeds %s', ),
        power=dict(argstr='-power %d', ),
        roi_excl=dict(argstr='-exclude %s', ),
        roi_incl=dict(argstr='-include %s', ),
        roi_mask=dict(argstr='-mask %s', ),
        seed_dynamic=dict(argstr='-seed_dynamic %s', ),
        seed_gmwmi=dict(
            argstr='-seed_gmwmi %s',
            requires=['act_file'],
        ),
        seed_grid_voxel=dict(
            argstr='-seed_grid_per_voxel %s %d',
            xor=['seed_image', 'seed_rnd_voxel'],
        ),
        seed_image=dict(argstr='-seed_image %s', ),
        seed_rejection=dict(argstr='-seed_rejection %s', ),
        seed_rnd_voxel=dict(
            argstr='-seed_random_per_voxel %s %d',
            xor=['seed_image', 'seed_grid_voxel'],
        ),
        seed_sphere=dict(argstr='-seed_sphere %f,%f,%f,%f', ),
        sph_trait=dict(argstr='%f,%f,%f,%f', ),
        step_size=dict(argstr='-step %f', ),
        stop=dict(argstr='-stop', ),
        terminal_output=dict(nohash=True, ),
        unidirectional=dict(argstr='-unidirectional', ),
        use_rk4=dict(argstr='-rk4', ),
    )
    inputs = Tractography.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
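
# A quick illustration of what the metadata above encodes (a sketch, not part
# of the original tests): each input trait's `argstr` is the template nipype
# uses when assembling the underlying `tckgen` call, and `position` pins where
# the positional arguments land.
def show_tractography_argstrs():
    spec = Tractography.input_spec()
    for name in ("algorithm", "in_file", "out_file"):
        trait = spec.traits()[name]
        print(name, trait.argstr, trait.position)


# The method below comes from a Clinica DWI-connectome pipeline class and wires
# the Tractography interface tested above into a full nipype workflow.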
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Notes:
            - If the `FSLOUTPUTTYPE` environment variable is not set, `nipype`
              defaults to NIFTI.

        Todo:
            - [x] Detect space automatically.
            - [ ] Allow for custom parcellations (See TODOs in utils).

        """
        import nipype.interfaces.freesurfer as fs
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.mrtrix3 as mrtrix3
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as npe
        from nipype.interfaces.mrtrix3.tracking import Tractography
        from nipype.interfaces.mrtrix.preprocess import MRTransform

        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
        from clinica.lib.nipype.interfaces.mrtrix3.reconst import EstimateFOD
        from clinica.utils.exceptions import ClinicaCAPSError
        from clinica.utils.mri_registration import (
            convert_flirt_transformation_to_mrtrix_transformation,
        )

        # Nodes
        # =====
        # B0 Extraction (only if space=b0)
        # -------------
        split_node = npe.Node(name="Reg-0-DWI-B0Extraction", interface=fsl.Split())
        split_node.inputs.output_type = "NIFTI_GZ"
        split_node.inputs.dimension = "t"
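        # The select node keeps only the first volume of the split series,
        # which serves as the b0 reference for the registration steps below.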
        select_node = npe.Node(name="Reg-0-DWI-B0Selection", interface=niu.Select())
        select_node.inputs.index = 0

        # B0 Brain Extraction (only if space=b0)
        # -------------------
        mask_node = npe.Node(name="Reg-0-DWI-BrainMasking", interface=fsl.ApplyMask())
        mask_node.inputs.output_type = "NIFTI_GZ"

        # T1-to-B0 Registration (only if space=b0)
        # ---------------------
        t12b0_reg_node = npe.Node(
            name="Reg-1-T12B0Registration",
            interface=fsl.FLIRT(
                dof=6,
                interp="spline",
                cost="normmi",
                cost_func="normmi",
            ),
        )
        t12b0_reg_node.inputs.output_type = "NIFTI_GZ"

        # MGZ File Conversion (only if space=b0)
        # -------------------
        t1_brain_conv_node = npe.Node(
            name="Reg-0-T1-T1BrainConvertion", interface=fs.MRIConvert()
        )
        wm_mask_conv_node = npe.Node(
            name="Reg-0-T1-WMMaskConvertion", interface=fs.MRIConvert()
        )

        # WM Transformation (only if space=b0)
        # -----------------
        wm_transform_node = npe.Node(
            name="Reg-2-WMTransformation", interface=fsl.ApplyXFM()
        )
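        # apply_xfm=True makes FLIRT apply the matrix computed by the
        # registration node instead of estimating a new transform.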
        wm_transform_node.inputs.apply_xfm = True

        # Nodes Generation
        # ----------------
        label_convert_node = npe.MapNode(
            name="0-LabelsConversion",
            iterfield=["in_file", "in_config", "in_lut", "out_file"],
            interface=mrtrix3.LabelConvert(),
        )
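        # labelconvert remaps the parcellation label values to the consecutive
        # indexing expected by the connectome step, using the lookup tables
        # returned by the two utility calls just below.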
        label_convert_node.inputs.in_config = utils.get_conversion_luts()
        label_convert_node.inputs.in_lut = utils.get_luts()

        # FSL flirt matrix to MRtrix matrix Conversion (only if space=b0)
        # --------------------------------------------
        fsl2mrtrix_conv_node = npe.Node(
            name="Reg-2-FSL2MrtrixConversion",
            interface=niu.Function(
                input_names=[
                    "in_source_image",
                    "in_reference_image",
                    "in_flirt_matrix",
                    "name_output_matrix",
                ],
                output_names=["out_mrtrix_matrix"],
                function=convert_flirt_transformation_to_mrtrix_transformation,
            ),
        )

        # Parc. Transformation (only if space=b0)
        # --------------------
        parc_transform_node = npe.MapNode(
            name="Reg-2-ParcTransformation",
            iterfield=["in_files", "out_filename"],
            interface=MRTransform(),
        )

        # Response Estimation
        # -------------------
        resp_estim_node = npe.Node(
            name="1a-ResponseEstimation", interface=mrtrix3.ResponseSD()
        )
        resp_estim_node.inputs.algorithm = "tournier"

        # FOD Estimation
        # --------------
        fod_estim_node = npe.Node(name="1b-FODEstimation", interface=EstimateFOD())
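        # "csd" runs constrained spherical deconvolution, consuming the
        # response function estimated by the previous node.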
        fod_estim_node.inputs.algorithm = "csd"

        # Tracts Generation
        # -----------------
        tck_gen_node = npe.Node(name="2-TractsGeneration", interface=Tractography())
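        # iFOD2 is MRtrix3's default probabilistic tracking algorithm; the
        # number of streamlines to generate comes from the n_tracks parameter.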
        tck_gen_node.inputs.select = self.parameters["n_tracks"]
        tck_gen_node.inputs.algorithm = "iFOD2"

        # Connectome Generation
        # ---------------------
        # only the parcellation and output filename should be iterable, the tck
        # file stays the same.
        conn_gen_node = npe.MapNode(
            name="3-ConnectomeGeneration",
            iterfield=["in_parc", "out_file"],
            interface=mrtrix3.BuildConnectome(),
        )

        # Print begin message
        # -------------------
        print_begin_message = npe.MapNode(
            interface=niu.Function(
                input_names=["in_bids_or_caps_file"],
                function=utils.print_begin_pipeline,
            ),
            iterfield="in_bids_or_caps_file",
            name="WriteBeginMessage",
        )

        # Print end message
        # -----------------
        print_end_message = npe.MapNode(
            interface=niu.Function(
                input_names=["in_bids_or_caps_file", "final_file"],
                function=utils.print_end_pipeline,
            ),
            iterfield=["in_bids_or_caps_file"],
            name="WriteEndMessage",
        )

        # CAPS File names Generation
        # --------------------------
        caps_filenames_node = npe.Node(
            name="CAPSFilenamesGeneration",
            interface=niu.Function(
                input_names="dwi_file",
                output_names=self.get_output_fields(),
                function=utils.get_caps_filenames,
            ),
        )

        # Connections
        # ===========
        # Computation of the diffusion model, tractography & connectome
        # -------------------------------------------------------------
        # fmt: off
        self.connect(
            [
                (self.input_node, print_begin_message, [("dwi_file", "in_bids_or_caps_file")]),
                (self.input_node, caps_filenames_node, [("dwi_file", "dwi_file")]),
                # Response Estimation
                (self.input_node, resp_estim_node, [("dwi_file", "in_file")]),  # Preproc. DWI
                (self.input_node, resp_estim_node, [("dwi_brainmask_file", "in_mask")]),  # B0 brain mask
                (self.input_node, resp_estim_node, [("grad_fsl", "grad_fsl")]),  # bvecs and bvals
                (caps_filenames_node, resp_estim_node, [("response", "wm_file")]),  # output response filename
                # FOD Estimation
                (self.input_node, fod_estim_node, [("dwi_file", "in_file")]),  # Preproc. DWI
                (resp_estim_node, fod_estim_node, [("wm_file", "wm_txt")]),  # Response (txt file)
                (self.input_node, fod_estim_node, [("dwi_brainmask_file", "mask_file")]),  # B0 brain mask
                (self.input_node, fod_estim_node, [("grad_fsl", "grad_fsl")]),  # bvecs and bvals
                (caps_filenames_node, fod_estim_node, [("fod", "wm_odf")]),  # output odf filename
                # Tracts Generation
                (fod_estim_node, tck_gen_node, [("wm_odf", "in_file")]),  # ODF file
                (caps_filenames_node, tck_gen_node, [("tracts", "out_file")]),  # output tck filename
                # Label Conversion
                (self.input_node, label_convert_node, [("atlas_files", "in_file")]),  # atlas image files
                (caps_filenames_node, label_convert_node, [("nodes", "out_file")]),  # converted atlas image filenames
                # Connectomes Generation
                (tck_gen_node, conn_gen_node, [("out_file", "in_file")]),
                (caps_filenames_node, conn_gen_node, [("connectomes", "out_file")]),
            ]
        )
        # Registration T1-DWI (only if space=b0)
        # -------------------
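        # When the preprocessed DWI stays in native b0 space, the T1-derived
        # images (brain, WM mask, parcellations) must first be registered to
        # that space.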
        if self.parameters["dwi_space"] == "b0":
            self.connect(
                [
                    # MGZ Files Conversion
                    (self.input_node, t1_brain_conv_node, [("t1_brain_file", "in_file")]),
                    (self.input_node, wm_mask_conv_node, [("wm_mask_file", "in_file")]),
                    # B0 Extraction
                    (self.input_node, split_node, [("dwi_file", "in_file")]),
                    (split_node, select_node, [("out_files", "inlist")]),
                    # Masking
                    (select_node, mask_node, [("out", "in_file")]),  # B0
                    (self.input_node, mask_node, [("dwi_brainmask_file", "mask_file")]),  # Brain mask
                    # T1-to-B0 Registration
                    (t1_brain_conv_node, t12b0_reg_node, [("out_file", "in_file")]),  # Brain
                    (mask_node, t12b0_reg_node, [("out_file", "reference")]),  # B0 brain-masked
                    # WM Transformation
                    (wm_mask_conv_node, wm_transform_node, [("out_file", "in_file")]),  # WM mask
                    (mask_node, wm_transform_node, [("out_file", "reference")]),  # B0 brain-masked
                    (t12b0_reg_node, wm_transform_node, [("out_matrix_file", "in_matrix_file")]),  # T1-to-B0 matrix file
                    # FSL flirt matrix to MRtrix matrix Conversion
                    (t1_brain_conv_node, fsl2mrtrix_conv_node, [("out_file", "in_source_image")]),
                    (mask_node, fsl2mrtrix_conv_node, [("out_file", "in_reference_image")]),
                    (t12b0_reg_node, fsl2mrtrix_conv_node, [("out_matrix_file", "in_flirt_matrix")]),
                    # Apply registration without resampling on parcellations
                    (label_convert_node, parc_transform_node, [("out_file", "in_files")]),
                    (fsl2mrtrix_conv_node, parc_transform_node, [("out_mrtrix_matrix", "linear_transform")]),
                    (caps_filenames_node, parc_transform_node, [("nodes", "out_filename")]),
                ]
            )
        # Special care for Parcellation & WM mask
        # ---------------------------------------
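        # The seeding image and the parcellation used for the connectome depend
        # on the DWI space: registered files when the DWI is in b0 space, the
        # T1-space WM mask and converted labels when it is already in T1w space.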
        if self.parameters["dwi_space"] == "b0":
            self.connect(
                [
                    (wm_transform_node, tck_gen_node, [("out_file", "seed_image")]),
                    (parc_transform_node, conn_gen_node, [("out_file", "in_parc")]),
                    (parc_transform_node, self.output_node, [("out_file", "nodes")]),
                ]
            )
        elif self.parameters["dwi_space"] == "T1w":
            self.connect(
                [
                    (self.input_node, tck_gen_node, [("wm_mask_file", "seed_image")]),
                    (label_convert_node, conn_gen_node, [("out_file", "in_parc")]),
                    (label_convert_node, self.output_node, [("out_file", "nodes")]),
                ]
            )
        else:
            raise ClinicaCAPSError(
                "Bad preprocessed DWI space. Please check your CAPS folder."
            )
        # Outputs
        # -------
        self.connect(
            [
                (resp_estim_node, self.output_node, [("wm_file", "response")]),
                (fod_estim_node, self.output_node, [("wm_odf", "fod")]),
                (tck_gen_node, self.output_node, [("out_file", "tracts")]),
                (conn_gen_node, self.output_node, [("out_file", "connectomes")]),
                (self.input_node, print_end_message, [("dwi_file", "in_bids_or_caps_file")]),
                (conn_gen_node, print_end_message, [("out_file", "final_file")]),
            ]
        )