Code example #1
File: inputs.py Project: yogeshmj/clinica
def check_caps_folder(caps_directory):
    """
    check_caps_folder function checks the following items:
        - caps_directory is a string
        - the provided path exists and is a directory
        - provided path is not a BIDS folder (BIDS and CAPS could be swapped by the user). We simply check that
          there is no folder whose name starts with 'sub-' in the provided path (such folders exist in a BIDS
          hierarchy)
    Keep in mind that a CAPS folder can be empty
    """
    from os import listdir
    import os
    from colorama import Fore
    from clinica.utils.exceptions import ClinicaCAPSError

    assert isinstance(caps_directory, str), 'Argument you provided to check_caps_folder() is not a string.'

    if not os.path.isdir(caps_directory):
        raise ClinicaCAPSError(Fore.RED + '\n[Error] The CAPS directory you gave is not a folder.\n' + Fore.RESET
                               + Fore.YELLOW + '\nError explanations:\n' + Fore.RESET
                               + ' - Clinica expected the following path to be a folder:' + Fore.BLUE + caps_directory
                               + Fore.RESET + '\n'
                               + ' - If you gave a relative path, did you run Clinica from the correct folder?')

    sub_folders = [item for item in listdir(caps_directory) if item.startswith('sub-')]
    if len(sub_folders) > 0:
        error_string = '\n[Error] Your CAPS directory contains at least one folder whose name ' \
                       + 'starts with \'sub-\'. Check that you did not swap BIDS and CAPS folders.\n' \
                       + ' Folder(s) found that match(es) BIDS architecture:\n'
        for directory in sub_folders:
            error_string += '\t' + directory + '\n'
        error_string += 'A CAPS directory has a folder \'subjects\' at its root, in which are stored the output ' \
                        + 'of the pipeline for each subject.'
        raise ClinicaCAPSError(error_string)
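
A minimal usage sketch, assuming a hypothetical CAPS path; check_caps_folder returns nothing and signals problems only by raising ClinicaCAPSError:

# Hypothetical usage of check_caps_folder; the path is a placeholder.
from clinica.utils.exceptions import ClinicaCAPSError

try:
    check_caps_folder('/data/my_study/caps')  # silent on success
except ClinicaCAPSError as error:
    print(error)  # not a folder, or contains BIDS-like 'sub-*' folders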
Code example #2
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.exceptions import ClinicaException, ClinicaCAPSError
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.input_files import t1_volume_i_th_iteration_group_template, t1_volume_dartel_input_tissue
        from clinica.utils.ux import print_images_to_process

        read_input_node = npe.Node(name="LoadingCLIArguments",
                                   interface=nutil.IdentityInterface(
                                       fields=self.get_input_fields(),
                                       mandatory_inputs=True))

        all_errors = []

        # Dartel Input Tissues
        # ====================
        d_input = []
        for tissue_number in self.parameters['tissues']:
            try:
                current_file = clinica_file_reader(
                    self.subjects, self.sessions, self.caps_directory,
                    t1_volume_dartel_input_tissue(tissue_number))
                d_input.append(current_file)
            except ClinicaException as e:
                all_errors.append(e)

        # Dartel Templates
        # ================
        dartel_iter_templates = []
        for i in range(1, 7):
            try:
                current_iter = clinica_group_reader(
                    self.caps_directory,
                    t1_volume_i_th_iteration_group_template(
                        self.parameters['group_id'], i))

                dartel_iter_templates.append(current_iter)
            except ClinicaException as e:
                all_errors.append(e)

        if len(all_errors) > 0:
            error_message = 'Clinica faced error(s) while trying to read files in your CAPS/BIDS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)

        read_input_node.inputs.dartel_input_images = d_input
        read_input_node.inputs.dartel_iteration_templates = dartel_iter_templates

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)

        self.connect([(read_input_node, self.input_node,
                       [('dartel_input_images', 'dartel_input_images')]),
                      (read_input_node, self.input_node,
                       [('dartel_iteration_templates',
                         'dartel_iteration_templates')])])
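
The loops above collect every ClinicaException into all_errors and raise a single ClinicaCAPSError at the end, so the user sees all missing inputs at once instead of only the first failure. A stripped-down sketch of this accumulate-then-raise pattern, with a generic read_one callable standing in for clinica_file_reader/clinica_group_reader:

def read_all_or_report(items, read_one):
    """Attempt every lookup; raise one combined error listing all failures."""
    results, errors = [], []
    for item in items:
        try:
            results.append(read_one(item))
        except Exception as error:  # ClinicaException in the real pipeline code
            errors.append(error)
    if errors:
        # ClinicaCAPSError in the real pipeline code
        raise RuntimeError('\n'.join(str(error) for error in errors))
    return results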
Code example #3
def clinica_group_reader(caps_directory, information, raise_exception=True):
    """Read files CAPS directory based on group ID(s).

    This function grabs files relative to a group, according to a glob pattern (using *). Only one file can be returned,
    as order is arbitrary in glob.glob().
    Args:
        caps_directory: input caps directory
        information: dictionary containing all the relevant information to look for the files. Dict must contain the
                     following keys: pattern, description, needed_pipeline
                             pattern: define the pattern of the final file
                             description: string to describe what the file is
                             needed_pipeline: string describing the pipeline needed to obtain the file beforehand (the key must be present, even if empty)
        raise_exception: if True (normal behavior), an exception is raised if errors happen. If not, we return the file
                        list as it is

    Returns:
          string of the found file

    Raises:
        ClinicaCAPSError if no file is found, or if more than one file is found
    """
    from os.path import join

    from clinica.utils.exceptions import ClinicaCAPSError

    assert isinstance(information,
                      dict), "A dict must be provided for the argument 'information'"
    assert all(
        elem in information.keys()
        for elem in ["pattern", "description", "needed_pipeline"]
    ), "'information' must contain the keys 'pattern', 'description', 'needed_pipeline'"

    pattern = information["pattern"]
    # Some checks on the formatting of the data
    assert pattern[0] != "/", (
        "pattern argument cannot start with char: / (does not work in os.path.join function). "
        "If you want to indicate the exact name of the file, use the format"
        " directory_name/filename.extension or filename.extension in the pattern argument."
    )

    check_caps_folder(caps_directory)

    current_pattern = join(caps_directory, "**/", pattern)
    current_glob_found = insensitive_glob(current_pattern, recursive=True)

    if len(current_glob_found) != 1 and raise_exception is True:
        error_string = f"Clinica encountered a problem while getting {information['description']}. "
        if len(current_glob_found) == 0:
            error_string += "No file was found"
        else:
            error_string += f"{len(current_glob_found)} files were found:"
            for found_files in current_glob_found:
                error_string += f"\n\t{found_files}"
            error_string += (
                f"\n\tCAPS directory: {caps_directory}\n"
                "Please note that the following clinica pipeline(s) must have run to obtain these files: "
                f"{information['needed_pipeline']}\n")
        raise ClinicaCAPSError(error_string)
    return current_glob_found[0]
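
A hypothetical call showing the shape of the information dict that the asserts above demand (the pattern, group name, and CAPS path are placeholders, not guaranteed Clinica layouts):

# Hypothetical usage of clinica_group_reader; all values are placeholders.
information = {
    'pattern': 'group-AD/t1/group-AD_template.nii*',  # glob; must not start with '/'
    'description': 'DARTEL template of group AD',
    'needed_pipeline': 't1-volume-create-dartel',
}
template_file = clinica_group_reader('/data/my_study/caps', information)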
Code example #4
def clinica_group_reader(caps_directory, information, raise_exception=True):
    """
    This function grabs files relative to a group, according to a glob pattern (using *). Only one file can be returned,
    as order is arbitrary in glob.glob().
    Args:
        caps_directory: input caps directory
        information: dictionary containing all the relevant information to look for the files. Dict must contain the
                     following keys: pattern, description, needed_pipeline
                             pattern: define the pattern of the final file
                             description: string to describe what the file is
                             needed_pipeline: string describing the pipeline needed to obtain the file beforehand (the key must be present, even if empty)
        raise_exception: if True (normal behavior), an exception is raised if errors happen. If not, we return the file
                        list as it is

    Returns:
          string of the found file

    Raises:
        ClinicaCAPSError if no file is found, or if more than one file is found
    """
    from os.path import join
    from colorama import Fore
    from clinica.utils.exceptions import ClinicaCAPSError

    assert isinstance(
        information, dict), 'A dict must be provided for the argument \'information\''
    assert all(
        elem in information.keys()
        for elem in ['pattern', 'description', 'needed_pipeline']
    ), '\'information\' must contain the keys \'pattern\', \'description\', \'needed_pipeline\''

    pattern = information['pattern']
    # Some checks on the formatting of the data
    assert pattern[0] != '/', 'pattern argument cannot start with char: / (does not work in os.path.join function). ' \
                              + 'If you want to indicate the exact name of the file, use the format' \
                              + ' directory_name/filename.extension or filename.extension in the pattern argument'

    check_caps_folder(caps_directory)

    current_pattern = join(caps_directory, '**/', pattern)
    current_glob_found = insensitive_glob(current_pattern, recursive=True)

    if len(current_glob_found) != 1 and raise_exception is True:
        error_string = Fore.RED + '\n[Error] Clinica encountered a problem while getting ' + information[
            'description'] + '. '
        if len(current_glob_found) == 0:
            error_string += 'No file was found'
        else:
            error_string += str(len(current_glob_found)) + ' files were found:'
            for found_files in current_glob_found:
                error_string += '\n\t' + found_files
            error_string += (
                Fore.RESET + '\n\tCAPS directory: ' + caps_directory + '\n' +
                Fore.YELLOW +
                'Please note that the following clinica pipeline(s) must have run to obtain these files: '
                + information['needed_pipeline'] + Fore.RESET + '\n')
        raise ClinicaCAPSError(error_string)
    return current_glob_found[0]
Code example #5
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from colorama import Fore

        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.input_files import t1_volume_template_tpm_in_mni
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import (
            print_groups_in_caps_directory,
            print_images_to_process,
        )

        # Check that group already exists
        if not os.path.exists(
                os.path.join(self.caps_directory, "groups",
                             f"group-{self.parameters['group_label']}")):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                f"%{Fore.RED}Group {self.parameters['group_label']} does not exist. "
                f"Did you run t1-volume or t1-volume-create-dartel pipeline?{Fore.RESET}"
            )

        try:
            gm_mni = clinica_file_reader(
                self.subjects,
                self.sessions,
                self.caps_directory,
                t1_volume_template_tpm_in_mni(self.parameters["group_label"],
                                              1, True),
            )
        except ClinicaException as e:
            final_error_str = "Clinica faced error(s) while trying to read files in your CAPS directory.\n"
            final_error_str += str(e)
            raise ClinicaCAPSError(final_error_str)

        read_parameters_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                              mandatory_inputs=True),
        )
        read_parameters_node.inputs.file_list = gm_mni
        read_parameters_node.inputs.atlas_list = self.parameters["atlases"]

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last a few seconds per image.")

        self.connect([
            (read_parameters_node, self.input_node, [("file_list", "file_list")
                                                     ]),
            (read_parameters_node, self.input_node, [("atlas_list",
                                                      "atlas_list")]),
        ])
Code example #6
File: inputs.py Project: ghisvail/clinica
def check_caps_folder(caps_directory):
    """Check CAPS folder.

    This function checks the following items:
        - caps_directory is a string
        - the provided path exists and is a directory
        - provided path is not a BIDS folder (BIDS and CAPS could be swapped by the user). We simply check that
          there is no folder whose name starts with 'sub-' in the provided path (such folders exist in a BIDS
          hierarchy)
    Keep in mind that a CAPS folder can be empty.
    """
    import os
    from os import listdir

    from colorama import Fore

    from clinica.utils.exceptions import ClinicaCAPSError

    assert isinstance(
        caps_directory,
        str), "Argument you provided to check_caps_folder() is not a string."

    if not os.path.isdir(caps_directory):
        raise ClinicaCAPSError(
            f"{Fore.RED}\n[Error] The CAPS directory you gave is not a folder.\n{Fore.RESET}"
            f"{Fore.YELLOW}\nError explanations:\n{Fore.RESET}"
            f" - Clinica expected the following path to be a folder: {Fore.BLUE}{caps_directory}{Fore.RESET}\n"
            f" - If you gave relative path, did you run Clinica on the good folder?"
        )

    sub_folders = [
        item for item in listdir(caps_directory) if item.startswith("sub-")
    ]
    if len(sub_folders) > 0:
        error_string = (
            "\n[Error] Your CAPS directory contains at least one folder whose name "
            "starts with 'sub-'. Check that you did not swap BIDS and CAPS folders.\n"
            " Folder(s) found that match(es) BIDS architecture:\n")
        for directory in sub_folders:
            error_string += f"\t{directory}\n"
        error_string += (
            "A CAPS directory has a folder 'subjects' at its root, in which "
            "are stored the output of the pipeline for each subject.")
        raise ClinicaCAPSError(error_string)
Code example #7
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        from colorama import Fore
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import t1_volume_template_tpm_in_mni
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_groups_in_caps_directory, print_images_to_process

        # Check that group already exists
        if not os.path.exists(
                os.path.join(self.caps_directory, 'groups',
                             'group-' + self.parameters['group_id'])):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                '%sGroup %s does not exist. Did you run t1-volume or t1-volume-create-dartel pipeline?%s'
                % (Fore.RED, self.parameters['group_id'], Fore.RESET))

        try:
            gm_mni = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                t1_volume_template_tpm_in_mni(self.parameters['group_id'], 1,
                                              True))
        except ClinicaException as e:
            final_error_str = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n'
            final_error_str += str(e)
            raise ClinicaCAPSError(final_error_str)

        read_parameters_node = npe.Node(name="LoadingCLIArguments",
                                        interface=nutil.IdentityInterface(
                                            fields=self.get_input_fields(),
                                            mandatory_inputs=True))
        read_parameters_node.inputs.file_list = gm_mni
        read_parameters_node.inputs.atlas_list = self.parameters['atlases']

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last a few seconds per image.')

        self.connect([(read_parameters_node, self.input_node, [('file_list',
                                                                'file_list')]),
                      (read_parameters_node, self.input_node,
                       [('atlas_list', 'atlas_list')])])
Code example #8
    def build_input_node(self):
        """Build and connect an input node to the pipeline.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.stream import cprint
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.inputs import clinica_file_reader
        import clinica.utils.input_files as input_files
        import re

        all_errors = []

        # Inputs from t1-freesurfer pipeline
        # ==================================

        # White matter segmentation
        try:
            wm_mask_files = clinica_file_reader(self.subjects, self.sessions,
                                                self.caps_directory,
                                                input_files.T1_FS_WM)
        except ClinicaException as e:
            all_errors.append(e)

        # Desikan parcellation
        try:
            aparc_aseg_files = clinica_file_reader(self.subjects,
                                                   self.sessions,
                                                   self.caps_directory,
                                                   input_files.T1_FS_DESIKAN)
        except ClinicaException as e:
            all_errors.append(e)

        # Destrieux parcellation
        try:
            aparc_aseg_a2009s_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                input_files.T1_FS_DESTRIEUX)
        except ClinicaException as e:
            all_errors.append(e)

        # Inputs from dwi-preprocessing pipeline
        # ======================================
        # Preprocessed DWI
        try:
            dwi_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.caps_directory,
                                            input_files.DWI_PREPROC_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # B0 brainmask
        try:
            dwi_brainmask_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                input_files.DWI_PREPROC_BRAINMASK)
        except ClinicaException as e:
            all_errors.append(e)

        # Preprocessed bvec
        try:
            bvec_files = clinica_file_reader(self.subjects, self.sessions,
                                             self.caps_directory,
                                             input_files.DWI_PREPROC_BVEC)
        except ClinicaException as e:
            all_errors.append(e)

        # Preprocessed bval
        try:
            bval_files = clinica_file_reader(self.subjects, self.sessions,
                                             self.caps_directory,
                                             input_files.DWI_PREPROC_BVAL)
        except ClinicaException as e:
            all_errors.append(e)

        if len(all_errors) > 0:
            error_message = 'Clinica faced errors while trying to read files in your BIDS or CAPS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)

        # Check space of DWI dataset
        dwi_file_spaces = [
            re.search('.*_space-(.*)_preproc.nii.*', file,
                      re.IGNORECASE).group(1) for file in dwi_files
        ]

        # Return an error if all the DWI files are not in the same space
        if any(a != dwi_file_spaces[0] for a in dwi_file_spaces):
            raise ClinicaCAPSError(
                'Preprocessed DWI files are not all in the '
                'same space. Please process them separately '
                'using the appropriate subjects/sessions '
                '`.tsv` file (-tsv option).')

        # Used only for T1-B0 registration
        if dwi_file_spaces[0] == 'b0':
            # Brain extracted T1w
            t1_brain_files = clinica_file_reader(self.subjects, self.sessions,
                                                 self.caps_directory,
                                                 input_files.T1_FS_BRAIN)

        list_atlas_files = [[aparc_aseg, aparc_aseg_a2009]
                            for aparc_aseg, aparc_aseg_a2009 in zip(
                                aparc_aseg_files, aparc_aseg_a2009s_files)]

        list_grad_fsl = [(bvec, bval)
                         for bvec, bval in zip(bvec_files, bval_files)]

        p_id_images_to_process = [
            re.search(r'(sub-[a-zA-Z0-9]+)', caps_file).group()
            for caps_file in dwi_files
        ]
        s_id_images_to_process = [
            re.search(r'(ses-[a-zA-Z0-9]+)', caps_file).group()
            for caps_file in dwi_files
        ]
        images_to_process = ', '.join(
            p_id[4:] + '|' + s_id[4:] for p_id, s_id in zip(
                p_id_images_to_process, s_id_images_to_process))
        cprint('The pipeline will be run on the following subject(s): %s' %
               images_to_process)

        if dwi_file_spaces[0] == 'b0':
            self.parameters['dwi_space'] = 'b0'
            read_node = npe.Node(name="ReadingFiles",
                                 iterables=[('wm_mask_file', wm_mask_files),
                                            ('t1_brain_file', t1_brain_files),
                                            ('dwi_file', dwi_files),
                                            ('dwi_brainmask_file',
                                             dwi_brainmask_files),
                                            ('grad_fsl', list_grad_fsl),
                                            ('atlas_files', list_atlas_files)],
                                 synchronize=True,
                                 interface=nutil.IdentityInterface(
                                     fields=self.get_input_fields()))
            self.connect([
                (read_node, self.input_node, [('t1_brain_file',
                                               't1_brain_file')]),
                (read_node, self.input_node, [('wm_mask_file', 'wm_mask_file')
                                              ]),
                (read_node, self.input_node, [('dwi_file', 'dwi_file')]),
                (read_node, self.input_node, [('dwi_brainmask_file',
                                               'dwi_brainmask_file')]),
                (read_node, self.input_node, [('grad_fsl', 'grad_fsl')]),
                (read_node, self.input_node, [('atlas_files', 'atlas_files')]),
            ])

        elif dwi_file_spaces[0] == 'T1w':
            self.parameters['dwi_space'] = 'T1w'
            read_node = npe.Node(name="ReadingFiles",
                                 iterables=[('wm_mask_file', wm_mask_files),
                                            ('dwi_file', dwi_files),
                                            ('dwi_brainmask_file',
                                             dwi_brainmask_files),
                                            ('grad_fsl', list_grad_fsl),
                                            ('atlas_files', list_atlas_files)],
                                 synchronize=True,
                                 interface=nutil.IdentityInterface(
                                     fields=self.get_input_fields()))
            self.connect([
                (read_node, self.input_node, [('wm_mask_file', 'wm_mask_file')
                                              ]),
                (read_node, self.input_node, [('dwi_file', 'dwi_file')]),
                (read_node, self.input_node, [('dwi_brainmask_file',
                                               'dwi_brainmask_file')]),
                (read_node, self.input_node, [('grad_fsl', 'grad_fsl')]),
                (read_node, self.input_node, [('atlas_files', 'atlas_files')]),
            ])

        else:
            raise ClinicaCAPSError('Bad preprocessed DWI space. Please check '
                                   'your CAPS folder.')
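
The space check above leans on the CAPS naming convention *_space-<space>_preproc.nii[.gz]. A self-contained illustration of that regex with made-up file names:

import re

# Made-up CAPS-style file names illustrating the space extraction above.
dwi_files = [
    'sub-01_ses-M00_space-b0_preproc.nii.gz',
    'sub-02_ses-M00_space-b0_preproc.nii.gz',
]
spaces = [re.search('.*_space-(.*)_preproc.nii.*', f, re.IGNORECASE).group(1)
          for f in dwi_files]
assert all(space == spaces[0] for space in spaces)  # the "same space" check
print(spaces[0])  # 'b0'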
Code example #9
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Notes:
            - If `FSLOUTPUTTYPE` environment variable is not set, `nipype` takes
            NIFTI by default.

        Todo:
            - [x] Detect space automatically.
            - [ ] Allow for custom parcellations (See TODOs in utils).

        """
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as npe
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.freesurfer as fs
        import nipype.interfaces.mrtrix3 as mrtrix3
        from clinica.lib.nipype.interfaces.mrtrix.preprocess import MRTransform
        from clinica.lib.nipype.interfaces.mrtrix3.reconst import EstimateFOD
        from clinica.lib.nipype.interfaces.mrtrix3.tracking import Tractography
        from clinica.utils.exceptions import ClinicaException, ClinicaCAPSError
        from clinica.utils.stream import cprint
        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
        from clinica.utils.mri_registration import convert_flirt_transformation_to_mrtrix_transformation

        # cprint('Building the pipeline...')

        # Nodes
        # =====

        # B0 Extraction (only if space=b0)
        # -------------
        split_node = npe.Node(name="Reg-0-DWI-B0Extraction",
                              interface=fsl.Split())
        split_node.inputs.output_type = "NIFTI_GZ"
        split_node.inputs.dimension = 't'
        select_node = npe.Node(name="Reg-0-DWI-B0Selection",
                               interface=niu.Select())
        select_node.inputs.index = 0

        # B0 Brain Extraction (only if space=b0)
        # -------------------
        mask_node = npe.Node(name="Reg-0-DWI-BrainMasking",
                             interface=fsl.ApplyMask())
        mask_node.inputs.output_type = "NIFTI_GZ"

        # T1-to-B0 Registration (only if space=b0)
        # ---------------------
        t12b0_reg_node = npe.Node(name="Reg-1-T12B0Registration",
                                  interface=fsl.FLIRT(
                                      dof=6,
                                      interp='spline',
                                      cost='normmi',
                                      cost_func='normmi',
                                  ))
        t12b0_reg_node.inputs.output_type = "NIFTI_GZ"

        # MGZ File Conversion (only if space=b0)
        # -------------------
        t1_brain_conv_node = npe.Node(name="Reg-0-T1-T1BrainConvertion",
                                      interface=fs.MRIConvert())
        wm_mask_conv_node = npe.Node(name="Reg-0-T1-WMMaskConvertion",
                                     interface=fs.MRIConvert())

        # WM Transformation (only if space=b0)
        # -----------------
        wm_transform_node = npe.Node(name="Reg-2-WMTransformation",
                                     interface=fsl.ApplyXFM())
        wm_transform_node.inputs.apply_xfm = True

        # Nodes Generation
        # ----------------
        label_convert_node = npe.MapNode(
            name="0-LabelsConversion",
            iterfield=['in_file', 'in_config', 'in_lut', 'out_file'],
            interface=mrtrix3.LabelConvert())
        label_convert_node.inputs.in_config = utils.get_conversion_luts()
        label_convert_node.inputs.in_lut = utils.get_luts()

        # FSL flirt matrix to MRtrix matrix Conversion (only if space=b0)
        # --------------------------------------------
        fsl2mrtrix_conv_node = npe.Node(
            name='Reg-2-FSL2MrtrixConversion',
            interface=niu.Function(
                input_names=[
                    'in_source_image', 'in_reference_image', 'in_flirt_matrix',
                    'name_output_matrix'
                ],
                output_names=['out_mrtrix_matrix'],
                function=convert_flirt_transformation_to_mrtrix_transformation)
        )

        # Parc. Transformation (only if space=b0)
        # --------------------
        parc_transform_node = npe.MapNode(
            name="Reg-2-ParcTransformation",
            iterfield=["in_files", "out_filename"],
            interface=MRTransform())

        # Response Estimation
        # -------------------
        resp_estim_node = npe.Node(name="1a-ResponseEstimation",
                                   interface=mrtrix3.ResponseSD())
        resp_estim_node.inputs.algorithm = 'tournier'

        # FOD Estimation
        # --------------
        fod_estim_node = npe.Node(name="1b-FODEstimation",
                                  interface=EstimateFOD())
        fod_estim_node.inputs.algorithm = 'csd'

        # Tracts Generation
        # -----------------
        tck_gen_node = npe.Node(name="2-TractsGeneration",
                                interface=Tractography())
        tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']
        tck_gen_node.inputs.algorithm = 'iFOD2'

        # BUG: Info package does not exist
        # from nipype.interfaces.mrtrix3.base import Info
        # from distutils.version import LooseVersion
        #
        # if Info.looseversion() >= LooseVersion("3.0"):
        #     tck_gen_node.inputs.select = self.parameters['n_tracks']
        # elif Info.looseversion() <= LooseVersion("0.4"):
        #     tck_gen_node.inputs.n_tracks = self.parameters['n_tracks']
        # else:
        #     from clinica.utils.exceptions import ClinicaException
        #     raise ClinicaException("Your MRtrix version is not supported.")

        # Connectome Generation
        # ---------------------
        # only the parcellation and output filename should be iterable, the tck
        # file stays the same.
        conn_gen_node = npe.MapNode(name="3-ConnectomeGeneration",
                                    iterfield=['in_parc', 'out_file'],
                                    interface=mrtrix3.BuildConnectome())

        # Print begin message
        # -------------------
        print_begin_message = npe.MapNode(interface=niu.Function(
            input_names=['in_bids_or_caps_file'],
            function=utils.print_begin_pipeline),
                                          iterfield='in_bids_or_caps_file',
                                          name='WriteBeginMessage')

        # Print end message
        # -----------------
        print_end_message = npe.MapNode(interface=niu.Function(
            input_names=['in_bids_or_caps_file', 'final_file'],
            function=utils.print_end_pipeline),
                                        iterfield=['in_bids_or_caps_file'],
                                        name='WriteEndMessage')

        # CAPS File names Generation
        # --------------------------
        caps_filenames_node = npe.Node(
            name='CAPSFilenamesGeneration',
            interface=niu.Function(input_names='dwi_file',
                                   output_names=self.get_output_fields(),
                                   function=utils.get_caps_filenames))

        # Connections
        # ===========
        # Computation of the diffusion model, tractography & connectome
        # -------------------------------------------------------------
        self.connect([
            (self.input_node, print_begin_message,
             [('dwi_file', 'in_bids_or_caps_file')]),  # noqa
            (self.input_node, caps_filenames_node, [('dwi_file', 'dwi_file')]),
            # Response Estimation
            (self.input_node, resp_estim_node, [('dwi_file', 'in_file')]
             ),  # Preproc. DWI # noqa
            (self.input_node, resp_estim_node,
             [('dwi_brainmask_file', 'in_mask')]),  # B0 brain mask # noqa
            (self.input_node, resp_estim_node, [('grad_fsl', 'grad_fsl')
                                                ]),  # bvecs and bvals # noqa
            (caps_filenames_node, resp_estim_node,
             [('response', 'wm_file')]),  # output response filename # noqa
            # FOD Estimation
            (self.input_node, fod_estim_node, [('dwi_file', 'in_file')]
             ),  # Preproc. DWI # noqa
            (resp_estim_node, fod_estim_node,
             [('wm_file', 'wm_txt')]),  # Response (txt file) # noqa
            (self.input_node, fod_estim_node,
             [('dwi_brainmask_file', 'mask_file')]),  # B0 brain mask # noqa
            (self.input_node, fod_estim_node,
             [('grad_fsl', 'grad_fsl')]),  # T1-to-B0 matrix file # noqa
            (caps_filenames_node, fod_estim_node,
             [('fod', 'wm_odf')]),  # output odf filename # noqa
            # Tracts Generation
            (fod_estim_node, tck_gen_node, [('wm_odf', 'in_file')]
             ),  # ODF file # noqa
            (caps_filenames_node, tck_gen_node,
             [('tracts', 'out_file')]),  # output tck filename # noqa
            # Label Conversion
            (self.input_node, label_convert_node, [('atlas_files', 'in_file')]
             ),  # atlas image files # noqa
            (caps_filenames_node, label_convert_node, [
                ('nodes', 'out_file')
            ]),  # converted atlas image filenames # noqa
            # Connectomes Generation
            (tck_gen_node, conn_gen_node, [('out_file', 'in_file')]),  # noqa
            (caps_filenames_node, conn_gen_node, [('connectomes', 'out_file')
                                                  ]),  # noqa
        ])
        # Registration T1-DWI (only if space=b0)
        # -------------------
        if self.parameters['dwi_space'] == 'b0':
            self.connect([
                # MGZ Files Conversion
                (self.input_node, t1_brain_conv_node, [('t1_brain_file',
                                                        'in_file')]),  # noqa
                (self.input_node, wm_mask_conv_node, [('wm_mask_file',
                                                       'in_file')]),  # noqa
                # B0 Extraction
                (self.input_node, split_node, [('dwi_file', 'in_file')]
                 ),  # noqa
                (split_node, select_node, [('out_files', 'inlist')]),  # noqa
                # Masking
                (select_node, mask_node, [('out', 'in_file')]),  # B0 # noqa
                (self.input_node, mask_node,
                 [('dwi_brainmask_file', 'mask_file')]),  # Brain mask # noqa
                # T1-to-B0 Registration
                (t1_brain_conv_node, t12b0_reg_node, [('out_file', 'in_file')]
                 ),  # Brain # noqa
                (mask_node, t12b0_reg_node, [('out_file', 'reference')
                                             ]),  # B0 brain-masked # noqa
                # WM Transformation
                (wm_mask_conv_node, wm_transform_node,
                 [('out_file', 'in_file')]),  # Brain mask # noqa
                (mask_node, wm_transform_node, [('out_file', 'reference')
                                                ]),  # BO brain-masked # noqa
                (t12b0_reg_node, wm_transform_node, [
                    ('out_matrix_file', 'in_matrix_file')
                ]),  # T1-to-B0 matrix file # noqa
                # FSL flirt matrix to MRtrix matrix Conversion
                (t1_brain_conv_node, fsl2mrtrix_conv_node,
                 [('out_file', 'in_source_image')]),  # noqa
                (mask_node, fsl2mrtrix_conv_node,
                 [('out_file', 'in_reference_image')]),  # noqa
                (t12b0_reg_node, fsl2mrtrix_conv_node,
                 [('out_matrix_file', 'in_flirt_matrix')]),  # noqa
                # Apply registration without resampling on parcellations
                (label_convert_node, parc_transform_node,
                 [('out_file', 'in_files')]),  # noqa
                (fsl2mrtrix_conv_node, parc_transform_node,
                 [('out_mrtrix_matrix', 'linear_transform')]),  # noqa
                (caps_filenames_node, parc_transform_node,
                 [('nodes', 'out_filename')]),  # noqa
            ])
        # Special care for Parcellation & WM mask
        # ---------------------------------------
        if self.parameters['dwi_space'] == 'b0':
            self.connect([
                (wm_transform_node, tck_gen_node, [('out_file', 'seed_image')
                                                   ]),  # noqa
                (parc_transform_node, conn_gen_node, [('out_file', 'in_parc')
                                                      ]),  # noqa
                (parc_transform_node, self.output_node, [('out_file', 'nodes')
                                                         ]),  # noqa
            ])
        elif self.parameters['dwi_space'] == 'T1w':
            self.connect([
                (self.input_node, tck_gen_node, [('wm_mask_file', 'seed_image')
                                                 ]),  # noqa
                (label_convert_node, conn_gen_node, [('out_file', 'in_parc')
                                                     ]),  # noqa
                (label_convert_node, self.output_node, [('out_file', 'nodes')
                                                        ]),  # noqa
            ])
        else:
            raise ClinicaCAPSError(
                'Bad preprocessed DWI space. Please check your CAPS '
                'folder.')
        # Outputs
        # -------
        self.connect([
            (resp_estim_node, self.output_node, [('wm_file', 'response')]),
            (fod_estim_node, self.output_node, [('wm_odf', 'fod')]),
            (tck_gen_node, self.output_node, [('out_file', 'tracts')]),
            (conn_gen_node, self.output_node, [('out_file', 'connectomes')]),
            (self.input_node, print_end_message, [('dwi_file',
                                                   'in_bids_or_caps_file')]),
            (conn_gen_node, print_end_message, [('out_file', 'final_file')]),
        ])
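
conn_gen_node above is a MapNode: it runs one BuildConnectome per element of its iterfield inputs ('in_parc', 'out_file'), while the tck file arrives once and is shared across runs. A minimal sketch of that semantics using a pure-Python Function interface (names and values are illustrative; assumes nipype is installed):

import nipype.interfaces.utility as niu
import nipype.pipeline.engine as npe

def tag(parc, tracts):
    # Stand-in computation; the real node runs tck2connectome.
    return f'{tracts} -> {parc}'

per_parc_node = npe.MapNode(
    name='SketchConnectomePerParcellation',
    iterfield=['parc'],  # one run per parcellation
    interface=niu.Function(input_names=['parc', 'tracts'],
                           output_names=['out'],
                           function=tag))
per_parc_node.inputs.parc = ['desikan.nii', 'destrieux.nii']  # iterated
per_parc_node.inputs.tracts = 'tracts.tck'  # shared across runs
# per_parc_node.run() should then yield outputs.out ==
#   ['tracts.tck -> desikan.nii', 'tracts.tck -> destrieux.nii']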
Code example #10
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        from colorama import Fore
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.input_files import (
            t1_volume_final_group_template, t1_volume_native_tpm,
            t1_volume_deformation_to_template)
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_groups_in_caps_directory, print_images_to_process

        # Check that group already exists
        if not os.path.exists(
                os.path.join(self.caps_directory, 'groups',
                             'group-' + self.parameters['group_id'])):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                '%sGroup %s does not exist. Did you run t1-volume or t1-volume-create-dartel pipeline?%s'
                % (Fore.RED, self.parameters['group_id'], Fore.RESET))

        all_errors = []
        read_input_node = npe.Node(name="LoadingCLIArguments",
                                   interface=nutil.IdentityInterface(
                                       fields=self.get_input_fields(),
                                       mandatory_inputs=True))

        # Segmented Tissues
        # =================
        tissues_input = []
        for tissue_number in self.parameters['tissues']:
            try:
                native_space_tpm = clinica_file_reader(
                    self.subjects, self.sessions, self.caps_directory,
                    t1_volume_native_tpm(tissue_number))
                tissues_input.append(native_space_tpm)
            except ClinicaException as e:
                all_errors.append(e)
        # tissues_input has a length of len(self.parameters['tissues']). Each of these elements has a size of
        # len(self.subjects). We want the opposite: a list of size len(self.subjects) whose elements have a size of
        # len(self.parameters['tissues']). The trick is to iterate over elements with zip(*my_list)
        tissues_input_rearranged = []
        for subject_tissue_list in zip(*tissues_input):
            tissues_input_rearranged.append(subject_tissue_list)

        read_input_node.inputs.native_segmentations = tissues_input_rearranged

        # Flow Fields
        # ===========
        try:
            read_input_node.inputs.flowfield_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                t1_volume_deformation_to_template(self.parameters['group_id']))
        except ClinicaException as e:
            all_errors.append(e)

        # Dartel Template
        # ================
        try:
            read_input_node.inputs.template_file = clinica_group_reader(
                self.caps_directory,
                t1_volume_final_group_template(self.parameters['group_id']))
        except ClinicaException as e:
            all_errors.append(e)

        if len(all_errors) > 0:
            error_message = 'Clinica faced error(s) while trying to read files in your CAPS/BIDS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last a few minutes per image.')

        self.connect([(read_input_node, self.input_node,
                       [('native_segmentations', 'native_segmentations')]),
                      (read_input_node, self.input_node,
                       [('flowfield_files', 'flowfield_files')]),
                      (read_input_node, self.input_node, [('template_file',
                                                           'template_file')])])
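
The zip(*my_list) trick documented in the comment above is an ordinary transposition. A tiny standalone illustration with placeholder file names:

# Transpose a per-tissue list of per-subject files into a per-subject list of tissues.
tissues_input = [
    ['sub-01_tissue-1.nii', 'sub-02_tissue-1.nii'],  # tissue 1, one entry per subject
    ['sub-01_tissue-2.nii', 'sub-02_tissue-2.nii'],  # tissue 2, one entry per subject
]
per_subject = [subject_tissue_list for subject_tissue_list in zip(*tissues_input)]
# per_subject[0] == ('sub-01_tissue-1.nii', 'sub-01_tissue-2.nii')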
Code example #11
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        from os import pardir
        from os.path import abspath, dirname, exists, join

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import (
            ClinicaBIDSError,
            ClinicaCAPSError,
            ClinicaException,
        )
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.input_files import (
            T1W_NII,
            T1W_TO_MNI_TRANSFROM,
            bids_pet_nii,
        )
        from clinica.utils.inputs import (
            RemoteFileStructure,
            clinica_file_reader,
            fetch_file,
        )
        from clinica.utils.pet import get_suvr_mask
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # from clinica.iotools.utils.data_handling import check_volume_location_in_world_coordinate_system
        # Import references files
        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, "resources", "masks")
        url_aramis = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/"
        FILE1 = RemoteFileStructure(
            filename="mni_icbm152_t1_tal_nlin_sym_09c.nii",
            url=url_aramis,
            checksum=
            "93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34",
        )
        FILE2 = RemoteFileStructure(
            filename="ref_cropped_template.nii.gz",
            url=url_aramis,
            checksum=
            "67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3",
        )

        self.ref_template = join(path_to_mask, FILE1.filename)
        self.ref_crop = join(path_to_mask, FILE2.filename)
        self.ref_mask = get_suvr_mask(self.parameters["suvr_reference_region"])

        if not exists(self.ref_template):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    msg=
                    f"Unable to download required template (mni_icbm152) for processing: {err}",
                    lvl="error",
                )
        if not exists(self.ref_crop):
            try:
                fetch_file(FILE2, path_to_mask)
            except IOError as err:
                cprint(
                    msg=
                    f"Unable to download required template (ref_crop) for processing: {err}",
                    lvl="error",
                )

        # Inputs from BIDS directory
        # pet file:
        PET_NII = bids_pet_nii(self.parameters["acq_label"])
        try:
            pet_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, PET_NII)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read pet files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err)

        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read t1w files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err)

        # Inputs from t1-linear pipeline
        # Transformation files from T1w files to MNI:
        try:
            t1w_to_mni_transformation_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                T1W_TO_MNI_TRANSFROM)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read transformation files in your CAPS directory.\n"
                + str(e))
            raise ClinicaCAPSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last approximately 3 minutes per image.")

        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            iterables=[
                ("t1w", t1w_files),
                ("pet", pet_files),
                ("t1w_to_mni", t1w_to_mni_transformation_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        # fmt: off
        self.connect([
            (read_input_node, self.input_node, [("t1w", "t1w")]),
            (read_input_node, self.input_node, [("pet", "pet")]),
            (read_input_node, self.input_node, [("t1w_to_mni", "t1w_to_mni")]),
        ])
        # fmt: on
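
fetch_file itself is not shown here; from the RemoteFileStructure fields it presumably downloads the file and verifies the sha256 checksum. A minimal standard-library sketch of that download-then-verify logic, under that assumption (URL and names are placeholders):

import hashlib
import urllib.request
from os.path import join

def fetch_and_verify(url, filename, checksum, dest_dir):
    """Download url+filename into dest_dir and check its sha256 digest."""
    target = join(dest_dir, filename)
    urllib.request.urlretrieve(url + filename, target)
    with open(target, 'rb') as opened:
        digest = hashlib.sha256(opened.read()).hexdigest()
    if digest != checksum:
        raise IOError(f'Checksum mismatch for {filename}: got {digest}')
    return target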
Code example #12
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        from colorama import Fore
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.input_files import (t1_volume_final_group_template,
                                               pet_volume_normalized_suvr_pet)
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.ux import print_groups_in_caps_directory

        # Check that group already exists
        if not os.path.exists(
                os.path.join(self.caps_directory, 'groups',
                             'group-' + self.parameters['group_label'])):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                '%sGroup %s does not exist. Did you run pet-volume, t1-volume or t1-volume-create-dartel pipeline?%s'
                % (Fore.RED, self.parameters['group_label'], Fore.RESET))

        read_parameters_node = npe.Node(name="LoadingCLIArguments",
                                        interface=nutil.IdentityInterface(
                                            fields=self.get_input_fields(),
                                            mandatory_inputs=True))
        all_errors = []

        if self.parameters['orig_input_data'] == 't1-volume':
            caps_files_information = {
                'pattern':
                os.path.join(
                    't1', 'spm', 'dartel',
                    'group-' + self.parameters['group_label'],
                    '*_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability.nii.gz'
                ),
                'description':
                'graymatter tissue segmented in T1w MRI in Ixi549 space',
                'needed_pipeline':
                't1-volume-tissue-segmentation'
            }
        elif self.parameters['orig_input_data'] == 'pet-volume':
            if not (self.parameters["acq_label"]
                    and self.parameters["suvr_reference_region"]):
                raise ValueError(
                    f"Missing value(s) in parameters from pet-volume pipeline. Given values:\n"
                    f"- acq_label: {self.parameters['acq_label']}\n"
                    f"- suvr_reference_region: {self.parameters['suvr_reference_region']}\n"
                    f"- use_pvc_data: {self.parameters['use_pvc_data']}\n")
            caps_files_information = pet_volume_normalized_suvr_pet(
                acq_label=self.parameters["acq_label"],
                suvr_reference_region=self.parameters["suvr_reference_region"],
                use_brainmasked_image=False,
                use_pvc_data=self.parameters["use_pvc_data"],
                fwhm=0)
        else:
            raise ValueError(
                f"Image type {self.parameters['orig_input_data']} unknown.")

        try:
            input_image = clinica_file_reader(self.subjects, self.sessions,
                                              self.caps_directory,
                                              caps_files_information)
        except ClinicaException as e:
            all_errors.append(e)

        try:
            dartel_input = clinica_group_reader(
                self.caps_directory,
                t1_volume_final_group_template(self.parameters['group_label']))
        except ClinicaException as e:
            all_errors.append(e)

        # Raise all errors if some happened
        if len(all_errors) > 0:
            error_message = 'Clinica faced errors while trying to read files in your CAPS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)

        read_parameters_node.inputs.dartel_input = dartel_input
        read_parameters_node.inputs.input_image = input_image

        self.connect([(read_parameters_node, self.input_node,
                       [('dartel_input', 'dartel_input')]),
                      (read_parameters_node, self.input_node,
                       [('input_image', 'input_image')])])
Code example #13
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        from colorama import Fore

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import ClinicaException, ClinicaCAPSError
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import T1_FS_DESTRIEUX
        from clinica.utils.longitudinal import get_long_id, read_sessions, get_participants_long_id
        from clinica.utils.participant import get_unique_subjects, unique_subjects_sessions_to_subjects_sessions
        from clinica.utils.stream import cprint
        from .longitudinal_utils import extract_participant_long_ids_from_filename, save_part_sess_long_ids_to_tsv

        # Display image(s) already present in CAPS folder
        # ===============================================
        output_ids = self.get_processed_images(self.caps_directory,
                                               self.subjects, self.sessions)
        processed_participants, processed_long_sessions = extract_participant_long_ids_from_filename(
            output_ids)
        if len(processed_participants) > 0:
            cprint(
                "%sClinica found %s participant(s) already processed in CAPS directory:%s"
                % (Fore.YELLOW, len(processed_participants), Fore.RESET))
            for p_id, l_id in zip(processed_participants,
                                  processed_long_sessions):
                cprint("%s\t%s | %s%s" % (Fore.YELLOW, p_id, l_id, Fore.RESET))
            if self.overwrite_caps:
                output_folder = "<CAPS>/subjects/<participant_id>/<long_id>/freesurfer_unbiased_template/"
                cprint("%s\nOutput folders in %s will be recreated.\n%s" %
                       (Fore.YELLOW, output_folder, Fore.RESET))
            else:
                cprint("%s\nParticipant(s) will be ignored by Clinica.\n%s" %
                       (Fore.YELLOW, Fore.RESET))
                input_ids = [
                    p_id + '_' + s_id
                    for p_id, s_id in zip(self.subjects, self.sessions)
                ]
                processed_sessions_per_participant = [
                    read_sessions(self.caps_directory, p_id, l_id)
                    for (p_id, l_id) in zip(processed_participants,
                                            processed_long_sessions)
                ]
                participants, sessions = unique_subjects_sessions_to_subjects_sessions(
                    processed_participants, processed_sessions_per_participant)
                processed_ids = [
                    p_id + '_' + s_id
                    for p_id, s_id in zip(participants, sessions)
                ]
                to_process_ids = list(set(input_ids) - set(processed_ids))
                self.subjects, self.sessions = extract_subjects_sessions_from_filename(
                    to_process_ids)

        # Check that t1-freesurfer has run on the CAPS directory
        try:
            clinica_file_reader(self.subjects, self.sessions,
                                self.caps_directory, T1_FS_DESTRIEUX)
        except ClinicaException as e:
            err_msg = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n' + str(
                e)
            raise ClinicaCAPSError(err_msg)

        # Save subjects to process in <WD>/<Pipeline.name>/participants.tsv
        folder_participants_tsv = os.path.join(self.base_dir, self.name)
        long_ids = get_participants_long_id(self.subjects, self.sessions)
        save_part_sess_long_ids_to_tsv(self.subjects, self.sessions, long_ids,
                                       folder_participants_tsv)

        [list_participant_id,
         list_list_session_ids] = get_unique_subjects(self.subjects,
                                                      self.sessions)
        list_long_id = [
            get_long_id(list_session_ids)
            for list_session_ids in list_list_session_ids
        ]

        def print_images_to_process(unique_part_list, per_part_session_list,
                                    list_part_long_id):
            cprint(
                'The pipeline will be run on the following %s participant(s):'
                % len(unique_part_list))
            for (part_id, list_sess_id,
                 list_id) in zip(unique_part_list, per_part_session_list,
                                 list_part_long_id):
                sessions_participant = ', '.join(s_id for s_id in list_sess_id)
                cprint("\t%s | %s | %s" %
                       (part_id, sessions_participant, list_id))

        if len(self.subjects):
            # TODO: Generalize long IDs to the message display
            print_images_to_process(list_participant_id, list_list_session_ids,
                                    list_long_id)
            cprint('List available in %s' %
                   os.path.join(folder_participants_tsv, 'participants.tsv'))
            cprint(
                'The pipeline will last approximately 10 hours per participant.'
            )

        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ('participant_id', list_participant_id),
                ('list_session_ids', list_list_session_ids),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()))
        self.connect([
            (read_node, self.input_node, [('participant_id', 'participant_id')]),
            (read_node, self.input_node, [('list_session_ids', 'list_session_ids')]),
        ])
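The `set(input_ids) - set(processed_ids)` step above is what removes already-processed images from the input list. A minimal sketch of the same filtering, using made-up participant/session IDs:

    input_ids = ['sub-01_ses-M00', 'sub-01_ses-M18', 'sub-02_ses-M00']
    processed_ids = ['sub-01_ses-M00']
    to_process_ids = list(set(input_ids) - set(processed_ids))
    # e.g. ['sub-01_ses-M18', 'sub-02_ses-M00'] (set operations do not preserve order)
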
Code example #14
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Notes:
            - If the `FSLOUTPUTTYPE` environment variable is not set, `nipype`
            defaults to NIFTI.

        Todo:
            - [x] Detect space automatically.
            - [ ] Allow for custom parcellations (See TODOs in utils).

        """
        import nipype.interfaces.freesurfer as fs
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.mrtrix3 as mrtrix3
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as npe
        from nipype.interfaces.mrtrix3.tracking import Tractography
        from nipype.interfaces.mrtrix.preprocess import MRTransform

        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
        from clinica.lib.nipype.interfaces.mrtrix3.reconst import EstimateFOD
        from clinica.utils.exceptions import ClinicaCAPSError
        from clinica.utils.mri_registration import (
            convert_flirt_transformation_to_mrtrix_transformation,
        )

        # Nodes
        # =====
        # B0 Extraction (only if space=b0)
        # -------------
        split_node = npe.Node(name="Reg-0-DWI-B0Extraction", interface=fsl.Split())
        split_node.inputs.output_type = "NIFTI_GZ"
        split_node.inputs.dimension = "t"
        select_node = npe.Node(name="Reg-0-DWI-B0Selection", interface=niu.Select())
        select_node.inputs.index = 0

        # B0 Brain Extraction (only if space=b0)
        # -------------------
        mask_node = npe.Node(name="Reg-0-DWI-BrainMasking", interface=fsl.ApplyMask())
        mask_node.inputs.output_type = "NIFTI_GZ"

        # T1-to-B0 Registration (only if space=b0)
        # ---------------------
        t12b0_reg_node = npe.Node(
            name="Reg-1-T12B0Registration",
            interface=fsl.FLIRT(
                dof=6,
                interp="spline",
                cost="normmi",
                cost_func="normmi",
            ),
        )
        t12b0_reg_node.inputs.output_type = "NIFTI_GZ"

        # MGZ File Conversion (only if space=b0)
        # -------------------
        t1_brain_conv_node = npe.Node(
            name="Reg-0-T1-T1BrainConvertion", interface=fs.MRIConvert()
        )
        wm_mask_conv_node = npe.Node(
            name="Reg-0-T1-WMMaskConvertion", interface=fs.MRIConvert()
        )

        # WM Transformation (only if space=b0)
        # -----------------
        wm_transform_node = npe.Node(
            name="Reg-2-WMTransformation", interface=fsl.ApplyXFM()
        )
        wm_transform_node.inputs.apply_xfm = True

        # Nodes Generation
        # ----------------
        label_convert_node = npe.MapNode(
            name="0-LabelsConversion",
            iterfield=["in_file", "in_config", "in_lut", "out_file"],
            interface=mrtrix3.LabelConvert(),
        )
        label_convert_node.inputs.in_config = utils.get_conversion_luts()
        label_convert_node.inputs.in_lut = utils.get_luts()

        # FSL flirt matrix to MRtrix matrix Conversion (only if space=b0)
        # --------------------------------------------
        fsl2mrtrix_conv_node = npe.Node(
            name="Reg-2-FSL2MrtrixConversion",
            interface=niu.Function(
                input_names=[
                    "in_source_image",
                    "in_reference_image",
                    "in_flirt_matrix",
                    "name_output_matrix",
                ],
                output_names=["out_mrtrix_matrix"],
                function=convert_flirt_transformation_to_mrtrix_transformation,
            ),
        )

        # Parc. Transformation (only if space=b0)
        # --------------------
        parc_transform_node = npe.MapNode(
            name="Reg-2-ParcTransformation",
            iterfield=["in_files", "out_filename"],
            interface=MRTransform(),
        )

        # Response Estimation
        # -------------------
        resp_estim_node = npe.Node(
            name="1a-ResponseEstimation", interface=mrtrix3.ResponseSD()
        )
        resp_estim_node.inputs.algorithm = "tournier"

        # FOD Estimation
        # --------------
        fod_estim_node = npe.Node(name="1b-FODEstimation", interface=EstimateFOD())
        fod_estim_node.inputs.algorithm = "csd"

        # Tracts Generation
        # -----------------
        tck_gen_node = npe.Node(name="2-TractsGeneration", interface=Tractography())
        tck_gen_node.inputs.select = self.parameters["n_tracks"]
        tck_gen_node.inputs.algorithm = "iFOD2"

        # Connectome Generation
        # ---------------------
        # Only the parcellation and output filename should be iterable; the tck
        # file stays the same.
        conn_gen_node = npe.MapNode(
            name="3-ConnectomeGeneration",
            iterfield=["in_parc", "out_file"],
            interface=mrtrix3.BuildConnectome(),
        )

        # Print begin message
        # -------------------
        print_begin_message = npe.MapNode(
            interface=niu.Function(
                input_names=["in_bids_or_caps_file"],
                function=utils.print_begin_pipeline,
            ),
            iterfield="in_bids_or_caps_file",
            name="WriteBeginMessage",
        )

        # Print end message
        # -----------------
        print_end_message = npe.MapNode(
            interface=niu.Function(
                input_names=["in_bids_or_caps_file", "final_file"],
                function=utils.print_end_pipeline,
            ),
            iterfield=["in_bids_or_caps_file"],
            name="WriteEndMessage",
        )

        # CAPS File names Generation
        # --------------------------
        caps_filenames_node = npe.Node(
            name="CAPSFilenamesGeneration",
            interface=niu.Function(
                input_names="dwi_file",
                output_names=self.get_output_fields(),
                function=utils.get_caps_filenames,
            ),
        )

        # Connections
        # ===========
        # Computation of the diffusion model, tractography & connectome
        # -------------------------------------------------------------
        # fmt: off
        self.connect(
            [
                (self.input_node, print_begin_message, [("dwi_file", "in_bids_or_caps_file")]),
                (self.input_node, caps_filenames_node, [("dwi_file", "dwi_file")]),
                # Response Estimation
                (self.input_node, resp_estim_node, [("dwi_file", "in_file")]),  # Preproc. DWI
                (self.input_node, resp_estim_node, [("dwi_brainmask_file", "in_mask")]),  # B0 brain mask
                (self.input_node, resp_estim_node, [("grad_fsl", "grad_fsl")]),  # bvecs and bvals
                (caps_filenames_node, resp_estim_node, [("response", "wm_file")]),  # output response filename
                # FOD Estimation
                (self.input_node, fod_estim_node, [("dwi_file", "in_file")]),  # Preproc. DWI
                (resp_estim_node, fod_estim_node, [("wm_file", "wm_txt")]),  # Response (txt file)
                (self.input_node, fod_estim_node, [("dwi_brainmask_file", "mask_file")]),  # B0 brain mask
                (self.input_node, fod_estim_node, [("grad_fsl", "grad_fsl")]),  # T1-to-B0 matrix file
                (caps_filenames_node, fod_estim_node, [("fod", "wm_odf")]),  # output odf filename
                # Tracts Generation
                (fod_estim_node, tck_gen_node, [("wm_odf", "in_file")]),  # ODF file
                (caps_filenames_node, tck_gen_node, [("tracts", "out_file")]),  # output tck filename
                # Label Conversion
                (self.input_node, label_convert_node, [("atlas_files", "in_file")]),  # atlas image files
                (caps_filenames_node, label_convert_node, [("nodes", "out_file")]),  # converted atlas image filenames
                # Connectomes Generation
                (tck_gen_node, conn_gen_node, [("out_file", "in_file")]),
                (caps_filenames_node, conn_gen_node, [("connectomes", "out_file")]),
            ]
        )
        # Registration T1-DWI (only if space=b0)
        # -------------------
        if self.parameters["dwi_space"] == "b0":
            self.connect(
                [
                    # MGZ Files Conversion
                    (self.input_node, t1_brain_conv_node, [("t1_brain_file", "in_file")]),
                    (self.input_node, wm_mask_conv_node, [("wm_mask_file", "in_file")]),
                    # B0 Extraction
                    (self.input_node, split_node, [("dwi_file", "in_file")]),
                    (split_node, select_node, [("out_files", "inlist")]),
                    # Masking
                    (select_node, mask_node, [("out", "in_file")]),  # B0
                    (self.input_node, mask_node, [("dwi_brainmask_file", "mask_file")]),  # Brain mask
                    # T1-to-B0 Registration
                    (t1_brain_conv_node, t12b0_reg_node, [("out_file", "in_file")]),  # Brain
                    (mask_node, t12b0_reg_node, [("out_file", "reference")]),  # B0 brain-masked
                    # WM Transformation
                    (wm_mask_conv_node, wm_transform_node, [("out_file", "in_file")]),  # WM mask
                    (mask_node, wm_transform_node, [("out_file", "reference")]),  # B0 brain-masked
                    (t12b0_reg_node, wm_transform_node, [("out_matrix_file", "in_matrix_file")]),  # T1-to-B0 matrix file
                    # FSL flirt matrix to MRtrix matrix Conversion
                    (t1_brain_conv_node, fsl2mrtrix_conv_node, [("out_file", "in_source_image")]),
                    (mask_node, fsl2mrtrix_conv_node, [("out_file", "in_reference_image")]),
                    (t12b0_reg_node, fsl2mrtrix_conv_node, [("out_matrix_file", "in_flirt_matrix")]),
                    # Apply registration without resampling on parcellations
                    (label_convert_node, parc_transform_node, [("out_file", "in_files")]),
                    (fsl2mrtrix_conv_node, parc_transform_node, [("out_mrtrix_matrix", "linear_transform")]),
                    (caps_filenames_node, parc_transform_node, [("nodes", "out_filename")]),
                ]
            )
        # Special care for Parcellation & WM mask
        # ---------------------------------------
        if self.parameters["dwi_space"] == "b0":
            self.connect(
                [
                    (wm_transform_node, tck_gen_node, [("out_file", "seed_image")]),
                    (parc_transform_node, conn_gen_node, [("out_file", "in_parc")]),
                    (parc_transform_node, self.output_node, [("out_file", "nodes")]),
                ]
            )
        elif self.parameters["dwi_space"] == "T1w":
            self.connect(
                [
                    (self.input_node, tck_gen_node, [("wm_mask_file", "seed_image")]),
                    (label_convert_node, conn_gen_node, [("out_file", "in_parc")]),
                    (label_convert_node, self.output_node, [("out_file", "nodes")]),
                ]
            )
        else:
            raise ClinicaCAPSError(
                "Bad preprocessed DWI space. Please check your CAPS folder."
            )
        # Outputs
        # -------
        self.connect(
            [
                (resp_estim_node, self.output_node, [("wm_file", "response")]),
                (fod_estim_node, self.output_node, [("wm_odf", "fod")]),
                (tck_gen_node, self.output_node, [("out_file", "tracts")]),
                (conn_gen_node, self.output_node, [("out_file", "connectomes")]),
                (self.input_node, print_end_message, [("dwi_file", "in_bids_or_caps_file")]),
                (conn_gen_node, print_end_message, [("out_file", "final_file")]),
            ]
        )
        # fmt: on
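Several nodes above (the begin/end message printers, the CAPS filename generator, the FSL-to-MRtrix matrix conversion) wrap plain Python functions as pipeline nodes through `niu.Function`. A minimal, self-contained sketch of that mechanism, with a made-up `add_one` function for illustration:

    import nipype.interfaces.utility as niu
    import nipype.pipeline.engine as npe

    def add_one(x):
        # The body must be self-contained: nipype serializes the function
        # source and re-executes it in the node's working directory.
        return x + 1

    node = npe.Node(
        name="AddOne",
        interface=niu.Function(input_names=["x"], output_names=["out"], function=add_one),
    )
    node.inputs.x = 1
    result = node.run()
    print(result.outputs.out)  # 2
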
Code example #15
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.input_files import T1_FS_DESTRIEUX
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.longitudinal import (
            get_long_id,
            get_participants_long_id,
            read_sessions,
        )
        from clinica.utils.participant import (
            get_unique_subjects,
            unique_subjects_sessions_to_subjects_sessions,
        )
        from clinica.utils.stream import cprint

        from .longitudinal_utils import (
            extract_participant_long_ids_from_filename,
            save_part_sess_long_ids_to_tsv,
        )

        # Display image(s) already present in CAPS folder
        # ===============================================
        output_ids = self.get_processed_images(
            self.caps_directory, self.subjects, self.sessions
        )
        (
            processed_participants,
            processed_long_sessions,
        ) = extract_participant_long_ids_from_filename(output_ids)
        if len(processed_participants) > 0:
            cprint(
                msg=(
                    f"Clinica found {len(processed_participants)} participant(s) "
                    "already processed in CAPS directory:"
                ),
                lvl="warning",
            )
            for p_id, l_id in zip(processed_participants, processed_long_sessions):
                cprint(f"{p_id} | {l_id}", lvl="warning")
            if self.overwrite_caps:
                output_folder = "<CAPS>/subjects/<participant_id>/<long_id>/freesurfer_unbiased_template/"
                cprint(f"Output folders in {output_folder} will be recreated.", lvl="warning")
            else:
                cprint("Participant(s) will be ignored by Clinica.", lvl="warning")
                input_ids = [
                    p_id + "_" + s_id
                    for p_id, s_id in zip(self.subjects, self.sessions)
                ]
                processed_sessions_per_participant = [
                    read_sessions(self.caps_directory, p_id, l_id)
                    for (p_id, l_id) in zip(
                        processed_participants, processed_long_sessions
                    )
                ]
                participants, sessions = unique_subjects_sessions_to_subjects_sessions(
                    processed_participants, processed_sessions_per_participant
                )
                processed_ids = [
                    p_id + "_" + s_id for p_id, s_id in zip(participants, sessions)
                ]
                to_process_ids = list(set(input_ids) - set(processed_ids))
                self.subjects, self.sessions = extract_subjects_sessions_from_filename(
                    to_process_ids
                )

        # Check that t1-freesurfer has run on the CAPS directory
        try:
            clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory, T1_FS_DESTRIEUX
            )
        except ClinicaException as e:
            err_msg = (
                "Clinica faced error(s) while trying to read files in your CAPS directory.\n"
                + str(e)
            )
            raise ClinicaCAPSError(err_msg)

        # Save subjects to process in <WD>/<Pipeline.name>/participants.tsv
        folder_participants_tsv = os.path.join(self.base_dir, self.name)
        long_ids = get_participants_long_id(self.subjects, self.sessions)
        save_part_sess_long_ids_to_tsv(
            self.subjects, self.sessions, long_ids, folder_participants_tsv
        )

        [list_participant_id, list_list_session_ids] = get_unique_subjects(
            self.subjects, self.sessions
        )
        list_long_id = [
            get_long_id(list_session_ids) for list_session_ids in list_list_session_ids
        ]

        def print_images_to_process(
            unique_part_list, per_part_session_list, list_part_long_id
        ):
            cprint(
                f"The pipeline will be run on the following {len(unique_part_list)} participant(s):"
            )
            for (part_id, list_sess_id, list_id) in zip(
                unique_part_list, per_part_session_list, list_part_long_id
            ):
                sessions_participant = ", ".join(s_id for s_id in list_sess_id)
                cprint(f"\t{part_id} | {sessions_participant} | {list_id}")

        if len(self.subjects):
            # TODO: Generalize long IDs to the message display
            print_images_to_process(
                list_participant_id, list_list_session_ids, list_long_id
            )
            cprint(
                "List available in %s"
                % os.path.join(folder_participants_tsv, "participants.tsv")
            )
            cprint("The pipeline will last approximately 10 hours per participant.")

        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ("participant_id", list_participant_id),
                ("list_session_ids", list_list_session_ids),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        # fmt: off
        self.connect(
            [
                (read_node, self.input_node, [("participant_id", "participant_id")]),
                (read_node, self.input_node, [("list_session_ids", "list_session_ids")]),
            ]
        )
        # fmt: on
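The `iterables` + `synchronize=True` combination on the `ReadingFiles` node makes nipype iterate over the participant and session lists in lockstep (zipped pairs) rather than over their Cartesian product. A minimal workflow illustrating the behavior, with made-up field names and values:

    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe

    def report(a, b):
        return "%s-%s" % (a, b)

    read = npe.Node(
        nutil.IdentityInterface(fields=["a", "b"]),
        name="Read",
        iterables=[("a", [1, 2]), ("b", ["x", "y"])],
        synchronize=True,  # pairs (1, 'x') and (2, 'y'), not the 2x2 product
    )
    echo = npe.Node(
        nutil.Function(input_names=["a", "b"], output_names=["out"], function=report),
        name="Echo",
    )
    wf = npe.Workflow(name="sync_demo")
    wf.connect([(read, echo, [("a", "a"), ("b", "b")])])
    wf.run()  # runs Echo twice, once per synchronized pair
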
Code example #16
File: dwi_dti_pipeline.py Project: yogeshmj/clinica
    def build_input_node(self):
        """Build and connect an input node to the pipelines.
        """

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        import clinica.utils.input_files as input_files
        from clinica.utils.stream import cprint

        all_errors = []

        # b0 Mask
        try:
            b0_mask = clinica_file_reader(self.subjects, self.sessions,
                                          self.caps_directory,
                                          input_files.DWI_PREPROC_BRAINMASK)
        except ClinicaException as e:
            all_errors.append(e)

        # DWI preprocessing NIfTI
        try:
            dwi_caps = clinica_file_reader(self.subjects, self.sessions,
                                           self.caps_directory,
                                           input_files.DWI_PREPROC_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # bval files
        try:
            bval_files = clinica_file_reader(self.subjects, self.sessions,
                                             self.caps_directory,
                                             input_files.DWI_PREPROC_BVAL)
        except ClinicaException as e:
            all_errors.append(e)

        # bvec files
        try:
            bvec_files = clinica_file_reader(self.subjects, self.sessions,
                                             self.caps_directory,
                                             input_files.DWI_PREPROC_BVEC)
        except ClinicaException as e:
            all_errors.append(e)

        if len(all_errors) > 0:
            error_message = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)

        read_input_node = npe.Node(name="LoadingCLIArguments",
                                   interface=nutil.IdentityInterface(
                                       fields=self.get_input_fields(),
                                       mandatory_inputs=True),
                                   iterables=[('preproc_dwi', dwi_caps),
                                              ('preproc_bvec', bvec_files),
                                              ('preproc_bval', bval_files),
                                              ('b0_mask', b0_mask)],
                                   synchronize=True)

        self.connect([
            (read_input_node, self.input_node, [('b0_mask', 'b0_mask')]),
            (read_input_node, self.input_node, [('preproc_dwi', 'preproc_dwi')]),
            (read_input_node, self.input_node, [('preproc_bval', 'preproc_bval')]),
            (read_input_node, self.input_node, [('preproc_bvec', 'preproc_bvec')])
        ])
        cprint('The pipeline will last approximately 20 minutes per image.')
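The `all_errors` pattern above (try each reader, collect the exceptions, raise a single error at the end) is what lets Clinica report every missing file at once instead of failing on the first one. A generic sketch of the same idea, with a hypothetical `read_all_or_report` helper:

    from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException

    def read_all_or_report(readers):
        """Call each zero-argument reader; collect failures and raise them together."""
        results, errors = [], []
        for reader in readers:
            try:
                results.append(reader())
            except ClinicaException as e:
                errors.append(e)
        if errors:
            message = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n'
            message += ''.join(str(e) for e in errors)
            raise ClinicaCAPSError(message)
        return results
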
Code example #17
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.input_files import (
            pet_volume_normalized_suvr_pet,
            t1_volume_final_group_template,
        )
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.ux import print_groups_in_caps_directory

        # Check that group already exists
        if not os.path.exists(
            os.path.join(
                self.caps_directory, "groups", "group-" + self.parameters["group_label"]
            )
        ):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                f"Group {self.parameters['group_label']} does not exist. "
                "Did you run pet-volume, t1-volume or t1-volume-create-dartel pipeline?"
            )

        read_parameters_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(
                fields=self.get_input_fields(), mandatory_inputs=True
            ),
        )
        all_errors = []

        if self.parameters["orig_input_data"] == "t1-volume":
            caps_files_information = {
                "pattern": os.path.join(
                    "t1",
                    "spm",
                    "dartel",
                    "group-" + self.parameters["group_label"],
                    "*_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability.nii.gz",
                ),
                "description": "graymatter tissue segmented in T1w MRI in Ixi549 space",
                "needed_pipeline": "t1-volume-tissue-segmentation",
            }
        elif self.parameters["orig_input_data"] == "pet-volume":
            if not (
                self.parameters["acq_label"]
                and self.parameters["suvr_reference_region"]
            ):
                raise ValueError(
                    f"Missing value(s) in parameters from pet-volume pipeline. Given values:\n"
                    f"- acq_label: {self.parameters['acq_label']}\n"
                    f"- suvr_reference_region: {self.parameters['suvr_reference_region']}\n"
                    f"- use_pvc_data: {self.parameters['use_pvc_data']}\n"
                )
            caps_files_information = pet_volume_normalized_suvr_pet(
                acq_label=self.parameters["acq_label"],
                suvr_reference_region=self.parameters["suvr_reference_region"],
                use_brainmasked_image=False,
                use_pvc_data=self.parameters["use_pvc_data"],
                fwhm=0,
            )
        else:
            raise ValueError(
                f"Image type {self.parameters['orig_input_data']} unknown."
            )

        try:
            input_image = clinica_file_reader(
                self.subjects,
                self.sessions,
                self.caps_directory,
                caps_files_information,
            )
        except ClinicaException as e:
            all_errors.append(e)

        try:
            dartel_input = clinica_group_reader(
                self.caps_directory,
                t1_volume_final_group_template(self.parameters["group_label"]),
            )
        except ClinicaException as e:
            all_errors.append(e)

        # Raise all errors if some happened
        if len(all_errors) > 0:
            error_message = "Clinica faced errors while trying to read files in your CAPS directories.\n"
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)

        read_parameters_node.inputs.dartel_input = dartel_input
        read_parameters_node.inputs.input_image = input_image

        # fmt: off
        self.connect(
            [
                (read_parameters_node, self.input_node, [("dartel_input", "dartel_input")]),
                (read_parameters_node, self.input_node, [("input_image", "input_image")]),
            ]
        )
        # fmt: on
Code example #18
def clinica_file_reader(subjects,
                        sessions,
                        input_directory,
                        information,
                        raise_exception=True):
    """
    This function grabs files relative to a subject and session list according to a glob pattern (using *).
    Args:
        subjects: list of subjects
        sessions: list of sessions (must be the same size as subjects, and must correspond)
        input_directory: location of the BIDS or CAPS directory
        information: dictionary containing all the relevant information to look for the files. The dict must contain
                     the following keys: pattern, description. The optional key is: needed_pipeline
                             pattern: defines the pattern of the final file
                             description: string describing what the file is
                             needed_pipeline (optional): string describing the pipeline(s) needed to obtain the related
                                                        file
        raise_exception: if True (normal behavior), an exception is raised if errors happen. If not, the file list is
                        returned as it is

    Returns:
         list of files respecting the subject/session order provided in input.
         You should always use clinica_file_reader in the following manner:
         try:
            file_list = clinica_file_reader(...)
         except ClinicaException as e:
            # Deal with the error

    Raises:
        ClinicaCAPSError or ClinicaBIDSError if multiple files are found for one subject/session, or if no file is found.
        If raise_exception is False, no exception is raised.

        Examples: (paths are shortened for readability)
            - You have the full name of a file:
                File orig_nu.mgz from FreeSurfer of subject sub-ADNI011S4105, session ses-M00, located in the mri
                folder of the FreeSurfer output:
                    clinica_file_reader(['sub-ADNI011S4105'],
                                        ['ses-M00'],
                                        caps_directory,
                                        {'pattern': 'freesurfer_cross_sectional/sub-*_ses-*/mri/orig_nu.mgz',
                                         'description': 'freesurfer file orig_nu.mgz',
                                         'needed_pipeline': 't1-freesurfer'})
                    gives: ['/caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/sub-ADNI011S4105_ses-M00/mri/orig_nu.mgz']

            - You have a partial name of the file:
                File sub-ADNI011S4105_ses-M00_task-rest_acq-FDG_pet.nii.gz in the BIDS directory. Here, the filename
                depends on the subject and session names:
                     clinica_file_reader(['sub-ADNI011S4105'],
                                         ['ses-M00'],
                                         bids_directory,
                                         {'pattern': '*fdg_pet.nii*',
                                          'description': 'FDG PET data'})
                     gives: ['/bids/sub-ADNI011S4105/ses-M00/pet/sub-ADNI011S4105_ses-M00_task-rest_acq-FDG_pet.nii.gz']

            - Tricky example:
                Get the file rh.white from FreeSurfer:
                If you try:
                    clinica_file_reader(['sub-ADNI011S4105'],
                                        ['ses-M00'],
                                        caps,
                                        {'pattern': 'rh.white',
                                         'description': 'right hemisphere of the outer cortical surface.',
                                         'needed_pipeline': 't1-freesurfer'})
                        the following error will arise:
                        * More than 1 file found:
                            /caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/fsaverage/surf/rh.white
                            /caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/rh.EC_average/surf/rh.white
                            /caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/sub-ADNI011S4105_ses-M00/surf/rh.white
                Correct usage (e.g. in pet-surface): the pattern string must be 'sub-*_ses-*/surf/rh.white' or, even more precisely:
                        't1/freesurfer_cross_sectional/sub-*_ses-*/surf/rh.white'
                    It then gives: ['/caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/sub-ADNI011S4105_ses-M00/surf/rh.white']

        Note:
            This function is case insensitive, meaning that the pattern argument can, for example, contain uppercase
            letters that do not exist in the actual file path.

    """

    from os.path import join
    from colorama import Fore
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaCAPSError

    assert isinstance(
        information, dict), 'A dict must be provided for the argument \'information\''
    assert all(
        elem in information.keys() for elem in ['pattern', 'description']
    ), '\'information\' must contain the keys \'pattern\' and \'description\''
    assert all(
        elem in ['pattern', 'description', 'needed_pipeline']
        for elem in information.keys()
    ), '\'information\' can only contain the keys \'pattern\', \'description\' and \'needed_pipeline\''

    pattern = information['pattern']
    is_bids = determine_caps_or_bids(input_directory)

    if is_bids:
        check_bids_folder(input_directory)
    else:
        check_caps_folder(input_directory)

    # Some checks on the formatting of the data
    assert pattern[0] != '/', 'pattern argument cannot start with char: / (does not work in os.path.join function). ' \
                              + 'If you want to indicate the exact name of the file, use the format' \
                              + ' directory_name/filename.extension or filename.extension in the pattern argument'
    assert len(subjects) == len(
        sessions), 'Subjects and sessions must have the same length'
    if len(subjects) == 0:
        return []

    # results is the list containing the files found
    results = []
    # error_encountered is the list of the errors that happen during the whole process
    error_encountered = []
    for sub, ses in zip(subjects, sessions):
        if is_bids:
            origin_pattern = join(input_directory, sub, ses)
        else:
            origin_pattern = join(input_directory, 'subjects', sub, ses)

        current_pattern = join(origin_pattern, '**/', pattern)
        current_glob_found = insensitive_glob(current_pattern, recursive=True)

        # Error handling if more than 1 file is found, or when no file is found
        if len(current_glob_found) > 1:
            error_str = '\t*' + Fore.BLUE + ' (' + sub + ' | ' + ses + ') ' + Fore.RESET + ': More than 1 file found:\n'
            for found_file in current_glob_found:
                error_str += '\t\t' + found_file + '\n'
            error_encountered.append(error_str)
        elif len(current_glob_found) == 0:
            error_encountered.append('\t*' + Fore.BLUE + ' (' + sub + ' | ' +
                                     ses + ') ' + Fore.RESET +
                                     ': No file found\n')
        # Otherwise the file found is added to the result
        else:
            results.append(current_glob_found[0])

    # Errors are not raised immediately so that the developer can gather all the problems before Clinica crashes
    if len(error_encountered) > 0 and raise_exception is True:
        error_message = Fore.RED + '\n[Error] Clinica encountered ' + str(len(error_encountered)) \
                        + ' problem(s) while getting ' + information['description'] + ':\n' + Fore.RESET
        if 'needed_pipeline' in information.keys():
            if information['needed_pipeline']:
                error_message += Fore.YELLOW + 'Please note that the following clinica pipeline(s) must have run ' \
                                 'to obtain these files: ' + information['needed_pipeline'] + Fore.RESET + '\n'
        for msg in error_encountered:
            error_message += msg
        if is_bids:
            raise ClinicaBIDSError(error_message)
        else:
            raise ClinicaCAPSError(error_message)
    return results
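Following the usage prescribed in the docstring, callers wrap `clinica_file_reader` in a try/except block. The sketch below reuses the rh.white pattern from the docstring; the CAPS path is a placeholder:

    from clinica.utils.exceptions import ClinicaException
    from clinica.utils.inputs import clinica_file_reader

    caps_directory = '/path/to/caps'  # placeholder

    try:
        file_list = clinica_file_reader(
            ['sub-ADNI011S4105'],
            ['ses-M00'],
            caps_directory,
            {'pattern': 'sub-*_ses-*/surf/rh.white',
             'description': 'right hemisphere of the outer cortical surface',
             'needed_pipeline': 't1-freesurfer'})
    except ClinicaException as e:
        # Deal with the error, e.g. accumulate it before aborting the pipeline.
        print(e)
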
Code example #19
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        import re

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        import clinica.utils.input_files as input_files
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.filemanip import save_participants_sessions
        from clinica.utils.inputs import clinica_list_of_files_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # Read CAPS files
        list_caps_files = clinica_list_of_files_reader(
            self.subjects,
            self.sessions,
            self.caps_directory,
            [
                # Inputs from t1-freesurfer pipeline
                input_files.T1_FS_WM,  # list_caps_files[0]
                input_files.T1_FS_DESIKAN,  # list_caps_files[1]
                input_files.T1_FS_DESTRIEUX,  # list_caps_files[2]
                input_files.T1_FS_BRAIN,  # list_caps_files[3]
                # Inputs from dwi-preprocessing pipeline
                input_files.DWI_PREPROC_NII,  # list_caps_files[4]
                input_files.DWI_PREPROC_BRAINMASK,  # list_caps_files[5]
                input_files.DWI_PREPROC_BVEC,  # list_caps_files[6]
                input_files.DWI_PREPROC_BVAL,  # list_caps_files[7]
            ],
            raise_exception=True,
        )

        # Check space of DWI dataset
        dwi_file_spaces = [
            re.search(".*_space-(.*)_preproc.nii.*", file, re.IGNORECASE).group(1)
            for file in list_caps_files[4]
        ]

        # Raise an error if the DWI files are not all in the same space
        if any(a != dwi_file_spaces[0] for a in dwi_file_spaces):
            raise ClinicaCAPSError(
                "Preprocessed DWI files are not all in the same space. "
                "Please process them separately using the appropriate subjects/sessions `.tsv` file (-tsv option)."
            )
        list_atlas_files = [
            [aparc_aseg, aparc_aseg_a2009]
            for aparc_aseg, aparc_aseg_a2009 in zip(
                list_caps_files[1], list_caps_files[2]
            )
        ]

        list_grad_fsl = [
            (bvec, bval) for bvec, bval in zip(list_caps_files[6], list_caps_files[7])
        ]

        # Save subjects to process in <WD>/<Pipeline.name>/participants.tsv
        folder_participants_tsv = os.path.join(self.base_dir, self.name)
        save_participants_sessions(
            self.subjects, self.sessions, folder_participants_tsv
        )

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                "Computational time will depend of the number of volumes in your DWI dataset and "
                "the number of streamlines you selected."
            )

        if dwi_file_spaces[0] == "b0":
            self.parameters["dwi_space"] = "b0"
            read_node = npe.Node(
                name="ReadingFiles",
                iterables=[
                    ("wm_mask_file", list_caps_files[0]),
                    ("t1_brain_file", list_caps_files[3]),
                    ("dwi_file", list_caps_files[4]),
                    ("dwi_brainmask_file", list_caps_files[5]),
                    ("grad_fsl", list_grad_fsl),
                    ("atlas_files", list_atlas_files),
                ],
                synchronize=True,
                interface=nutil.IdentityInterface(fields=self.get_input_fields()),
            )
            # fmt: off
            self.connect(
                [
                    (read_node, self.input_node, [("t1_brain_file", "t1_brain_file")]),
                    (read_node, self.input_node, [("wm_mask_file", "wm_mask_file")]),
                    (read_node, self.input_node, [("dwi_file", "dwi_file")]),
                    (read_node, self.input_node, [("dwi_brainmask_file", "dwi_brainmask_file")]),
                    (read_node, self.input_node, [("grad_fsl", "grad_fsl")]),
                    (read_node, self.input_node, [("atlas_files", "atlas_files")]),
                ]
            )
            # fmt: on
        elif dwi_file_spaces[0] == "T1w":
            self.parameters["dwi_space"] = "T1w"
            read_node = npe.Node(
                name="ReadingFiles",
                iterables=[
                    ("wm_mask_file", list_caps_files[0]),
                    ("dwi_file", list_caps_files[4]),
                    ("dwi_brainmask_file", list_caps_files[5]),
                    ("grad_fsl", list_grad_fsl),
                    ("atlas_files", list_atlas_files),
                ],
                synchronize=True,
                interface=nutil.IdentityInterface(fields=self.get_input_fields()),
            )
            # fmt: off
            self.connect(
                [
                    (read_node, self.input_node, [("wm_mask_file", "wm_mask_file")]),
                    (read_node, self.input_node, [("dwi_file", "dwi_file")]),
                    (read_node, self.input_node, [("dwi_brainmask_file", "dwi_brainmask_file")]),
                    (read_node, self.input_node, [("grad_fsl", "grad_fsl")]),
                    (read_node, self.input_node, [("atlas_files", "atlas_files")]),
                ]
            )
            # fmt: on
        else:
            raise ClinicaCAPSError(
                "Bad preprocessed DWI space. Please check your CAPS folder."
            )
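The space detection above relies on the `_space-<label>_` entity embedded in preprocessed DWI filenames. A quick check of the regular expression against a made-up CAPS filename:

    import re

    filename = 'sub-01_ses-M00_space-T1w_preproc.nii.gz'  # hypothetical example
    match = re.search('.*_space-(.*)_preproc.nii.*', filename, re.IGNORECASE)
    print(match.group(1))  # 'T1w'
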
Code example #20
    def build_input_node(self):
        """Build and connect an input node to the pipeline.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.iotools.grabcaps import CAPSLayout
        from clinica.utils.stream import cprint

        from clinica.utils.exceptions import ClinicaCAPSError

        # Reading CAPS files
        # ==================
        caps_layout = CAPSLayout(self.caps_directory)

        wm_mask_file = []
        dwi_file_space = []
        t1_brain_file = []
        dwi_file = []
        dwi_brainmask_file = []
        bvec = []
        bval = []
        grad_fsl = []
        aparc_aseg = []
        aparc_aseg_a2009s = []
        atlas_files = []
        cprint('Extracting files...')
        for i in range(len(self.subjects)):
            cprint('\t...subject \'' + str(
                    self.subjects[i][4:]) + '\', session \'' + str(
                    self.sessions[i][4:]) + '\'')

            wm_mask_file.append(caps_layout.get(freesurfer_file='wm.seg.mgz',
                                                session=self.sessions[i][4:],
                                                subject=self.subjects[i][4:],
                                                return_type='file')[0])

            dwi_file_space.append(
                    caps_layout.get(type='dwi', suffix='preproc',
                                    target='space',
                                    session=self.sessions[i][4:],
                                    extensions='nii.gz',
                                    subject=self.subjects[i][4:],
                                    return_type='id')[0])

            if dwi_file_space[i] == 'b0':
                t1_brain_file.append(
                        caps_layout.get(freesurfer_file='brain.mgz',
                                        session=self.sessions[i][4:],
                                        subject=self.subjects[i][4:],
                                        return_type='file')[0])

            aparc_aseg.append(
                    caps_layout.get(freesurfer_file=r'aparc\+aseg.mgz',
                                    session=self.sessions[i][4:],
                                    subject=self.subjects[i][4:],
                                    return_type='file')[0])

            aparc_aseg_a2009s.append(
                    caps_layout.get(freesurfer_file=r'aparc.a2009s\+aseg.mgz',
                                    session=self.sessions[i][4:],
                                    subject=self.subjects[i][4:],
                                    return_type='file')[0])

            atlas_files.append([aparc_aseg[i], aparc_aseg_a2009s[i]])

            dwi_file.append(
                    caps_layout.get(type='dwi', suffix='preproc',
                                    space=dwi_file_space[i],
                                    session=self.sessions[i][4:],
                                    extensions='nii.gz',
                                    subject=self.subjects[i][4:],
                                    return_type='file')[0])

            dwi_brainmask_file.append(
                    caps_layout.get(type='dwi', suffix='brainmask',
                                    space=dwi_file_space[i],
                                    session=self.sessions[i][4:],
                                    extensions='nii.gz',
                                    subject=self.subjects[i][4:],
                                    return_type='file')[0])

            bvec.append(
                    caps_layout.get(type='dwi', suffix='preproc',
                                    space=dwi_file_space[i],
                                    session=self.sessions[i][4:],
                                    extensions='bvec',
                                    subject=self.subjects[i][4:],
                                    return_type='file')[0])

            bval.append(
                    caps_layout.get(type='dwi', suffix='preproc',
                                    space=dwi_file_space[i],
                                    session=self.sessions[i][4:],
                                    extensions='bval',
                                    subject=self.subjects[i][4:],
                                    return_type='file')[0])

            grad_fsl.append((bvec[i], bval[i]))

        # Raise an error if the DWI files are not all in the same space
        if any(a != dwi_file_space[0] for a in dwi_file_space):
            raise ClinicaCAPSError('Preprocessed DWI files are not all in the '
                                   'same space. Please process them separately '
                                   'using the appropriate subjects/sessions '
                                   '`.tsv` file (-tsv option).')

        elif dwi_file_space[0] == 'b0':
            self.parameters['dwi_space'] = 'b0'
            read_node = npe.Node(name="ReadingFiles",
                                 iterables=[
                                     ('wm_mask_file', wm_mask_file),
                                     ('t1_brain_file', t1_brain_file),
                                     ('dwi_file', dwi_file),
                                     ('dwi_brainmask_file', dwi_brainmask_file),
                                     ('grad_fsl', grad_fsl),
                                     ('atlas_files', atlas_files)
                                 ],
                                 synchronize=True,
                                 interface=nutil.IdentityInterface(
                                         fields=self.get_input_fields()))
            self.connect([
                (read_node, self.input_node, [('t1_brain_file', 't1_brain_file')]),
                (read_node, self.input_node, [('wm_mask_file', 'wm_mask_file')]),
                (read_node, self.input_node, [('dwi_file', 'dwi_file')]),
                (read_node, self.input_node, [('dwi_brainmask_file', 'dwi_brainmask_file')]),
                (read_node, self.input_node, [('grad_fsl', 'grad_fsl')]),
                (read_node, self.input_node, [('atlas_files', 'atlas_files')]),
            ])

        elif dwi_file_space[0] == 'T1w':
            self.parameters['dwi_space'] = 'T1w'
            read_node = npe.Node(name="ReadingFiles",
                                 iterables=[
                                     ('wm_mask_file', wm_mask_file),
                                     ('dwi_file', dwi_file),
                                     ('dwi_brainmask_file', dwi_brainmask_file),
                                     ('grad_fsl', grad_fsl),
                                     ('atlas_files', atlas_files)
                                 ],
                                 synchronize=True,
                                 interface=nutil.IdentityInterface(
                                         fields=self.get_input_fields()))
            self.connect([
                (read_node, self.input_node, [('wm_mask_file', 'wm_mask_file')]),
                (read_node, self.input_node, [('dwi_file', 'dwi_file')]),
                (read_node, self.input_node, [('dwi_brainmask_file', 'dwi_brainmask_file')]),
                (read_node, self.input_node, [('grad_fsl', 'grad_fsl')]),
                (read_node, self.input_node, [('atlas_files', 'atlas_files')]),
            ])

        else:
            raise ClinicaCAPSError('Bad preprocessed DWI space. Please check '
                                   'your CAPS folder.')
Code example #21
    def build_input_node(self):
        """Build and connect an input node to the pipeline.
        """
        import os
        from colorama import Fore
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.input_files import t1_volume_final_group_template
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.ux import print_groups_in_caps_directory

        # Check that group already exists
        if not os.path.exists(os.path.join(self.caps_directory, 'groups', 'group-' + self.parameters['group_id'])):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                '%sGroup %s does not exist. Did you run pet-volume, t1-volume or t1-volume-create-dartel pipeline?%s' %
                (Fore.RED, self.parameters['group_id'], Fore.RESET)
            )

        read_parameters_node = npe.Node(name="LoadingCLIArguments",
                                        interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                                                          mandatory_inputs=True))
        all_errors = []

        if self.parameters['image_type'] == 't1':
            caps_files_information = {
                'pattern': os.path.join('t1', 'spm', 'dartel', 'group-' + self.parameters['group_id'],
                                        '*_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability.nii.gz'),
                'description': 'graymatter tissue segmented in T1w MRI in Ixi549 space',
                'needed_pipeline': 't1-volume-tissue-segmentation'
            }
        elif self.parameters['image_type'] == 'pet':
            if self.parameters['no_pvc']:
                caps_files_information = {
                    'pattern': os.path.join('pet', 'preprocessing', 'group-' + self.parameters['group_id'],
                                            '*_pet_space-Ixi549Space_suvr-pons_pet.nii.gz'),
                    'description': self.parameters['pet_tracer'] + ' PET in Ixi549 space',
                    'needed_pipeline': 'pet-volume'
                }
            else:
                caps_files_information = {
                    'pattern': os.path.join('pet', 'preprocessing', 'group-' + self.parameters['group_id'],
                                            '*_pet_space-Ixi549Space_pvc-rbv_suvr-pons_pet.nii.gz'),
                    'description': self.parameters['pet_tracer'] + ' PET partial volume corrected (RBV) in Ixi549 space',
                    'needed_pipeline': 'pet-volume with PVC'
                }
        else:
            raise ValueError('Image type ' + self.parameters['image_type'] + ' unknown.')

        try:
            input_image = clinica_file_reader(self.subjects,
                                              self.sessions,
                                              self.caps_directory,
                                              caps_files_information)
        except ClinicaException as e:
            all_errors.append(e)

        try:
            dartel_input = clinica_group_reader(self.caps_directory,
                                                t1_volume_final_group_template(self.parameters['group_id']))
        except ClinicaException as e:
            all_errors.append(e)

        # Raise all errors if some happened
        if len(all_errors) > 0:
            error_message = 'Clinica faced errors while trying to read files in your CAPS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)

        read_parameters_node.inputs.dartel_input = dartel_input
        read_parameters_node.inputs.input_image = input_image

        self.connect([
            (read_parameters_node, self.input_node, [('dartel_input', 'dartel_input')]),
            (read_parameters_node, self.input_node, [('input_image', 'input_image')])
        ])