예제 #1
0
파일: inputs.py 프로젝트: yogeshmj/clinica
def check_bids_folder(bids_directory):
    """
    check_bids_folder function checks the following items:
        - bids_directory is a string
        - the provided path exists and is a directory
        - provided path is not a CAPS folder (BIDS and CAPS could be swapped by user). We simply check that there is
          not a folder called 'subjects' in the provided path (that exists in CAPS hierarchy)
        - provided folder is not empty
        - provided folder must contain at least one directory whose name starts with 'sub-'

    Args:
        bids_directory (str): path of the folder to check.

    Raises:
        ClinicaBIDSError: if any of the checks above fails.
    """
    from os import listdir
    from os.path import isdir, join

    from colorama import Fore

    from clinica.utils.exceptions import ClinicaBIDSError

    assert isinstance(bids_directory, str), 'Argument you provided to check_bids_folder() is not a string.'

    if not isdir(bids_directory):
        raise ClinicaBIDSError(Fore.RED + '\n[Error] The BIDS directory you gave is not a folder.\n' + Fore.RESET
                               + Fore.YELLOW + '\nError explanations:\n' + Fore.RESET
                               + ' - Clinica expected the following path to be a folder:' + Fore.BLUE + bids_directory
                               + Fore.RESET + '\n'
                               + ' - If you gave relative path, did you run Clinica on the good folder?')

    if isdir(join(bids_directory, 'subjects')):
        raise ClinicaBIDSError(Fore.RED + '\n[Error] The BIDS directory (' + bids_directory + ') you provided seems to '
                               + 'be a CAPS directory due to the presence of a \'subjects\' folder.' + Fore.RESET)

    # List the directory only once; it is reused by the emptiness and 'sub-' checks.
    content = listdir(bids_directory)

    if not content:
        raise ClinicaBIDSError(Fore.RED + '\n[Error] The BIDS directory you provided  is empty. (' + bids_directory
                               + ').' + Fore.RESET)

    # any() with a generator avoids building an intermediate list just to count it.
    if not any(item.startswith('sub-') for item in content):
        raise ClinicaBIDSError(Fore.RED + '\n[Error] Your BIDS directory does not contains a single folder whose name '
                               + 'starts with \'sub-\'. Check that your folder follow BIDS standard' + Fore.RESET)
예제 #2
0
파일: inputs.py 프로젝트: ghisvail/clinica
def check_bids_folder(bids_directory):
    """Check BIDS folder.

    This function checks the following items:
        - bids_directory is a string
        - the provided path exists and is a directory
        - provided path is not a CAPS folder (BIDS and CAPS could be swapped by user). We simply check that there is
          not a folder called 'subjects' in the provided path (that exists in CAPS hierarchy)
        - provided folder is not empty
        - provided folder must contain at least one directory whose name starts with 'sub-'

    Args:
        bids_directory (str): Path of the folder to check.

    Raises:
        ClinicaBIDSError: If any of the checks above fails.
    """
    from os import listdir
    from os.path import isdir, join

    from colorama import Fore

    from clinica.utils.exceptions import ClinicaBIDSError

    assert isinstance(
        bids_directory,
        str), "Argument you provided to check_bids_folder() is not a string."

    if not isdir(bids_directory):
        raise ClinicaBIDSError(
            f"{Fore.RED}\n[Error] The BIDS directory you gave is not a folder.\n{Fore.RESET}"
            f"{Fore.YELLOW}\nError explanations:\n{Fore.RESET}"
            f" - Clinica expected the following path to be a folder: {Fore.BLUE}{bids_directory}{Fore.RESET}\n"
            f" - If you gave relative path, did you run Clinica on the good folder?"
        )

    if isdir(join(bids_directory, "subjects")):
        raise ClinicaBIDSError(
            f"{Fore.RED}\n[Error] The BIDS directory ({bids_directory}) you provided seems to "
            f"be a CAPS directory due to the presence of a 'subjects' folder.{Fore.RESET}"
        )

    # List the directory only once; it is reused by the emptiness and 'sub-' checks.
    content = listdir(bids_directory)

    if not content:
        raise ClinicaBIDSError(
            f"{Fore.RED}\n[Error] The BIDS directory you provided  is empty. ({bids_directory}).{Fore.RESET}"
        )

    # any() with a generator avoids building an intermediate list just to count it.
    if not any(item.startswith("sub-") for item in content):
        raise ClinicaBIDSError(
            f"{Fore.RED}\n[Error] Your BIDS directory does not contains a single folder whose name "
            f"starts with 'sub-'. Check that your folder follow BIDS standard.{Fore.RESET}"
        )
예제 #3
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Reads the t1-linear T1w images from the CAPS directory (cropped or
        uncropped variant depending on the ``use_uncropped_image`` parameter),
        stores the slice/patch extraction parameters on the instance, and
        feeds each image to the pipeline through a synchronized identity node.

        Raises:
            ClinicaBIDSError: If reading files in the CAPS directory fails.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.stream import cprint
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import T1W_LINEAR
        from clinica.utils.input_files import T1W_LINEAR_CROPPED
        from clinica.utils.ux import print_images_to_process

        # Select which flavor of the t1-linear output to read.
        if self.parameters.get('use_uncropped_image'):
            FILE_TYPE = T1W_LINEAR
        else:
            FILE_TYPE = T1W_LINEAR_CROPPED

        # T1w_Linear file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.caps_directory, FILE_TYPE)
        except ClinicaException as e:
            # Re-raise with a message that aggregates the reader's error(s).
            err = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n' + str(
                e)
            raise ClinicaBIDSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last approximately 30 seconds per image.'
                   )  # Replace by adequate computational time.

        # Slice-extraction parameters; defaults apply for other extract methods.
        if self.parameters.get('extract_method') == 'slice':
            self.slice_direction = self.parameters.get('slice_direction')
            self.slice_mode = self.parameters.get('slice_mode')
        else:
            self.slice_direction = 'axial'
            self.slice_mode = 'rgb'

        # Patch-extraction parameters; defaults apply for other extract methods.
        if self.parameters.get('extract_method') == 'patch':
            self.patch_size = self.parameters.get('patch_size')
            self.stride_size = self.parameters.get('stride_size')
        else:
            self.patch_size = 50
            self.stride_size = 50

        # The reading node
        # -------------------------
        # Identity node iterating synchronously over every input image
        # (one pipeline run per file).
        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ('input_nifti', t1w_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()))

        self.connect([
            (read_node, self.input_node, [('input_nifti', 'input_nifti')]),
        ])
예제 #4
0
def clinica_list_of_files_reader(
    participant_ids,
    session_ids,
    bids_or_caps_directory,
    list_information,
    raise_exception=True,
):
    """Read several lists of BIDS or CAPS files.

    Calls clinica_file_reader once per dictionary of `list_information` and
    gathers the results. Errors raised by individual calls are collected and,
    if `raise_exception` is set, reported all together at the end.

    Args:
        participant_ids (List[str]): List of participant IDs
            (e.g. ['sub-CLNC01', 'sub-CLNC01', 'sub-CLNC02'])
        session_ids (List[str]): List of session IDs associated to `participant_ids`
            (e.g. ['ses-M00', 'ses-M18', 'ses-M00'])
        bids_or_caps_directory (str): BIDS or CAPS directory
        list_information (List[Dict]): List of dictionaries described in clinica_file_reader
        raise_exception (bool, optional): Raise Exception or not. Defaults to True.

    Returns:
        List[List[str]]: Lists of found files, following the order of
        `list_information`. A query that failed contributes an empty list.
    """
    from .exceptions import ClinicaBIDSError, ClinicaException

    collected_errors = []
    files_per_query = []
    for query in list_information:
        try:
            found = clinica_file_reader(
                participant_ids,
                session_ids,
                bids_or_caps_directory,
                query,
                True,
            )
        except ClinicaException as exc:
            collected_errors.append(exc)
            files_per_query.append([])
        else:
            files_per_query.append(found)

    if collected_errors and raise_exception:
        message = "Clinica faced error(s) while trying to read files in your BIDS or CAPS directory.\n"
        message += "".join(str(exc) for exc in collected_errors)
        raise ClinicaBIDSError(message)

    return files_per_query
예제 #5
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Reads the raw T1w images from the BIDS directory, checks their volume
        location in the world coordinate system, and feeds each image to the
        pipeline through a synchronized identity node.

        Raise:
            ClinicaBIDSError: If there are duplicated files or missing files for any subject
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.iotools.utils.data_handling import (
            check_volume_location_in_world_coordinate_system, )
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # Inputs from anat/ folder
        # ========================
        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            # Re-raise with a message that aggregates the reader's error(s).
            err = f"Clinica faced error(s) while trying to read files in your BIDS directory.\n{str(e)}"
            raise ClinicaBIDSError(err)

        # Sanity-check volume locations; 'skip_question' presumably suppresses
        # an interactive prompt — see data_handling for the exact behavior.
        check_volume_location_in_world_coordinate_system(
            t1w_files,
            self.bids_directory,
            skip_question=self.parameters["skip_question"],
        )

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                "The pipeline will last approximately 10 minutes per image.")

        # Identity node iterating synchronously over every input image
        # (one pipeline run per file).
        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ("t1w", t1w_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        self.connect([
            (read_node, self.input_node, [("t1w", "t1w")]),
        ])
예제 #6
0
def clinica_file_reader(subjects,
                        sessions,
                        input_directory,
                        information,
                        raise_exception=True):
    """
    This function grabs files relative to a subject and session list according to a glob pattern (using *)
    Args:
        subjects: list of subjects
        sessions: list of sessions (must be same size as subjects, and must correspond)
        input_directory: location of the bids or caps directory
        information: dictionary containing all the relevant information to look for the files. Dict must contain the
                     following keys : pattern, description. The optional key is: needed_pipeline
                             pattern: define the pattern of the final file
                             description: string to describe what the file is
                             needed_pipeline (optional): string describing the pipeline(s) needed to obtain the related
                                                        file
        raise_exception: if truthy (normal behavior), an exception is raised if errors happen. If not, we return the
                        file list as it is

    Returns:
         list of files respecting the subject/session order provided in input,
         You should always use clinica_file_reader in the following manner:
         try:
            file_list = clinica_file_reader(...)
         except ClinicaException as e:
            # Deal with the error

    Raises:
        ClinicaCAPSError or ClinicaBIDSError if multiple files are found for 1 subject/session, or no file is found
        If raise_exception is falsy, no exception is raised

        Examples: (path are shortened for readability)
            - You have the full name of a file:
                File orig_nu.mgz from FreeSurfer of subject sub-ADNI011S4105 session ses-M00 located in mri folder of
                FreeSurfer output :
                    clinica_file_reader(['sub-ADNI011S4105'],
                                        ['ses-M00'],
                                        caps_directory,
                                        {'pattern': 'freesurfer_cross_sectional/sub-*_ses-*/mri/orig_nu.mgz',
                                         'description': 'freesurfer file orig_nu.mgz',
                                         'needed_pipeline': 't1-freesurfer'})
                    gives: ['/caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/sub-ADNI011S4105_ses-M00/mri/orig_nu.mgz']

            - You have a partial name of the file:
                File sub-ADNI011S4105_ses-M00_task-rest_acq-FDG_pet.nii.gz in BIDS directory. Here, filename depends on
                subject and session name :
                     clinica_file_reader(['sub-ADNI011S4105'],
                                         ['ses-M00'],
                                         bids_directory,
                                         {'pattern': '*fdg_pet.nii*',
                                          'description': 'FDG PET data'})
                     gives: ['/bids/sub-ADNI011S4105/ses-M00/pet/sub-ADNI011S4105_ses-M00_task-rest_acq-FDG_pet.nii.gz']

            - Tricky example:
                Get the file rh.white from FreeSurfer:
                If you try:
                    clinica_file_reader(['sub-ADNI011S4105'],
                                        ['ses-M00'],
                                        caps,
                                        {'pattern': 'rh.white',
                                         'description': 'right hemisphere of outer cortical surface.',
                                         'needed_pipeline': 't1-freesurfer'})
                        the following error will arise:
                        * More than 1 file found::
                            /caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/fsaverage/surf/rh.white
                            /caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/rh.EC_average/surf/rh.white
                            /caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/sub-ADNI011S4105_ses-M00/surf/rh.white
                Correct usage (e.g. in pet-surface): pattern string must be 'sub-*_ses-*/surf/rh.white' or even more precise:
                        't1/freesurfer_cross_sectional/sub-*_ses-*/surf/rh.white'
                    It then gives: ['/caps/subjects/sub-ADNI011S4105/ses-M00/t1/freesurfer_cross_sectional/sub-ADNI011S4105_ses-M00/surf/rh.white']

        Note:
            This function is case insensitive, meaning that the pattern argument can, for example, contain uppercase
            letters that do not exist in the existing file path.

    """

    from os.path import join
    from colorama import Fore
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaCAPSError

    # Developer errors are reported through assert (stripped under -O);
    # user-facing errors below are raised as Clinica exceptions.
    # Fixed assertion messages: wrong argument name, typo and missing quote.
    assert isinstance(
        information, dict), 'A dict must be provided for the argument \'information\''
    assert all(
        elem in information for elem in ['pattern', 'description']
    ), '\'information\' must contain the keys \'pattern\' and \'description\''
    assert all(
        elem in ['pattern', 'description', 'needed_pipeline']
        for elem in information
    ), '\'information\' can only contain the keys \'pattern\', \'description\' and \'needed_pipeline\''

    pattern = information['pattern']
    is_bids = determine_caps_or_bids(input_directory)

    if is_bids:
        check_bids_folder(input_directory)
    else:
        check_caps_folder(input_directory)

    # Some checks on the formatting of the data
    assert pattern[0] != '/', 'pattern argument cannot start with char: / (does not work in os.path.join function). ' \
                              + 'If you want to indicate the exact name of the file, use the format' \
                              + ' directory_name/filename.extension or filename.extension in the pattern argument'
    assert len(subjects) == len(
        sessions), 'Subjects and sessions must have the same length'
    if not subjects:
        return []

    # results is the list containing the found files
    results = []
    # error_encountered gathers every error so that the developer can see all
    # the problems at once before Clinica crashes
    error_encountered = []
    for sub, ses in zip(subjects, sessions):
        if is_bids:
            origin_pattern = join(input_directory, sub, ses)
        else:
            origin_pattern = join(input_directory, 'subjects', sub, ses)

        current_pattern = join(origin_pattern, '**/', pattern)
        current_glob_found = insensitive_glob(current_pattern, recursive=True)

        # Error handling if more than 1 file is found, or when no file is found
        if len(current_glob_found) > 1:
            error_str = '\t*' + Fore.BLUE + ' (' + sub + ' | ' + ses + ') ' + Fore.RESET + ': More than 1 file found:\n'
            for found_file in current_glob_found:
                error_str += '\t\t' + found_file + '\n'
            error_encountered.append(error_str)
        elif len(current_glob_found) == 0:
            error_encountered.append('\t*' + Fore.BLUE + ' (' + sub + ' | ' +
                                     ses + ') ' + Fore.RESET +
                                     ': No file found\n')
        # Otherwise the file found is added to the result
        else:
            results.append(current_glob_found[0])

    # Truthiness test (instead of the previous `raise_exception is True`) so
    # that any truthy value passed by a caller enables the error report.
    if error_encountered and raise_exception:
        error_message = Fore.RED + '\n[Error] Clinica encountered ' + str(len(error_encountered)) \
                        + ' problem(s) while getting ' + information['description'] + ':\n' + Fore.RESET
        # Single truthy lookup replaces the previous nested key/value checks.
        if information.get('needed_pipeline'):
            error_message += Fore.YELLOW + 'Please note that the following clinica pipeline(s) must have run ' \
                             'to obtain these files: ' + information['needed_pipeline'] + Fore.RESET + '\n'
        for msg in error_encountered:
            error_message += msg
        if is_bids:
            raise ClinicaBIDSError(error_message)
        else:
            raise ClinicaCAPSError(error_message)
    return results
예제 #7
0
def center_all_nifti(bids_dir, output_dir, modality, center_all_files=False):
    """
    Center all the NIfTI images of the input BIDS folder into the empty output_dir specified in argument.
    All the files from bids_dir are copied into output_dir, then all the NIfTI images we can find are replaced by their
    centered version if their center is off the origin by more than 50 mm.

    Args:
        bids_dir: (str) path to bids directory
        output_dir: (str) path to EMPTY output directory
        modality: (list of str) modalities to convert
        center_all_files: (bool) center only files that may cause problem for SPM if false. If true, center all NIfTI

    Returns:
        List of the centered files

    Raises:
        ClinicaBIDSError: if bids_dir and output_dir are the same folder.
        RuntimeError: if at least one image could not be centered.
    """
    from glob import glob
    from os import listdir
    from os.path import basename, isdir, isfile, join
    from shutil import copy2, copytree

    from colorama import Fore

    from clinica.utils.exceptions import ClinicaBIDSError
    from clinica.utils.inputs import check_bids_folder

    # output and input must be different, so that we do not mess with user's data
    if bids_dir == output_dir:
        raise ClinicaBIDSError(
            Fore.RED +
            '[Error] Input BIDS and output directories must be different' +
            Fore.RESET)

    assert isinstance(modality, list), 'modality arg must be a list of str'

    # check that input is a BIDS dir
    check_bids_folder(bids_dir)

    # Copy the whole BIDS tree, skipping entries already present in output_dir.
    for f in listdir(bids_dir):
        if isdir(join(bids_dir, f)) and not isdir(join(output_dir, f)):
            copytree(join(bids_dir, f), join(output_dir, f))
        elif isfile(join(bids_dir, f)) and not isfile(join(output_dir, f)):
            copy2(join(bids_dir, f), output_dir)

    pattern = join(output_dir, '**/*.nii*')
    nifti_files = glob(pattern, recursive=True)

    # Keep only the files whose basename mentions one of the requested
    # modalities (case-insensitive).
    nifti_files_filtered = [
        f for f in nifti_files
        if any(elem.lower() in basename(f).lower() for elem in modality)
    ]

    # Remove those that are already centered
    if not center_all_files:
        nifti_files_filtered = [
            file for file in nifti_files_filtered if not is_centered(file)
        ]

    all_errors = []
    for f in nifti_files_filtered:
        print('Handling ' + f)
        # center_nifti_origin rewrites the file in place (input == output).
        _, current_error = center_nifti_origin(f, f)
        if current_error:
            all_errors.append(current_error)
    if all_errors:
        # Typo fixed in the user-facing message ('encoutered' -> 'encountered').
        final_error_msg = Fore.RED + '[Error] Clinica encountered ' + str(len(all_errors)) \
                          + ' error(s) while trying to center all NIfTI images.\n'
        for error in all_errors:
            final_error_msg += '\n' + error
        raise RuntimeError(final_error_msg)
    return nifti_files_filtered
예제 #8
0
def preprocessing_t1w(bids_directory,
                      caps_directory,
                      tsv,
                      working_directory=None):
    """Build the T1w preprocessing workflow.

    This preprocessing pipeline includes globally four steps:
    1) N4 bias correction (performed with ANTS).
    2) Linear registration to MNI (MNI icbm152 nlinear sym template)
       (performed with ANTS) - RegistrationSynQuick.
    3) Cropping the background (in order to save computational power).
    4) Histogram-based intensity normalization. This is a custom function
       performed by the binary ImageMath included with ANTS.
       (Deprecated: this step is not connected in the final workflow below.)

    Parameters
    ----------
    bids_directory: str
       Folder with BIDS structure.
    caps_directory: str
       Folder where CAPS structure will be stored.
    tsv: str
       TSV file passed to get_subject_session_list (presumably restricting
       the subjects/sessions to process — confirm against that helper).
    working_directory: str
       Folder containing a temporary space to save intermediate results.

    Returns
    -------
    The nipype Workflow implementing the steps above.
    """

    from clinica.utils.inputs import check_bids_folder
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.input_files import T1W_NII
    from clinicadl.tools.inputs.input import fetch_file
    from os.path import dirname, join, abspath, split, exists
    from os import pardir

    check_bids_folder(bids_directory)
    input_dir = bids_directory
    is_bids_dir = True
    base_dir = working_directory

    # Locate the reference templates shipped in the package's resources/masks
    # folder (relative to this file's parent directory).
    root = dirname(abspath(join(abspath(__file__), pardir)))
    path_to_mask = join(root, 'resources', 'masks')
    ref_template = join(path_to_mask, 'mni_icbm152_t1_tal_nlin_sym_09c.nii')
    ref_crop = join(path_to_mask, 'ref_cropped_template.nii.gz')
    url1 = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/ref_cropped_template.nii.gz"
    url2 = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/mni_icbm152_t1_tal_nlin_sym_09c.nii"
    # Best-effort download of missing templates: failures only print a warning
    # here; a missing template will surface later when the nodes run.
    if not (exists(ref_template)):
        try:
            fetch_file(url2, ref_template)
        except IOError as err:
            print(
                'Unable to download required template (mni_icbm152) for processing:',
                err)

    if not (exists(ref_crop)):
        try:
            fetch_file(url1, ref_crop)
        except IOError as err:
            print(
                'Unable to download required template (ref_crop) for processing:',
                err)

    # Note the (sessions, subjects) return order of this helper.
    sessions, subjects = get_subject_session_list(input_dir, tsv, is_bids_dir,
                                                  False, base_dir)

    # NOTE(review): the clinica imports below duplicate the ones at the top of
    # the function; harmless, but they could be consolidated.
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.input_files import T1W_NII
    import nipype.pipeline.engine as npe
    import nipype.interfaces.utility as nutil
    from nipype.interfaces import ants
    from clinica.utils.filemanip import get_subject_id

    # Inputs from anat/ folder
    # ========================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects, sessions, bids_directory,
                                        T1W_NII)
    except ClinicaException as e:
        err = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n' + str(
            e)
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input field names.
        """
        return ['t1w']

    # Identity node iterating synchronously over every input image.
    read_node = npe.Node(
        name="ReadingFiles",
        iterables=[
            ('t1w', t1w_files),
        ],
        synchronize=True,
        interface=nutil.IdentityInterface(fields=get_input_fields()))

    # Extracts the image identifier from the BIDS/CAPS filename.
    image_id_node = npe.Node(interface=nutil.Function(
        input_names=['bids_or_caps_file'],
        output_names=['image_id'],
        function=get_subject_id),
                             name='ImageID')

    # The core (processing) nodes

    # 1. N4biascorrection by ANTS. It uses nipype interface.
    n4biascorrection = npe.Node(name='n4biascorrection',
                                interface=ants.N4BiasFieldCorrection(
                                    dimension=3,
                                    save_bias=True,
                                    bspline_fitting_distance=600))

    # 2. `RegistrationSynQuick` by *ANTS*. It uses nipype interface.
    # transform_type 'a' selects an affine (linear) registration.
    ants_registration_node = npe.Node(name='antsRegistrationSynQuick',
                                      interface=ants.RegistrationSynQuick())
    ants_registration_node.inputs.fixed_image = ref_template
    ants_registration_node.inputs.transform_type = 'a'
    ants_registration_node.inputs.dimension = 3

    # 3. Crop image (using nifti). It uses custom interface, from utils file
    from .T1_linear_utils import crop_nifti

    cropnifti = npe.Node(name='cropnifti',
                         interface=nutil.Function(
                             function=crop_nifti,
                             input_names=['input_img', 'ref_crop'],
                             output_names=['output_img', 'crop_template']))
    cropnifti.inputs.ref_crop = ref_crop

    #### Deprecated ####
    #### This step was not used in the final version ####
    # 4. Histogram-based intensity normalization. This is a custom function
    #    performed by the binary `ImageMath` included with *ANTS*.

    #   from .T1_linear_utils import ants_histogram_intensity_normalization
    #
    #   ## histogram-based intensity normalization
    #   intensitynorm = npe.Node(
    #           name='intensitynormalization',
    #           interface=nutil.Function(
    #               input_names=['image_dimension', 'crop_template', 'input_img'],
    #               output_names=['output_img'],
    #               function=ants_histogram_intensity_normalization
    #               )
    #           )
    #   intensitynorm.inputs.image_dimension = 3

    # DataSink and the output node

    from .T1_linear_utils import (container_from_filename, get_data_datasink)
    # Create node to write selected files into the CAPS
    from nipype.interfaces.io import DataSink

    get_ids = npe.Node(interface=nutil.Function(
        input_names=['image_id'],
        output_names=['image_id_out', 'subst_ls'],
        function=get_data_datasink),
                       name="GetIDs")

    # Find container path from t1w filename
    # =====================================
    container_path = npe.Node(nutil.Function(
        input_names=['bids_or_caps_filename'],
        output_names=['container'],
        function=container_from_filename),
                              name='ContainerPath')

    write_node = npe.Node(name="WriteCaps", interface=DataSink())
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    # Connecting the workflow
    from clinica.utils.nipype import fix_join

    wf = npe.Workflow(name='t1_linear_dl', base_dir=working_directory)

    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (image_id_node, ants_registration_node, [('image_id', 'output_prefix')
                                                 ]),
        (read_node, n4biascorrection, [("t1w", "input_image")]),
        (n4biascorrection, ants_registration_node, [('output_image',
                                                     'moving_image')]),
        (ants_registration_node, cropnifti, [('warped_image', 'input_img')]),
        (ants_registration_node, write_node, [('out_matrix', 'affine_mat')]),
        # Connect to DataSink
        (container_path, write_node, [(('container', fix_join, 't1_linear'),
                                       'container')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')]),
        #(get_ids, write_node, [('regexp_subst_ls', 'regexp_substitutions')]),
        (n4biascorrection, write_node, [('output_image', '@outfile_corr')]),
        (ants_registration_node, write_node, [('warped_image', '@outfile_reg')
                                              ]),
        (cropnifti, write_node, [('output_img', '@outfile_crop')]),
    ])

    return wf
    def build_input_node(self):
        """Build and connect an input node to the pipelines.

        Reads the T1w, DWI, b-value and b-vector files from the BIDS
        directory, reporting every reading failure at once, then checks the
        DWI volume consistency and feeds all files to the pipeline through a
        synchronized identity node.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        import clinica.utils.input_files as input_files
        from clinica.utils.dwi import check_dwi_volume
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.inputs import clinica_file_reader

        # Query each modality in turn, collecting every failure so the user
        # sees all missing/duplicated files in a single error report.
        queries = (
            ("t1w", input_files.T1W_NII),
            ("dwi", input_files.DWI_NII),
            ("bval", input_files.DWI_BVAL),
            ("bvec", input_files.DWI_BVEC),
        )
        found = {}
        read_errors = []
        for key, file_type in queries:
            try:
                found[key] = clinica_file_reader(self.subjects,
                                                 self.sessions,
                                                 self.bids_directory,
                                                 file_type)
            except ClinicaException as exc:
                read_errors.append(exc)

        if read_errors:
            message = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n'
            message += ''.join(str(exc) for exc in read_errors)
            raise ClinicaBIDSError(message)

        # Reading succeeded for every modality: check each DWI triplet.
        for dwi, bvec, bval in zip(found["dwi"], found["bvec"], found["bval"]):
            check_dwi_volume(in_dwi=dwi, in_bvec=bvec, in_bval=bval)

        # Identity node iterating synchronously over the four file lists.
        read_input_node = npe.Node(name="LoadingCLIArguments",
                                   interface=nutil.IdentityInterface(
                                       fields=self.get_input_fields(),
                                       mandatory_inputs=True),
                                   iterables=[('T1w', found["t1w"]),
                                              ('dwi', found["dwi"]),
                                              ('bvec', found["bvec"]),
                                              ('bval', found["bval"])],
                                   synchronize=True)

        self.connect([
            (read_input_node, self.input_node, [('T1w', 'T1w')]),
            (read_input_node, self.input_node, [('dwi', 'dwi')]),
            (read_input_node, self.input_node, [('bvec', 'bvec')]),
            (read_input_node, self.input_node, [('bval', 'bval')])
        ])
예제 #10
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Downloads the MNI template and the cropped reference template when
        they are not already cached on disk, skips subject/session pairs
        whose output already exists in the CAPS directory, reads the
        remaining T1w files from the BIDS directory, and feeds them into
        ``self.input_node``.
        """
        from os import pardir
        from os.path import dirname, join, abspath, exists
        from colorama import Fore
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import fetch_file, RemoteFileStructure
        from clinica.utils.ux import print_images_to_process
        from clinica.utils.stream import cprint

        # Templates are fetched from the Aramis server on first use and
        # cached under <package root>/resources/masks.
        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, 'resources', 'masks')
        url_aramis = 'https://aramislab.paris.inria.fr/files/data/img_t1_linear/'
        FILE1 = RemoteFileStructure(
            filename='ref_cropped_template.nii.gz',
            url=url_aramis,
            checksum=
            '67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3')
        FILE2 = RemoteFileStructure(
            filename='mni_icbm152_t1_tal_nlin_sym_09c.nii',
            url=url_aramis,
            checksum=
            '93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34')

        self.ref_template = join(path_to_mask, FILE2.filename)
        self.ref_crop = join(path_to_mask, FILE1.filename)

        if not exists(self.ref_template):
            try:
                fetch_file(FILE2, path_to_mask)
            except IOError as err:
                # Fold the error into the message: cprint() takes a single
                # message argument, so passing `err` as a second positional
                # argument would raise a TypeError and hide the real cause.
                cprint('Unable to download required template (mni_icbm152) '
                       'for processing: %s' % err)

        if not exists(self.ref_crop):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint('Unable to download required template (ref_crop) '
                       'for processing: %s' % err)

        # Display image(s) already present in CAPS folder
        # ===============================================
        processed_ids = self.get_processed_images(self.caps_directory,
                                                  self.subjects, self.sessions)
        if len(processed_ids) > 0:
            cprint(
                "%sClinica found %s image(s) already processed in CAPS directory:%s"
                % (Fore.YELLOW, len(processed_ids), Fore.RESET))
            for image_id in processed_ids:
                cprint("%s\t%s%s" %
                       (Fore.YELLOW, image_id.replace('_', ' | '), Fore.RESET))
            cprint("%s\nImage(s) will be ignored by Clinica.\n%s" %
                   (Fore.YELLOW, Fore.RESET))
            # Keep only the subject/session pairs without existing output.
            input_ids = [
                p_id + '_' + s_id
                for p_id, s_id in zip(self.subjects, self.sessions)
            ]
            to_process_ids = list(set(input_ids) - set(processed_ids))
            self.subjects, self.sessions = extract_subjects_sessions_from_filename(
                to_process_ids)

        # Inputs from anat/ folder
        # ========================
        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n' + str(
                e)
            raise ClinicaBIDSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last approximately 6 minutes per image.')

        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ('t1w', t1w_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()))
        self.connect([
            (read_node, self.input_node, [('t1w', 't1w')]),
        ])
예제 #11
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Gathers the PET and T1w images from the BIDS directory and the
        T1w-to-MNI transformations from the CAPS directory, downloading
        the reference templates when they are missing, and feeds the file
        lists into ``self.input_node``.
        """
        from os import pardir
        from os.path import abspath, dirname, exists, join

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import (
            ClinicaBIDSError,
            ClinicaCAPSError,
            ClinicaException,
        )
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.input_files import (
            T1W_NII,
            T1W_TO_MNI_TRANSFROM,
            bids_pet_nii,
        )
        from clinica.utils.inputs import (
            RemoteFileStructure,
            clinica_file_reader,
            fetch_file,
        )
        from clinica.utils.pet import get_suvr_mask
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # Reference files are cached under <package root>/resources/masks.
        package_root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        mask_dir = join(package_root, "resources", "masks")
        aramis_url = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/"
        mni_template = RemoteFileStructure(
            filename="mni_icbm152_t1_tal_nlin_sym_09c.nii",
            url=aramis_url,
            checksum="93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34",
        )
        cropped_ref = RemoteFileStructure(
            filename="ref_cropped_template.nii.gz",
            url=aramis_url,
            checksum="67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3",
        )

        self.ref_template = join(mask_dir, mni_template.filename)
        self.ref_crop = join(mask_dir, cropped_ref.filename)
        self.ref_mask = get_suvr_mask(self.parameters["suvr_reference_region"])

        # Fetch any missing template; failures are logged, not fatal here.
        if not exists(self.ref_template):
            try:
                fetch_file(mni_template, mask_dir)
            except IOError as err:
                cprint(
                    msg=f"Unable to download required template (mni_icbm152) for processing: {err}",
                    lvl="error",
                )
        if not exists(self.ref_crop):
            try:
                fetch_file(cropped_ref, mask_dir)
            except IOError as err:
                cprint(
                    msg=f"Unable to download required template (ref_crop) for processing: {err}",
                    lvl="error",
                )

        # PET image matching the requested acquisition label (BIDS).
        pet_pattern = bids_pet_nii(self.parameters["acq_label"])
        try:
            pet_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, pet_pattern)
        except ClinicaException as e:
            raise ClinicaBIDSError(
                "Clinica faced error(s) while trying to read pet files in your BIDS directory.\n"
                + str(e))

        # T1w image (BIDS).
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            raise ClinicaBIDSError(
                "Clinica faced error(s) while trying to read t1w files in your BIDS directory.\n"
                + str(e))

        # T1w-to-MNI transformations from the t1-linear pipeline (CAPS).
        try:
            mni_transform_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                T1W_TO_MNI_TRANSFROM)
        except ClinicaException as e:
            raise ClinicaCAPSError(
                "Clinica faced error(s) while trying to read transformation files in your CAPS directory.\n"
                + str(e))

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last approximately 3 minutes per image.")

        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            iterables=[
                ("t1w", t1w_files),
                ("pet", pet_files),
                ("t1w_to_mni", mni_transform_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        # fmt: off
        self.connect([
            (read_input_node, self.input_node, [("t1w", "t1w")]),
            (read_input_node, self.input_node, [("pet", "pet")]),
            (read_input_node, self.input_node, [("t1w_to_mni", "t1w_to_mni")]),
        ])
예제 #12
0
    def build_input_node(self):
        """Build and connect an input node to the pipelines.

        Reads the BOLD, T1w and (optionally) fieldmap images from the BIDS
        directory together with the acquisition parameters stored in the
        JSON sidecars (echo times, blip direction, total readout time,
        repetition time, slice timing) and exposes everything on
        ``self.input_node``.

        References:
            https://lcni.uoregon.edu/kb-articles/kb-0003

        """

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        import json
        import numpy as np
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        import clinica.utils.input_files as input_files

        # Reading BIDS files
        # ==================
        read_node = npe.Node(name="ReadingBIDS",
                             interface=nutil.IdentityInterface(
                                 fields=self.get_input_fields(),
                                 mandatory_inputs=True))

        # Accumulate all reading errors so the user gets one report listing
        # every missing file, instead of fixing them one at a time.
        all_errors = []
        # Evaluate the unwarping option once. Using the same guard at every
        # site avoids a KeyError when 'unwarping' is absent (the original
        # code guarded only two of the three accesses).
        unwarping = ('unwarping' in self.parameters) and self.parameters['unwarping']
        if unwarping:
            # Magnitude 1 file
            try:
                read_node.inputs.magnitude1 = clinica_file_reader(
                    self.subjects, self.sessions, self.bids_directory,
                    input_files.FMAP_MAGNITUDE1_NII)
            except ClinicaException as e:
                all_errors.append(e)

            # Phasediff file
            try:
                read_node.inputs.phasediff = clinica_file_reader(
                    self.subjects, self.sessions, self.bids_directory,
                    input_files.FMAP_PHASEDIFF_NII)
            except ClinicaException as e:
                all_errors.append(e)

        # Bold files
        try:
            read_node.inputs.bold = clinica_file_reader(
                self.subjects, self.sessions, self.bids_directory,
                input_files.FMRI_BOLD_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # T1w-MRI files
        try:
            read_node.inputs.T1w = clinica_file_reader(self.subjects,
                                                       self.sessions,
                                                       self.bids_directory,
                                                       input_files.T1W_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # Reading BIDS json
        # =================
        # One entry per image is appended to each list below.
        read_node.inputs.et = []                # SPM echo times
        read_node.inputs.blipdir = []           # SPM blip directions
        read_node.inputs.tert = []              # SPM total readout times
        read_node.inputs.time_repetition = []
        read_node.inputs.num_slices = []
        read_node.inputs.slice_order = []
        read_node.inputs.ref_slice = []
        read_node.inputs.time_acquisition = []

        if unwarping:
            # From phasediff json file
            try:
                phasediff_json = clinica_file_reader(
                    self.subjects, self.sessions, self.bids_directory,
                    input_files.FMAP_PHASEDIFF_JSON)
                for json_f in phasediff_json:
                    with open(json_f) as json_file:
                        data = json.load(json_file)
                        # SPM echo times
                        read_node.inputs.et.append(
                            [data['EchoTime1'], data['EchoTime2']])
                        # SPM blip direction
                        # TODO: Verify that it is the correct way to get the
                        # blipdir
                        blipdir_raw = data['PhaseEncodingDirection']
                        if len(blipdir_raw) > 1 and blipdir_raw[1] == '-':
                            read_node.inputs.blipdir.append(-1)
                        else:
                            read_node.inputs.blipdir.append(1)
            except ClinicaException as e:
                all_errors.append(e)

        # From func json file
        try:
            func_json = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory,
                                            input_files.FMRI_BOLD_JSON)

            for json_f in func_json:
                with open(json_f) as json_file:
                    data = json.load(json_file)
                    # SPM Total readout time
                    read_node.inputs.tert.append(
                        1 / data['BandwidthPerPixelPhaseEncode'])
                    # SPM Repetition time
                    read_node.inputs.time_repetition.append(
                        data['RepetitionTime'])
                    # Number of slices
                    slice_timing = data['SliceTiming']
                    read_node.inputs.num_slices.append(len(slice_timing))
                    # Slice order: 1-based indices sorted by acquisition time
                    slice_order = np.argsort(slice_timing) + 1
                    read_node.inputs.slice_order.append(slice_order.tolist())
                    read_node.inputs.ref_slice.append(
                        np.argmin(slice_timing) + 1)
                    # TA = TR - TR / n_slices (standard SPM formula)
                    read_node.inputs.time_acquisition.append(
                        data['RepetitionTime'] -
                        data['RepetitionTime'] / float(len(slice_timing)))
        except ClinicaException as e:
            all_errors.append(e)

        if len(all_errors) > 0:
            error_message = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaBIDSError(error_message)

        if unwarping:
            self.connect([
                # Reading BIDS json
                (read_node, self.input_node, [('et', 'et')]),
                (read_node, self.input_node, [('blipdir', 'blipdir')]),
                (read_node, self.input_node, [('tert', 'tert')]),
                # Reading BIDS files
                (read_node, self.input_node, [('phasediff', 'phasediff')]),
                (read_node, self.input_node, [('magnitude1', 'magnitude1')]),
            ])

        self.connect([
            # Reading BIDS json
            (read_node, self.input_node, [('time_repetition',
                                           'time_repetition')]),
            (read_node, self.input_node, [('num_slices', 'num_slices')]),
            (read_node, self.input_node, [('slice_order', 'slice_order')]),
            (read_node, self.input_node, [('ref_slice', 'ref_slice')]),
            (read_node, self.input_node, [('time_acquisition',
                                           'time_acquisition')]),
            # Reading BIDS files
            (read_node, self.input_node, [('bold', 'bold')]),
            (read_node, self.input_node, [('T1w', 'T1w')]),
        ])
예제 #13
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Downloads the MNI and cropped reference templates when they are
        missing, reads the T1w images from the BIDS directory, and feeds
        them into ``self.input_node``.
        """
        from os import pardir
        from os.path import dirname, join, abspath, exists
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import fetch_file
        from clinica.utils.ux import print_images_to_process
        from clinica.utils.stream import cprint

        # Templates are cached under <package root>/resources/masks.
        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, 'resources', 'masks')
        self.ref_template = join(
                path_to_mask, 'mni_icbm152_t1_tal_nlin_sym_09c.nii')
        self.ref_crop = join(path_to_mask, 'ref_cropped_template.nii.gz')
        url1 = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/ref_cropped_template.nii.gz"
        url2 = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/mni_icbm152_t1_tal_nlin_sym_09c.nii"

        if not exists(self.ref_template):
            try:
                fetch_file(url2, self.ref_template)
            except IOError as err:
                # Fold the error into the message: cprint() takes a single
                # message argument, so passing `err` as a second positional
                # argument would raise a TypeError and hide the real cause.
                cprint('Unable to download required template (mni_icbm152) for processing: %s' % err)

        if not exists(self.ref_crop):
            try:
                fetch_file(url1, self.ref_crop)
            except IOError as err:
                cprint('Unable to download required template (ref_crop) for processing: %s' % err)

        # Inputs from anat/ folder
        # ========================
        # T1w file:
        try:
            t1w_files = clinica_file_reader(
                    self.subjects,
                    self.sessions,
                    self.bids_directory,
                    T1W_NII)
        except ClinicaException as e:
            err = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n' + str(e)
            raise ClinicaBIDSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last approximately 6 minutes per image.')

        read_node = npe.Node(
                name="ReadingFiles",
                iterables=[
                    ('t1w', t1w_files),
                    ],
                synchronize=True,
                interface=nutil.IdentityInterface(
                    fields=self.get_input_fields())
                )
        self.connect([
            (read_node, self.input_node, [('t1w', 't1w')]),
            ])
예제 #14
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Downloads the reference templates when they are missing, removes
        subject/session pairs already processed in the CAPS directory,
        reads the remaining T1w images from the BIDS directory and feeds
        them into ``self.input_node``.
        """
        from os import pardir
        from os.path import abspath, dirname, exists, join

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import (
            RemoteFileStructure,
            clinica_file_reader,
            fetch_file,
        )
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # Templates are fetched from the Aramis server on first use and
        # cached under <package root>/resources/masks.
        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, "resources", "masks")
        url_aramis = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/"
        FILE1 = RemoteFileStructure(
            filename="ref_cropped_template.nii.gz",
            url=url_aramis,
            checksum=
            "67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3",
        )
        FILE2 = RemoteFileStructure(
            filename="mni_icbm152_t1_tal_nlin_sym_09c.nii",
            url=url_aramis,
            checksum=
            "93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34",
        )

        self.ref_template = join(path_to_mask, FILE2.filename)
        self.ref_crop = join(path_to_mask, FILE1.filename)

        # Download failures are logged but not fatal at this point.
        if not exists(self.ref_template):
            try:
                fetch_file(FILE2, path_to_mask)
            except IOError as err:
                cprint(
                    msg=
                    f"Unable to download required template (mni_icbm152) for processing: {err}",
                    lvl="error",
                )

        if not exists(self.ref_crop):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    msg=
                    f"Unable to download required template (ref_crop) for processing: {err}",
                    lvl="error",
                )

        # Display image(s) already present in CAPS folder
        # ===============================================
        processed_ids = self.get_processed_images(self.caps_directory,
                                                  self.subjects, self.sessions)
        if len(processed_ids) > 0:
            cprint(
                msg=
                f"Clinica found {len(processed_ids)} image(s) already processed in CAPS directory:",
                lvl="warning",
            )
            for image_id in processed_ids:
                cprint(msg=image_id.replace("_", " | "), lvl="warning")
            # Plain string literal: the original used an f-string with no
            # placeholders (flake8 F541).
            cprint(msg="Image(s) will be ignored by Clinica.", lvl="warning")
            # Keep only the subject/session pairs without existing output.
            input_ids = [
                p_id + "_" + s_id
                for p_id, s_id in zip(self.subjects, self.sessions)
            ]
            to_process_ids = list(set(input_ids) - set(processed_ids))
            self.subjects, self.sessions = extract_subjects_sessions_from_filename(
                to_process_ids)

        # Inputs from anat/ folder
        # ========================
        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last approximately 6 minutes per image.")

        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ("t1w", t1w_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        self.connect([
            (read_node, self.input_node, [("t1w", "t1w")]),
        ])
예제 #15
0
def extract_dl_t1w(caps_directory,
                   tsv,
                   working_directory=None,
                   extract_method='whole',
                   patch_size=50,
                   stride_size=50,
                   slice_direction=0,
                   slice_mode='original'):
    """Preprocessing pipeline converting nii.gz MRIs into pytorch tensors.

    The MRIs are converted to tensor versions (pytorch format), and the
    slice-level and patch-level data are prepared from the entire MRI and
    saved on disk. This enables the training process:
        - For slice-level CNN, all slices are extracted from the entire
          MRI along three different axes. The first and last 15 slices are
          discarded due to the lack of information.
        - For patch-level CNN, 3D patches (with a specific patch size)
          are extracted by a sliding 3D window.

    Parameters
    ----------
    caps_directory: str
      CAPS directory where the output of preprocessing is stored.
    tsv: str
      TSV file with the subject list (participant_id and session_id).
    extract_method: str
      Select which extraction method will be applied for the outputs:
      - 'slice' to get slices from the MRI,
      - 'patch' to get 3D patches from MRI,
      - 'whole' to get the complete MRI.
    patch_size: int
      Size of extracted 3D patches (only 'patch' method).
    stride_size: int
      Stride of the sliding window when extracting patches (only 'patch'
      method).
    slice_direction: int
      Direction along which the slices are extracted (only 'slice' method):
      - 0: Sagittal plane
      - 1: Coronal plane
      - 2: Axial plane
    slice_mode: str
      How slices are stored (only 'slice' method):
      - original: saves one single channel (intensity)
      - rgb: saves with three channels (red, green, blue)
    working_directory: str
      Folder containing a temporary space to save intermediate results.

    Returns
    -------
    wf: nipype.pipeline.engine.workflows.Workflow
      A nipype workflow to control, setup, and execute the process as a
      nipype pipeline.
    """
    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe
    from nipype.interfaces.io import DataSink
    from nipype import config
    import tempfile
    from clinica.utils.inputs import check_caps_folder
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.nipype import fix_join
    from .T1_preparedl_utils import (extract_slices, extract_patches,
                                     save_as_pt, container_from_filename,
                                     get_data_datasink)

    # Pattern of the images produced by the t1-linear pipeline.
    T1W_LINEAR = {
        'pattern': '*space-MNI152NLin2009cSym_res-1x1x1_T1w.nii.gz',
        'description': 'T1W Image registered using T1_Linear'
    }

    if working_directory is None:
        working_directory = tempfile.mkdtemp()

    check_caps_folder(caps_directory)
    # Subjects/sessions are listed from a CAPS (not BIDS) hierarchy.
    is_bids_dir = False
    use_session_tsv = False

    sessions, subjects = get_subject_session_list(caps_directory, tsv,
                                                  is_bids_dir, use_session_tsv,
                                                  working_directory)

    # Use hash instead of parameters for iterables folder names.
    # Otherwise the path will be too long and generate OSError.
    cfg = dict(execution={'parameterize_dirs': False})
    config.update_config(cfg)

    # Inputs from t1_linear folder
    # ============================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects, sessions, caps_directory,
                                        T1W_LINEAR)
    except ClinicaException as e:
        # NOTE(review): the files are read from a CAPS directory, yet a
        # ClinicaBIDSError is raised; kept as-is so callers that catch
        # ClinicaBIDSError keep working.
        err = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n' + str(
            e)
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input field names.
        """
        return ['t1w']

    # Read node
    # ----------------------
    read_node = npe.Node(
        name="ReadingFiles",
        iterables=[
            ('t1w', t1w_files),
        ],
        synchronize=True,
        interface=nutil.IdentityInterface(fields=get_input_fields()))

    # Get subject ID node
    # ----------------------
    image_id_node = npe.Node(interface=nutil.Function(
        input_names=['bids_or_caps_file'],
        output_names=['image_id'],
        function=get_subject_id),
                             name='ImageID')

    # The processing nodes.
    # Node variables are suffixed with `_node` so they do not shadow the
    # helper functions imported from T1_preparedl_utils (the previous code
    # relied on RHS-before-assignment evaluation order to reference them).

    # Node to save MRI in nii.gz format into pytorch .pt format
    # ----------------------
    save_as_pt_node = npe.MapNode(name='save_as_pt',
                                  iterfield=['input_img'],
                                  interface=nutil.Function(
                                      function=save_as_pt,
                                      input_names=['input_img'],
                                      output_names=['output_file']))

    # Extract slices node (options: 3 directions, mode)
    # ----------------------
    extract_slices_node = npe.MapNode(
        name='extract_slices',
        iterfield=['preprocessed_T1'],
        interface=nutil.Function(
            function=extract_slices,
            input_names=['preprocessed_T1', 'slice_direction', 'slice_mode'],
            output_names=['output_file_rgb', 'output_file_original']))

    extract_slices_node.inputs.slice_direction = slice_direction
    extract_slices_node.inputs.slice_mode = slice_mode

    # Extract patches node (options: patch size and stride size)
    # ----------------------
    extract_patches_node = npe.MapNode(
        name='extract_patches',
        iterfield=['preprocessed_T1'],
        interface=nutil.Function(
            function=extract_patches,
            input_names=['preprocessed_T1', 'patch_size', 'stride_size'],
            output_names=['output_patch']))

    extract_patches_node.inputs.patch_size = patch_size
    extract_patches_node.inputs.stride_size = stride_size

    # Node computing the DataSink substitutions from the image ID
    # ----------------------
    get_ids = npe.Node(interface=nutil.Function(
        input_names=['image_id'],
        output_names=['image_id_out', 'subst_ls'],
        function=get_data_datasink),
                       name="GetIDs")

    # Find container path from t1w filename
    # ----------------------
    container_path = npe.Node(nutil.Function(
        input_names=['bids_or_caps_filename'],
        output_names=['container'],
        function=container_from_filename),
                              name='ContainerPath')

    # Write node
    # ----------------------
    write_node = npe.Node(name="WriteCaps", interface=DataSink())
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    subfolder = 'image_based'
    wf = npe.Workflow(name='dl_prepare_data', base_dir=working_directory)

    # Connections
    # ----------------------
    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (read_node, save_as_pt_node, [('t1w', 'input_img')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        # Connect to DataSink
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')])
    ])

    if extract_method == 'slice':
        subfolder = 'slice_based'
        wf.connect([(save_as_pt_node, extract_slices_node,
                     [('output_file', 'preprocessed_T1')]),
                    (extract_slices_node, write_node,
                     [('output_file_rgb', '@slices_rgb_T1')]),
                    (extract_slices_node, write_node,
                     [('output_file_original', '@slices_original_T1')])])
    elif extract_method == 'patch':
        subfolder = 'patch_based'
        wf.connect([(save_as_pt_node, extract_patches_node,
                     [('output_file', 'preprocessed_T1')]),
                    (extract_patches_node, write_node,
                     [('output_patch', '@patches_T1')])])
    else:
        # 'whole': store the full tensor as-is.
        wf.connect([(save_as_pt_node, write_node,
                     [('output_file', '@output_pt_file')])])

    wf.connect([(container_path, write_node,
                 [(('container', fix_join, 'deeplearning_prepare_data',
                    subfolder, 't1_linear'), 'container')])])

    return wf
예제 #16
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        For every subject/session, reads from the BIDS directory the DWI
        image (with its JSON metadata, bval and bvec files) and the
        phasediff fieldmap (with its JSON metadata and first magnitude
        image), derives the acquisition parameters needed downstream
        (effective echo spacing, phase encoding direction, delta echo
        time), and exposes everything through a synchronized iterable
        input node.

        Raises:
            ClinicaBIDSError: If at least one required file is missing or
                duplicated for any subject/session (all reading errors are
                accumulated and reported together).
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.dwi import check_dwi_volume
        from clinica.utils.inputs import clinica_file_reader
        import clinica.pipelines.dwi_preprocessing_using_phasediff_fieldmap.dwi_preprocessing_using_phasediff_fieldmap_utils as utils
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        import clinica.utils.input_files as input_files
        from clinica.utils.stream import cprint

        # Accumulate every reading error so the user gets one complete
        # report instead of failing at the first missing file.
        all_errors = []

        # DWI
        try:
            dwi_bids = clinica_file_reader(self.subjects, self.sessions,
                                           self.bids_directory,
                                           input_files.DWI_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # DWI json
        try:
            dwi_json = clinica_file_reader(self.subjects, self.sessions,
                                           self.bids_directory,
                                           input_files.DWI_JSON)

            # Create list_eff_echo_spacings and list_enc_directions
            # (json_file, not json, to avoid shadowing the stdlib module name)
            list_eff_echo_spacings = []
            list_enc_directions = []
            for json_file in dwi_json:
                [eff_echo_spacing,
                 enc_direction] = utils.parameters_from_dwi_metadata(json_file)
                list_eff_echo_spacings.append(eff_echo_spacing)
                list_enc_directions.append(enc_direction)

        except ClinicaException as e:
            all_errors.append(e)

        # bval files
        try:
            bval_files = clinica_file_reader(self.subjects, self.sessions,
                                             self.bids_directory,
                                             input_files.DWI_BVAL)
        except ClinicaException as e:
            all_errors.append(e)

        # bvec files
        try:
            bvec_files = clinica_file_reader(self.subjects, self.sessions,
                                             self.bids_directory,
                                             input_files.DWI_BVEC)
        except ClinicaException as e:
            all_errors.append(e)

        # dwi_bids, bvec_files, bval_files are only defined when every read
        # above succeeded, so the consistency check is guarded accordingly.
        if len(all_errors) == 0:
            for (dwi, bvec, bval) in zip(dwi_bids, bvec_files, bval_files):
                check_dwi_volume(in_dwi=dwi, in_bvec=bvec, in_bval=bval)

        # Phasediff json
        try:
            fmap_phasediff_json = clinica_file_reader(
                self.subjects, self.sessions, self.bids_directory,
                input_files.FMAP_PHASEDIFF_JSON)
            # Then deduce delta echo times
            list_delta_echo_times = [
                utils.delta_echo_time_from_bids_fmap(json_phasediff)
                for json_phasediff in fmap_phasediff_json
            ]

        except ClinicaException as e:
            all_errors.append(e)

        # Phasediff nifti
        try:
            phasediff_nifti = clinica_file_reader(
                self.subjects, self.sessions, self.bids_directory,
                input_files.FMAP_PHASEDIFF_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # Magnitude1
        try:
            magnitude1 = clinica_file_reader(self.subjects, self.sessions,
                                             self.bids_directory,
                                             input_files.FMAP_MAGNITUDE1_NII)
        except ClinicaException as e:
            all_errors.append(e)

        if len(all_errors) > 0:
            # All files above are read from the BIDS folder (and the raised
            # exception is a ClinicaBIDSError), so the message names BIDS.
            error_message = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaBIDSError(error_message)

        cprint("List JSON parameters for DWI Preprocessing:")
        cprint("- PhaseEncodingDirections")
        cprint(list_enc_directions)
        cprint("- EffectiveEchoSpacing")
        cprint(list_eff_echo_spacings)
        cprint("- DeltaEchoTime")
        cprint(list_delta_echo_times)

        # `synchronize=True` makes the iterables advance in lockstep, so the
        # n-th DWI is paired with the n-th bvec/bval/fieldmap/parameters.
        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                              mandatory_inputs=True),
            iterables=[('dwi', dwi_bids), ('bvec', bvec_files),
                       ('bval', bval_files),
                       ('delta_echo_time', list_delta_echo_times),
                       ('effective_echo_spacing', list_eff_echo_spacings),
                       ('phase_encoding_direction', list_enc_directions),
                       ('fmap_magnitude', magnitude1),
                       ('fmap_phasediff', phasediff_nifti)],
            synchronize=True)

        self.connect([(read_input_node, self.input_node, [('fmap_magnitude',
                                                           'fmap_magnitude')]),
                      (read_input_node, self.input_node, [('fmap_phasediff',
                                                           'fmap_phasediff')]),
                      (read_input_node, self.input_node, [('dwi', 'dwi')]),
                      (read_input_node, self.input_node, [('bval', 'bval')]),
                      (read_input_node, self.input_node, [('bvec', 'bvec')]),
                      (read_input_node, self.input_node,
                       [('delta_echo_time', 'delta_echo_time')]),
                      (read_input_node, self.input_node,
                       [('effective_echo_spacing', 'effective_echo_spacing')]),
                      (read_input_node, self.input_node,
                       [('phase_encoding_direction',
                         'phase_encoding_direction')])])
예제 #17
0
파일: T1_linear.py 프로젝트: imppppp7/AD-DL
def preprocessing_t1w(bids_directory,
                      caps_directory,
                      tsv,
                      working_directory=None):
    """Build the t1-linear preprocessing workflow.

     This preprocessing pipeline includes globally four steps:
     1) N4 bias correction (performed with ANTS).
     2) Linear registration to MNI (MNI icbm152 nlinear sym template)
        (performed with ANTS) - RegistrationSynQuick.
     3) Cropping the background (in order to save computational power).
     4) Histogram-based intensity normalization. This is a custom function
        performed by the binary ImageMath included with ANTS.
        (Deprecated: this step is not connected in the final workflow.)

     Parameters
     ----------
     bids_directory: str
        Folder with BIDS structure.
     caps_directory: str
        Folder where CAPS structure will be stored.
     tsv: str
        Path to a TSV file restricting the participants/sessions to process
        (forwarded to get_subject_session_list).
     working_directory: str, optional
        Folder containing a temporary space to save intermediate results.

     Returns
     -------
     wf: nipype Workflow
        The configured (but not yet run) preprocessing workflow.
    """

    from os.path import join, abspath, exists
    from os import makedirs
    from pathlib import Path
    from clinica.utils.inputs import check_bids_folder
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.input_files import T1W_NII
    from clinica.utils.check_dependency import check_ants
    from clinicadl.tools.inputs.input import fetch_file
    from clinicadl.tools.inputs.input import RemoteFileStructure
    import nipype.pipeline.engine as npe
    import nipype.interfaces.utility as nutil
    from nipype.interfaces import ants

    check_ants()
    check_bids_folder(bids_directory)
    input_dir = abspath(bids_directory)
    caps_directory = abspath(caps_directory)
    is_bids_dir = True
    # abspath(None) raises TypeError, so only resolve the working directory
    # when the caller actually provided one (it defaults to None).
    base_dir = abspath(working_directory) if working_directory is not None else None

    # Reference templates are cached under the user's home and fetched from
    # the Aramis lab server on first use (checksums guard the downloads).
    home = str(Path.home())
    cache_clinicadl = join(home, '.cache', 'clinicadl', 'ressources', 'masks')
    url_aramis = 'https://aramislab.paris.inria.fr/files/data/img_t1_linear/'
    FILE1 = RemoteFileStructure(
        filename='ref_cropped_template.nii.gz',
        url=url_aramis,
        checksum=
        '67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3')
    FILE2 = RemoteFileStructure(
        filename='mni_icbm152_t1_tal_nlin_sym_09c.nii',
        url=url_aramis,
        checksum=
        '93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34')

    makedirs(cache_clinicadl, exist_ok=True)

    ref_template = join(cache_clinicadl, FILE2.filename)
    ref_crop = join(cache_clinicadl, FILE1.filename)

    if not exists(ref_template):
        try:
            ref_template = fetch_file(FILE2, cache_clinicadl)
        except IOError as err:
            # Best-effort: keep going so the user sees the workflow error
            # later; preserves the original behavior of only warning here.
            print(
                'Unable to download required template (mni_icbm152) for processing:',
                err)

    if not exists(ref_crop):
        try:
            ref_crop = fetch_file(FILE1, cache_clinicadl)
        except IOError as err:
            print(
                'Unable to download required template (ref_crop) for processing:',
                err)

    sessions, subjects = get_subject_session_list(input_dir, tsv, is_bids_dir,
                                                  False, base_dir)

    # Use hash instead of parameters for iterables folder names
    # Otherwise path will be too long and generate OSError
    from nipype import config
    cfg = dict(execution={'parameterize_dirs': False})
    config.update_config(cfg)

    # Inputs from anat/ folder
    # ========================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects, sessions, bids_directory,
                                        T1W_NII)
    except ClinicaException as e:
        # Files are read from the BIDS folder, so the message names BIDS.
        err = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n' + str(
            e)
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """Specify the list of possible inputs of this pipelines.
        Returns:
        A list of (string) input fields name.
        """
        return ['t1w']

    read_node = npe.Node(
        name="ReadingFiles",
        iterables=[
            ('t1w', t1w_files),
        ],
        synchronize=True,
        interface=nutil.IdentityInterface(fields=get_input_fields()))

    # Extracts the image identifier (sub-XXX_ses-YYY) from the input path;
    # it is later used both as registration prefix and DataSink key.
    image_id_node = npe.Node(interface=nutil.Function(
        input_names=['bids_or_caps_file'],
        output_names=['image_id'],
        function=get_subject_id),
                             name='ImageID')

    # The core (processing) nodes

    # 1. N4biascorrection by ANTS. It uses nipype interface.
    n4biascorrection = npe.Node(name='n4biascorrection',
                                interface=ants.N4BiasFieldCorrection(
                                    dimension=3,
                                    save_bias=True,
                                    bspline_fitting_distance=600))

    # 2. `RegistrationSynQuick` by *ANTS*. It uses nipype interface.
    #    transform_type 'a' = rigid + affine (linear registration only).
    ants_registration_node = npe.Node(name='antsRegistrationSynQuick',
                                      interface=ants.RegistrationSynQuick())
    ants_registration_node.inputs.fixed_image = ref_template
    ants_registration_node.inputs.transform_type = 'a'
    ants_registration_node.inputs.dimension = 3

    # 3. Crop image (using nifti). It uses custom interface, from utils file
    from .T1_linear_utils import crop_nifti

    cropnifti = npe.Node(name='cropnifti',
                         interface=nutil.Function(
                             function=crop_nifti,
                             input_names=['input_img', 'ref_crop'],
                             output_names=['output_img', 'crop_template']))
    cropnifti.inputs.ref_crop = ref_crop

    # ********* Deprecrecated ********** #
    # ** This step was not used in the final version ** #
    # 4. Histogram-based intensity normalization. This is a custom function
    #    performed by the binary `ImageMath` included with *ANTS*.

    #   from .T1_linear_utils import ants_histogram_intensity_normalization
    #
    #   # histogram-based intensity normalization
    #   intensitynorm = npe.Node(
    #           name='intensitynormalization',
    #           interface=nutil.Function(
    #               input_names=['image_dimension', 'crop_template', 'input_img'],
    #               output_names=['output_img'],
    #               function=ants_histogram_intensity_normalization
    #               )
    #           )
    #   intensitynorm.inputs.image_dimension = 3

    # DataSink and the output node

    from .T1_linear_utils import (container_from_filename, get_data_datasink)
    # Create node to write selected files into the CAPS
    from nipype.interfaces.io import DataSink

    get_ids = npe.Node(interface=nutil.Function(
        input_names=['image_id'],
        output_names=['image_id_out', 'subst_ls'],
        function=get_data_datasink),
                       name="GetIDs")

    # Find container path from t1w filename
    # =====================================
    container_path = npe.Node(nutil.Function(
        input_names=['bids_or_caps_filename'],
        output_names=['container'],
        function=container_from_filename),
                              name='ContainerPath')

    write_node = npe.Node(name="WriteCaps", interface=DataSink())
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    # Connectiong the workflow
    from clinica.utils.nipype import fix_join

    wf = npe.Workflow(name='t1_linear_dl', base_dir=working_directory)

    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (image_id_node, ants_registration_node, [('image_id', 'output_prefix')
                                                 ]),
        (read_node, n4biascorrection, [("t1w", "input_image")]),
        (n4biascorrection, ants_registration_node, [('output_image',
                                                     'moving_image')]),
        (ants_registration_node, cropnifti, [('warped_image', 'input_img')]),
        (ants_registration_node, write_node, [('out_matrix', '@affine_mat')]),
        # Connect to DataSink
        (container_path, write_node, [(('container', fix_join, 't1_linear'),
                                       'container')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')]),
        # (get_ids, write_node, [('regexp_subst_ls', 'regexp_substitutions')]),
        (n4biascorrection, write_node, [('output_image', '@outfile_corr')]),
        (ants_registration_node, write_node, [('warped_image', '@outfile_reg')
                                              ]),
        (cropnifti, write_node, [('output_img', '@outfile_crop')]),
    ])

    return wf
예제 #18
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Raise:
            ClinicaBIDSError: If there are duplicated files or missing files for any subject
        """
        import os

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from colorama import Fore

        from clinica.iotools.utils.data_handling import (
            check_volume_location_in_world_coordinate_system, )
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.filemanip import (
            extract_subjects_sessions_from_filename,
            save_participants_sessions,
        )
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # Report image(s) already present in the CAPS folder; unless the
        # user asked to overwrite, drop them from the list to process.
        already_processed = self.get_processed_images(self.caps_directory,
                                                      self.subjects,
                                                      self.sessions)
        if already_processed:
            cprint(f"{Fore.YELLOW}Clinica found {len(already_processed)} image(s) "
                   f"already processed in CAPS directory:{Fore.RESET}")
            for image_id in already_processed:
                cprint(
                    f"{Fore.YELLOW}\t{image_id.replace('_', ' | ')}{Fore.RESET}"
                )
            if not self.overwrite_caps:
                cprint(
                    f"{Fore.YELLOW}\nImage(s) will be ignored by Clinica.\n{Fore.RESET}"
                )
                requested_ids = [
                    p_id + "_" + s_id
                    for p_id, s_id in zip(self.subjects, self.sessions)
                ]
                remaining = list(set(requested_ids) - set(already_processed))
                self.subjects, self.sessions = extract_subjects_sessions_from_filename(
                    remaining)
            else:
                output_folder = "<CAPS>/subjects/<participant_id>/<session_id>/t1/freesurfer_cross_sectional"
                cprint(
                    f"{Fore.YELLOW}\nOutput folders in {output_folder} will be recreated.\n{Fore.RESET}"
                )

        # Inputs from anat/ folder
        # ========================
        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err_msg = (
                "Clinica faced error(s) while trying to read files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err_msg)

        # Persist the final subject/session list next to the pipeline's
        # working directory for traceability.
        folder_participants_tsv = os.path.join(self.base_dir, self.name)
        save_participants_sessions(self.subjects, self.sessions,
                                   folder_participants_tsv)

        if self.subjects:
            print_images_to_process(self.subjects, self.sessions)
            cprint("List available in %s" %
                   os.path.join(folder_participants_tsv, "participants.tsv"))
            cprint("The pipeline will last approximately 10 hours per image.")

        reading_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ("t1w", t1w_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        # Sanity check on the qform/sform of every input image.
        check_volume_location_in_world_coordinate_system(
            t1w_files, self.bids_directory)
        self.connect([
            (reading_node, self.input_node, [("t1w", "t1w")]),
        ])