Example #1
    def run_command(self, args):
        """Run the pipeline with defined args."""
        import os
        import datetime
        from colorama import Fore
        from clinica.utils.stream import cprint
        from clinica.utils.longitudinal import get_participants_long_id
        from clinica.utils.participant import get_subject_session_list
        from .t1_freesurfer_template_cli import T1FreeSurferTemplateCLI
        from .t1_freesurfer_longitudinal_correction_cli import T1FreeSurferLongitudinalCorrectionCLI
        from .longitudinal_utils import save_part_sess_long_ids_to_tsv

        cprint(
            'The t1-freesurfer-longitudinal pipeline is divided into 2 parts:\n'
            '\t%st1-freesurfer-unbiased-template pipeline%s: Creation of unbiased template\n'
            '\t%st1-freesurfer-longitudinal-correction pipeline%s: Longitudinal correction\n'
            % (Fore.BLUE, Fore.RESET, Fore.BLUE, Fore.RESET)
        )

        if not self.absolute_path(args.subjects_sessions_tsv):
            l_sess, l_part = get_subject_session_list(self.absolute_path(args.caps_directory), None, False, False)
            l_long = get_participants_long_id(l_part, l_sess)
            now = datetime.datetime.now().strftime('%H%M%S')
            args.subjects_sessions_tsv = now + '_participants.tsv'
            save_part_sess_long_ids_to_tsv(l_part, l_sess, l_long, os.getcwd(), args.subjects_sessions_tsv)

        cprint('%s\nPart 1/2: Running t1-freesurfer-unbiased-template pipeline%s' % (Fore.BLUE, Fore.RESET))
        unbiased_template_cli = T1FreeSurferTemplateCLI()
        unbiased_template_cli.run_command(args)

        cprint('%s\nPart 2/2: Running t1-freesurfer-longitudinal-correction pipeline%s' % (Fore.BLUE, Fore.RESET))
        longitudinal_correction_cli = T1FreeSurferLongitudinalCorrectionCLI()
        longitudinal_correction_cli.run_command(args)
Example #2

# Module-level imports assumed by this excerpt (not shown in the original snippet):
from typing import Optional

import click

def cli(
    ctx: click.Context,
    caps_directory: str,
    subjects_sessions_tsv: Optional[str] = None,
    working_directory: Optional[str] = None,
    n_procs: Optional[int] = None,
    overwrite_outputs: bool = False,
) -> None:
    """Longitudinal pre-processing of T1w images with FreeSurfer.

    https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/T1_FreeSurfer_Longitudinal/
    """
    import datetime
    import os

    from clinica.utils.longitudinal import get_participants_long_id
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.stream import cprint

    from . import t1_freesurfer_longitudinal_correction_cli, t1_freesurfer_template_cli
    from .longitudinal_utils import save_part_sess_long_ids_to_tsv

    cprint(
        "The t1-freesurfer-longitudinal pipeline is divided into 2 parts:\n"
        "\tt1-freesurfer-unbiased-template pipeline: Creation of unbiased template\n"
        "\tt1-freesurfer-longitudinal-correction pipeline: Longitudinal correction."
    )

    if not subjects_sessions_tsv:
        l_sess, l_part = get_subject_session_list(caps_directory, None, False,
                                                  False)
        l_long = get_participants_long_id(l_part, l_sess)
        now = datetime.datetime.now().strftime("%H%M%S")
        subjects_sessions_tsv = now + "_participants.tsv"
        save_part_sess_long_ids_to_tsv(l_part, l_sess, l_long, os.getcwd(),
                                       subjects_sessions_tsv)

    cprint("Part 1/2: Running t1-freesurfer-unbiased-template pipeline.")
    ctx.invoke(
        t1_freesurfer_template_cli.cli,
        caps_directory=caps_directory,
        subjects_sessions_tsv=subjects_sessions_tsv,
        working_directory=working_directory,
        n_procs=n_procs,
        overwrite_outputs=overwrite_outputs,
    )

    cprint("Part 2/2: Running t1-freesurfer-longitudinal-correction pipeline.")
    ctx.invoke(
        t1_freesurfer_longitudinal_correction_cli.cli,
        caps_directory=caps_directory,
        subjects_sessions_tsv=subjects_sessions_tsv,
        working_directory=working_directory,
        n_procs=n_procs,
        overwrite_outputs=overwrite_outputs,
    )
Example #3
    def run_command(self, args):
        """Run the pipeline with defined args."""
        import os
        import datetime
        from colorama import Fore
        from ..t1_volume_tissue_segmentation.t1_volume_tissue_segmentation_cli import T1VolumeTissueSegmentationCLI
        from ..t1_volume_create_dartel.t1_volume_create_dartel_cli import T1VolumeCreateDartelCLI
        from ..t1_volume_dartel2mni.t1_volume_dartel2mni_cli import T1VolumeDartel2MNICLI
        from ..t1_volume_parcellation.t1_volume_parcellation_cli import T1VolumeParcellationCLI
        from clinica.utils.check_dependency import verify_cat12_atlases
        from clinica.utils.filemanip import save_participants_sessions
        from clinica.utils.participant import get_subject_session_list
        from clinica.utils.stream import cprint

        # If the user wants to use any of the CAT12 atlases but has not installed CAT12,
        # we remove them from the list of computed atlases
        args.atlases = verify_cat12_atlases(args.atlases)

        cprint(
            'The t1-volume pipeline is divided into 4 parts:\n'
            '\t%st1-volume-tissue-segmentation pipeline%s: Tissue segmentation, bias correction and spatial normalization to MNI space\n'
            '\t%st1-volume-create-dartel pipeline%s: Inter-subject registration with the creation of a new DARTEL template\n'
            '\t%st1-volume-dartel2mni pipeline%s: DARTEL template to MNI\n'
            '\t%st1-volume-parcellation pipeline%s: Atlas statistics' %
            (Fore.BLUE, Fore.RESET, Fore.BLUE, Fore.RESET, Fore.BLUE,
             Fore.RESET, Fore.BLUE, Fore.RESET))

        if not self.absolute_path(args.subjects_sessions_tsv):
            session_ids, participant_ids = get_subject_session_list(
                self.absolute_path(args.bids_directory), None, True, False)
            now = datetime.datetime.now().strftime('%H%M%S')
            args.subjects_sessions_tsv = now + '_participants.tsv'
            save_participants_sessions(participant_ids, session_ids,
                                       os.getcwd(), args.subjects_sessions_tsv)

        cprint('%s\nPart 1/4: Running t1-volume-tissue-segmentation pipeline%s' %
               (Fore.BLUE, Fore.RESET))
        tissue_segmentation_cli = T1VolumeTissueSegmentationCLI()
        tissue_segmentation_cli.run_command(args)

        cprint('%s\nPart 2/4: Running t1-volume-create-dartel pipeline%s' %
               (Fore.BLUE, Fore.RESET))
        create_dartel_cli = T1VolumeCreateDartelCLI()
        create_dartel_cli.run_command(args)

        cprint('%s\nPart 3/4: Running t1-volume-dartel2mni pipeline%s' %
               (Fore.BLUE, Fore.RESET))
        dartel2mni_cli = T1VolumeDartel2MNICLI()
        dartel2mni_cli.run_command(args)

        cprint('%s\nPart 4/4: Running t1-volume-parcellation pipeline%s' %
               (Fore.BLUE, Fore.RESET))
        parcellation_cli = T1VolumeParcellationCLI()
        parcellation_cli.run_command(args)
Example #4
def create_merge_file(bids_dir,
                      out_tsv,
                      caps_dir=None,
                      tsv_file=None,
                      pipelines=None,
                      **kwargs):
    """
    Merge all the TSV files containing clinical data of a BIDS-compliant dataset and store
    the result in a single TSV file.

    Args:
        bids_dir: path to the BIDS folder
        out_tsv: path to the output tsv file
        caps_dir: path to the CAPS folder (optional)
        tsv_file: TSV file containing the subjects with their sessions (optional)
        pipelines: when adding CAPS information, indicates the pipelines that will be merged (optional)

    """
    from os import path
    from glob import glob
    import os
    import pandas as pd
    import numpy as np
    import warnings
    from .pipeline_handling import InitException, DatasetError
    from clinica.utils.participant import get_subject_session_list

    if caps_dir is not None:
        if not path.isdir(caps_dir):
            raise IOError('The path to the CAPS directory is wrong')

    col_list = []
    scans_dict = {}

    if not os.path.isfile(path.join(bids_dir, 'participants.tsv')):
        raise IOError(
            'participants.tsv not found in the specified BIDS directory')
    participants_df = pd.read_csv(path.join(bids_dir, 'participants.tsv'),
                                  sep='\t')

    sessions, subjects = get_subject_session_list(bids_dir,
                                                  ss_file=tsv_file,
                                                  use_session_tsv=True)
    n_sessions = len(sessions)

    # Determine which part of out_tsv is the directory and which is the file name
    if os.sep not in out_tsv:
        out_dir = os.getcwd()
        if out_tsv == '.':
            out_file_name = 'merge_tsv.tsv'
        else:
            out_file_name = out_tsv
    else:
        out_file_name = out_tsv.split(os.sep)[-1]
        out_dir = path.dirname(out_tsv)

    if len(out_file_name) == 0:
        out_file_name = 'merge_tsv.tsv'

    if '.' not in out_file_name:
        out_file_name = out_file_name + '.tsv'
    else:
        extension = os.path.splitext(out_file_name)[1]
        if extension != '.tsv':
            raise TypeError('Output file must be .tsv.')

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    for col in participants_df.columns.values:
        col_list.append(col)

    merged_df = pd.DataFrame(columns=col_list)

    # BIDS part
    i_subject = 0
    while i_subject < n_sessions:
        sub_path = path.join(bids_dir, subjects[i_subject])
        sub_name = sub_path.split(os.sep)[-1]
        # For each subject, extract the corresponding row from the dataframe
        row_participant = participants_df[participants_df['participant_id'] ==
                                          sub_name]
        # Open the sessions file related to the subject
        sessions_df = pd.read_csv(path.join(sub_path,
                                            sub_name + '_sessions.tsv'),
                                  sep='\t')
        # Looking for the sessions corresponding to the subject
        loc_sessions = []
        i_session = i_subject
        while i_session < n_sessions and subjects[i_session] == subjects[
                i_subject]:
            loc_sessions.append(i_session)
            i_session += 1

        # For each session found
        # extract the information contained in the scans files
        # for line in range(0, len(sessions_df)):
        for i_session in loc_sessions:
            # Extract the information regarding the session
            # and convert it to a dictionary
            row_session_df = sessions_df[sessions_df.session_id ==
                                         sessions[i_session]]
            row_session_df.reset_index(inplace=True, drop=True)
            if len(row_session_df) == 0:
                raise DatasetError(sessions_df.loc[0, 'session_id'] + ' / ' +
                                   sessions[i_session])

            new_cols = [
                s for s in row_session_df.columns.values if s not in col_list
            ]
            if len(new_cols) != 0:
                for i in range(0, len(new_cols)):
                    col_list.append(new_cols[i])

            session_id = row_session_df.loc[0, 'session_id']
            if os.path.isfile(
                    path.join(
                        bids_dir, sub_name, 'ses-' + session_id,
                        sub_name + '_' + 'ses-' + session_id + '_scans.tsv')):
                scans_df = pd.read_csv(path.join(
                    bids_dir, sub_name, 'ses-' + session_id,
                    sub_name + '_' + 'ses-' + session_id + '_scans.tsv'),
                                       sep='\t')
                for i in range(0, len(scans_df)):
                    for col in scans_df.columns.values:
                        if col == 'filename':
                            pass
                        else:
                            file_scan = scans_df.iloc[i]['filename']
                            file_name = file_scan.split('/')[1]
                            # Remove the extension .nii.gz
                            file_name = os.path.splitext(
                                os.path.splitext(file_name)[0])[0]
                            file_parts = file_name.split('_')
                            last_pattern_index = len(file_parts) - 1
                            mod_type = file_parts[last_pattern_index]
                            value = scans_df.iloc[i][col]
                            new_col_name = col + '_' + mod_type
                            scans_dict.update({new_col_name: value})
                row_scans = pd.DataFrame(scans_dict, index=[0])
            else:
                row_scans = pd.DataFrame()

            new_cols = [
                s for s in row_scans.columns.values if s not in col_list
            ]
            if len(new_cols) != 0:
                for i in range(0, len(new_cols)):
                    col_list.append(new_cols[i])

            row_to_append_df = pd.DataFrame(columns=row_participant.columns)
            for col in row_participant:
                row_to_append_df[col] = row_participant[col]

            # Append all the data inside session_df
            for col in row_session_df:
                row_to_append_df[col] = row_session_df[col].values[0]

            for col in row_scans:
                row_to_append_df[col] = row_scans[col].values[0]

            merged_df = merged_df.append(row_to_append_df)
        scans_dict = {}
        i_subject = loc_sessions[-1] + 1

    old_index = col_list.index('session_id')
    col_list.insert(1, col_list.pop(old_index))
    merged_df = merged_df[col_list]
    merged_df.to_csv(path.join(out_dir, out_file_name), sep='\t', index=False)

    len_BIDS = len(merged_df.columns)

    # # Call the script for computing the missing modalities
    # # and append the result to the merged file
    # compute_missing_mods(bids_dir, out_dir, 'tmpG7VIY0')
    # tmp_ses = glob(path.join(out_dir, 'tmpG7VIY0*'))
    # for f in tmp_ses:
    #     # Skip the summary file
    #     if 'summary' not in f:
    #         # Load the file
    #         mss_df = pd.read_csv(f, sep='\t')
    #         f_name = f.split(os.sep)[-1]
    #         patterns = f_name.split('-')
    #         ses_id = patterns[len(patterns) - 1]
    #         ses_id = ses_id.replace('.tsv', '')
    #         cols = mss_df.columns.values
    #
    #         # If the file opened contains new columns,
    #         # add them to the existing merged_df
    #         for col_name in cols:
    #             if col_name not in col_list:
    #                 merged_df[col_name] = 0
    #
    #         for i in range(0, len(mss_df)):
    #             row = mss_df.iloc[i]
    #             subj_idx = merged_df[(merged_df['participant_id'] == row['participant_id']) & (
    #                     merged_df['session_id'] == ses_id)].index.tolist()
    #
    #             if len(subj_idx) > 1:
    #                 raise ValueError('Multiple row for the same visit in the merge-tsv file.')
    #             elif len(subj_idx) == 0:
    #                 print 'Warning: Found modalities missing information for the subject:' + row[
    #                     'participant_id'] + ' visit:' + ses_id + ' but the subject is not included in the column participant_id.'
    #                 continue
    #             else:
    #                 subj_idx = subj_idx[0]
    #             for col_name in cols:
    #                 if not col_name == 'participant_id':
    #                     merged_df.iloc[subj_idx, merged_df.columns.get_loc(col_name)] = row[col_name]
    #
    # # Remove all the temporary files created
    # for f in tmp_ses:
    #     os.remove(f)
    #
    # if true_false_mode:
    #     merged_df = merged_df.replace(['Y', 'N'], ['0', '1'])

    merged_df = merged_df.reset_index(drop=True)

    # CAPS
    if caps_dir is not None:
        # Call the different pipelines
        from .pipeline_handling import t1_volume_pipeline, pet_volume_pipeline

        pipeline_options = {
            't1-volume': t1_volume_pipeline,
            'pet-volume': pet_volume_pipeline
        }
        columns_summary = [
            'pipeline_name', 'group_id', 'atlas_id', 'regions_number',
            'first_column_name', 'last_column_name'
        ]
        merged_summary_df = pd.DataFrame(columns=columns_summary)
        if pipelines is None:
            for key, pipeline in pipeline_options.items():
                try:
                    merged_df, summary_df = pipeline(caps_dir, merged_df,
                                                     **kwargs)
                    merged_summary_df = pd.concat(
                        [merged_summary_df, summary_df])

                except InitException:
                    warnings.warn('This pipeline was not initialized: ' + key)
        else:
            for pipeline in pipelines:
                merged_df, summary_df = pipeline_options[pipeline](caps_dir,
                                                                   merged_df,
                                                                   **kwargs)
                merged_summary_df = pd.concat([merged_summary_df, summary_df])

        n_atlas = len(merged_summary_df)
        index_column_df = pd.DataFrame(
            index=np.arange(n_atlas),
            columns=['first_column_index', 'last_column_index'])
        index_column_df.iat[0, 0] = len_BIDS
        index_column_df.iat[n_atlas - 1, 1] = np.shape(merged_df)[1] - 1
        for i in range(1, n_atlas):
            index_column_df.iat[i, 0] = index_column_df.iat[
                i - 1, 0] + merged_summary_df.iat[i - 1, 3]
            index_column_df.iat[i - 1, 1] = index_column_df.iat[i, 0] - 1

        merged_summary_df.reset_index(inplace=True, drop=True)
        merged_summary_df = pd.concat([merged_summary_df, index_column_df],
                                      axis=1)
        summary_filename = out_file_name.split('.')[0] + '_summary.tsv'
        merged_summary_df.to_csv(path.join(out_dir, summary_filename),
                                 sep='\t',
                                 index=False)

    merged_df.to_csv(path.join(out_dir, out_file_name), sep='\t', index=False)
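
A minimal usage sketch for create_merge_file; the import location and the folder paths below are placeholders/assumptions, not taken from the snippet above.

# Hypothetical call; adapt the import path and the folders to your installation.
from clinica.iotools.utils.data_handling import create_merge_file  # assumed module path

create_merge_file(
    bids_dir='/data/my_study/BIDS',        # must contain participants.tsv
    out_tsv='merged_clinical_data.tsv',    # written in the current working directory
    caps_dir='/data/my_study/CAPS',        # optional: also merge pipeline outputs
    pipelines=None,                        # None tries every supported pipeline
)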
Example #5
def preprocessing_t1w(bids_directory,
                      caps_directory,
                      tsv,
                      working_directory=None):
    """
    This preprocessing pipeline includes the following steps:
    1) N4 bias correction (performed with ANTS).
    2) Linear registration to MNI (MNI icbm152 non-linear symmetric template)
       (performed with ANTS) - RegistrationSynQuick.
    3) Cropping of the background (in order to save computational power).
    4) Histogram-based intensity normalization. This is a custom function
       performed by the binary ImageMath included with ANTS.

    Parameters
    ----------
    bids_directory: str
       Folder with BIDS structure.
    caps_directory: str
       Folder where the CAPS structure will be stored.
    tsv: str
       TSV file with the subject list (participant_id and session_id).
    working_directory: str
       Folder containing a temporary space to save intermediate results.
    """

    from clinica.utils.inputs import check_bids_folder
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.input_files import T1W_NII
    from clinicadl.tools.inputs.input import fetch_file
    from os.path import dirname, join, abspath, split, exists
    from os import pardir

    check_bids_folder(bids_directory)
    input_dir = bids_directory
    is_bids_dir = True
    base_dir = working_directory

    root = dirname(abspath(join(abspath(__file__), pardir)))
    path_to_mask = join(root, 'resources', 'masks')
    ref_template = join(path_to_mask, 'mni_icbm152_t1_tal_nlin_sym_09c.nii')
    ref_crop = join(path_to_mask, 'ref_cropped_template.nii.gz')
    url1 = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/ref_cropped_template.nii.gz"
    url2 = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/mni_icbm152_t1_tal_nlin_sym_09c.nii"
    if not (exists(ref_template)):
        try:
            fetch_file(url2, ref_template)
        except IOError as err:
            print(
                'Unable to download required template (mni_icbm152) for processing:',
                err)

    if not (exists(ref_crop)):
        try:
            fetch_file(url1, ref_crop)
        except IOError as err:
            print(
                'Unable to download required template (ref_crop) for processing:',
                err)

    sessions, subjects = get_subject_session_list(input_dir, tsv, is_bids_dir,
                                                  False, base_dir)

    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.input_files import T1W_NII
    import nipype.pipeline.engine as npe
    import nipype.interfaces.utility as nutil
    from nipype.interfaces import ants
    from clinica.utils.filemanip import get_subject_id

    # Inputs from anat/ folder
    # ========================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects, sessions, bids_directory,
                                        T1W_NII)
    except ClinicaException as e:
        err = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n' + str(
            e)
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input field names.
        """
        return ['t1w']

    read_node = npe.Node(
        name="ReadingFiles",
        iterables=[
            ('t1w', t1w_files),
        ],
        synchronize=True,
        interface=nutil.IdentityInterface(fields=get_input_fields()))

    image_id_node = npe.Node(interface=nutil.Function(
        input_names=['bids_or_caps_file'],
        output_names=['image_id'],
        function=get_subject_id),
                             name='ImageID')

    ## The core (processing) nodes

    # 1. N4biascorrection by ANTS. It uses nipype interface.
    n4biascorrection = npe.Node(name='n4biascorrection',
                                interface=ants.N4BiasFieldCorrection(
                                    dimension=3,
                                    save_bias=True,
                                    bspline_fitting_distance=600))

    # 2. `RegistrationSynQuick` by *ANTS*. It uses nipype interface.
    ants_registration_node = npe.Node(name='antsRegistrationSynQuick',
                                      interface=ants.RegistrationSynQuick())
    ants_registration_node.inputs.fixed_image = ref_template
    ants_registration_node.inputs.transform_type = 'a'
    ants_registration_node.inputs.dimension = 3

    # 3. Crop image (using nifti). It uses custom interface, from utils file
    from .T1_linear_utils import crop_nifti

    cropnifti = npe.Node(name='cropnifti',
                         interface=nutil.Function(
                             function=crop_nifti,
                             input_names=['input_img', 'ref_crop'],
                             output_names=['output_img', 'crop_template']))
    cropnifti.inputs.ref_crop = ref_crop

    #### Deprecated ####
    #### This step was not used in the final version ####
    # 4. Histogram-based intensity normalization. This is a custom function
    #    performed by the binary `ImageMath` included with *ANTS*.

    #   from .T1_linear_utils import ants_histogram_intensity_normalization
    #
    #   ## histogram-based intensity normalization
    #   intensitynorm = npe.Node(
    #           name='intensitynormalization',
    #           interface=nutil.Function(
    #               input_names=['image_dimension', 'crop_template', 'input_img'],
    #               output_names=['output_img'],
    #               function=ants_histogram_intensity_normalization
    #               )
    #           )
    #   intensitynorm.inputs.image_dimension = 3

    ## DataSink and the output node

    from .T1_linear_utils import (container_from_filename, get_data_datasink)
    # Create node to write selected files into the CAPS
    from nipype.interfaces.io import DataSink

    get_ids = npe.Node(interface=nutil.Function(
        input_names=['image_id'],
        output_names=['image_id_out', 'subst_ls'],
        function=get_data_datasink),
                       name="GetIDs")

    # Find container path from t1w filename
    # =====================================
    container_path = npe.Node(nutil.Function(
        input_names=['bids_or_caps_filename'],
        output_names=['container'],
        function=container_from_filename),
                              name='ContainerPath')

    write_node = npe.Node(name="WriteCaps", interface=DataSink())
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    ## Connecting the workflow
    from clinica.utils.nipype import fix_join

    wf = npe.Workflow(name='t1_linear_dl', base_dir=working_directory)

    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (image_id_node, ants_registration_node, [('image_id', 'output_prefix')
                                                 ]),
        (read_node, n4biascorrection, [("t1w", "input_image")]),
        (n4biascorrection, ants_registration_node, [('output_image',
                                                     'moving_image')]),
        (ants_registration_node, cropnifti, [('warped_image', 'input_img')]),
        (ants_registration_node, write_node, [('out_matrix', 'affine_mat')]),
        # Connect to DataSink
        (container_path, write_node, [(('container', fix_join, 't1_linear'),
                                       'container')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')]),
        #(get_ids, write_node, [('regexp_subst_ls', 'regexp_substitutions')]),
        (n4biascorrection, write_node, [('output_image', '@outfile_corr')]),
        (ants_registration_node, write_node, [('warped_image', '@outfile_reg')
                                              ]),
        (cropnifti, write_node, [('output_img', '@outfile_crop')]),
    ])

    return wf
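
preprocessing_t1w returns a Nipype workflow, so a minimal usage sketch could look like the following (all paths are placeholders):

wf = preprocessing_t1w(
    bids_directory='/data/my_study/BIDS',
    caps_directory='/data/my_study/CAPS',
    tsv='/data/my_study/subjects_sessions.tsv',
    working_directory='/tmp/t1_linear_wd',
)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})  # standard Nipype multiprocessing plugin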
Example #6
File: extract.py  Project: mdiazmel/AD-DL
def DeepLearningPrepareData(caps_directory, tsv_file, parameters):
    import os
    from logging import getLogger
    from os import path

    from clinica.utils.exceptions import (
        ClinicaBIDSError,
        ClinicaCAPSError,
        ClinicaException,
    )
    from clinica.utils.input_files import (
        T1W_EXTENSIVE,
        T1W_LINEAR,
        T1W_LINEAR_CROPPED,
        pet_linear_nii,
    )
    from clinica.utils.inputs import check_caps_folder, clinica_file_reader
    from clinica.utils.nipype import container_from_filename
    from clinica.utils.participant import get_subject_session_list
    from torch import save as save_tensor

    from clinicadl.utils.preprocessing import write_preprocessing

    from .extract_utils import (
        check_mask_list,
        extract_images,
        extract_patches,
        extract_roi,
        extract_slices,
    )

    logger = getLogger("clinicadl")

    # Get subject and session list
    check_caps_folder(caps_directory)
    input_dir = caps_directory
    logger.debug(f"CAPS directory : {input_dir}.")
    is_bids_dir = False
    sessions, subjects = get_subject_session_list(input_dir, tsv_file,
                                                  is_bids_dir, False, None)
    logger.info(
        f"{parameters['mode']}s will be extracted in Pytorch tensor from {len(sessions)} images."
    )
    logger.debug(f"List of subjects: \n{subjects}.")
    logger.debug(f"List of sessions: \n{sessions}.")

    # Select the correct filetype corresponding to modality
    # and select the right folder output name corresponding to modality
    logger.debug(
        f"Selected images are preprocessed with {parameters['preprocessing']} pipeline."
    )
    if parameters["preprocessing"] == "t1-linear":
        mod_subfolder = "t1_linear"
        if parameters["use_uncropped_image"]:
            FILE_TYPE = T1W_LINEAR
        else:
            FILE_TYPE = T1W_LINEAR_CROPPED
    if parameters["preprocessing"] == "t1-extensive":
        mod_subfolder = "t1_extensive"
        FILE_TYPE = T1W_EXTENSIVE
        parameters["uncropped_image"] = None
    if parameters["preprocessing"] == "pet-linear":
        mod_subfolder = "pet_linear"
        FILE_TYPE = pet_linear_nii(
            parameters["acq_label"],
            parameters["suvr_reference_region"],
            parameters["use_uncropped_image"],
        )
    if parameters["preprocessing"] == "custom":
        mod_subfolder = "custom"
        FILE_TYPE = {
            "pattern": f"*{parameters['custom_suffix']}",
            "description": "Custom suffix",
        }
        parameters["use_uncropped_image"] = None
    parameters["file_type"] = FILE_TYPE

    # Input file:
    input_files = clinica_file_reader(subjects, sessions, caps_directory,
                                      FILE_TYPE)

    # Loop on the images
    for file in input_files:
        logger.debug(f"  Processing of {file}.")
        container = container_from_filename(file)
        # Extract the wanted tensor
        if parameters["mode"] == "image":
            subfolder = "image_based"
            output_mode = extract_images(file)
            logger.debug(f"    Image extracted.")
        elif parameters["mode"] == "slice":
            subfolder = "slice_based"
            output_mode = extract_slices(
                file,
                slice_direction=parameters["slice_direction"],
                slice_mode=parameters["slice_mode"],
                discarded_slices=parameters["discarded_slices"],
            )
            logger.debug(f"    {len(output_mode)} slices extracted.")
        elif parameters["mode"] == "patch":
            subfolder = "patch_based"
            output_mode = extract_patches(
                file,
                patch_size=parameters["patch_size"],
                stride_size=parameters["stride_size"],
            )
            logger.debug(f"    {len(output_mode)} patches extracted.")
        elif parameters["mode"] == "roi":
            subfolder = "roi_based"
            if parameters["preprocessing"] == "custom":
                parameters["roi_template"] = parameters["roi_custom_template"]
                if parameters["roi_custom_template"] is None:
                    raise ValueError(
                        "A custom template must be defined when the modality is set to custom."
                    )
            else:
                from .extract_utils import TEMPLATE_DICT

                parameters["roi_template"] = TEMPLATE_DICT[
                    parameters["preprocessing"]]
            parameters["masks_location"] = path.join(
                caps_directory, "masks", f"tpl-{parameters['roi_template']}")
            if len(parameters["roi_list"]) == 0:
                raise ValueError("A list of regions must be given.")
            else:
                check_mask_list(
                    parameters["masks_location"],
                    parameters["roi_list"],
                    parameters["roi_custom_mask_pattern"],
                    None if parameters["use_uncropped_image"] is None else
                    not parameters["use_uncropped_image"],
                )
            output_mode = extract_roi(
                file,
                masks_location=parameters["masks_location"],
                mask_pattern=parameters["roi_custom_mask_pattern"],
                cropped_input=None if parameters["use_uncropped_image"] is None
                else not parameters["use_uncropped_image"],
                roi_list=parameters["roi_list"],
                uncrop_output=parameters["uncropped_roi"],
            )
            logger.debug(f"    ROI extracted.")
        # Write the extracted tensor on a .pt file
        for tensor in output_mode:
            output_file_dir = path.join(
                caps_directory,
                container,
                "deeplearning_prepare_data",
                subfolder,
                mod_subfolder,
            )
            if not path.exists(output_file_dir):
                os.makedirs(output_file_dir)
            output_file = path.join(output_file_dir, tensor[0])
            save_tensor(tensor[1], output_file)
            logger.debug(f"    Output tensor saved at {output_file}")

    # Save parameters dictionary
    preprocessing_json_path = write_preprocessing(parameters, caps_directory)
    logger.info(f"Preprocessing JSON saved at {preprocessing_json_path}.")
Example #7
File: extract.py  Project: ravih18/AD-DL
def DeepLearningPrepareData(caps_directory, tsv_file, n_proc, parameters):
    import os
    from logging import getLogger
    from os import path

    from clinica.utils.inputs import check_caps_folder, clinica_file_reader
    from clinica.utils.nipype import container_from_filename
    from clinica.utils.participant import get_subject_session_list
    from joblib import Parallel, delayed
    from torch import save as save_tensor

    from clinicadl.utils.exceptions import ClinicaDLArgumentError
    from clinicadl.utils.preprocessing import write_preprocessing

    from .extract_utils import check_mask_list, compute_folder_and_file_type

    logger = getLogger("clinicadl")

    # Get subject and session list
    check_caps_folder(caps_directory)
    logger.debug(f"CAPS directory : {caps_directory}.")
    is_bids_dir = False
    sessions, subjects = get_subject_session_list(caps_directory, tsv_file,
                                                  is_bids_dir, False, None)
    if parameters["prepare_dl"]:
        logger.info(
            f"{parameters['mode']}s will be extracted in Pytorch tensor from {len(sessions)} images."
        )
    else:
        logger.info(
            f"Images will be extracted in Pytorch tensor from {len(sessions)} images."
        )
        logger.info(
            f"Information for {parameters['mode']} will be saved in output JSON file and will be used "
            f"during training for on-the-fly extraction.")
    logger.debug(f"List of subjects: \n{subjects}.")
    logger.debug(f"List of sessions: \n{sessions}.")

    # Select the correct filetype corresponding to modality
    # and select the right folder output name corresponding to modality
    logger.debug(
        f"Selected images are preprocessed with {parameters['preprocessing']} pipeline."
    )
    mod_subfolder, file_type = compute_folder_and_file_type(parameters)
    parameters["file_type"] = file_type

    # Input file:
    input_files = clinica_file_reader(subjects, sessions, caps_directory,
                                      file_type)[0]

    def write_output_imgs(output_mode, container, subfolder):
        # Write the extracted tensor on a .pt file
        for filename, tensor in output_mode:
            output_file_dir = path.join(
                caps_directory,
                container,
                "deeplearning_prepare_data",
                subfolder,
                mod_subfolder,
            )
            if not path.exists(output_file_dir):
                os.makedirs(output_file_dir)
            output_file = path.join(output_file_dir, filename)
            save_tensor(tensor, output_file)
            logger.debug(f"    Output tensor saved at {output_file}")

    if parameters["mode"] == "image" or not parameters["prepare_dl"]:

        def prepare_image(file):
            from .extract_utils import extract_images

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "image_based"
            output_mode = extract_images(file)
            logger.debug(f"    Image extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_image)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "slice":

        def prepare_slice(file):
            from .extract_utils import extract_slices

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "slice_based"
            output_mode = extract_slices(
                file,
                slice_direction=parameters["slice_direction"],
                slice_mode=parameters["slice_mode"],
                discarded_slices=parameters["discarded_slices"],
            )
            logger.debug(f"    {len(output_mode)} slices extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_slice)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "patch":

        def prepare_patch(file):
            from .extract_utils import extract_patches

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "patch_based"
            output_mode = extract_patches(
                file,
                patch_size=parameters["patch_size"],
                stride_size=parameters["stride_size"],
            )
            logger.debug(f"    {len(output_mode)} patches extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_patch)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "roi":

        def prepare_roi(file):
            from .extract_utils import extract_roi

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "roi_based"
            if parameters["preprocessing"] == "custom":
                if not parameters["roi_custom_template"]:
                    raise ClinicaDLArgumentError(
                        "A custom template must be defined when the modality is set to custom."
                    )
                parameters["roi_template"] = parameters["roi_custom_template"]
                parameters["roi_mask_pattern"] = parameters[
                    "roi_custom_mask_pattern"]
            else:
                from .extract_utils import PATTERN_DICT, TEMPLATE_DICT

                parameters["roi_template"] = TEMPLATE_DICT[
                    parameters["preprocessing"]]
                parameters["roi_mask_pattern"] = PATTERN_DICT[
                    parameters["preprocessing"]]

            parameters["masks_location"] = path.join(
                caps_directory, "masks", f"tpl-{parameters['roi_template']}")
            if len(parameters["roi_list"]) == 0:
                raise ClinicaDLArgumentError(
                    "A list of regions of interest must be given.")
            else:
                check_mask_list(
                    parameters["masks_location"],
                    parameters["roi_list"],
                    parameters["roi_mask_pattern"],
                    None if parameters["use_uncropped_image"] is None else
                    not parameters["use_uncropped_image"],
                )
            output_mode = extract_roi(
                file,
                masks_location=parameters["masks_location"],
                mask_pattern=parameters["roi_mask_pattern"],
                cropped_input=None if parameters["use_uncropped_image"] is None
                else not parameters["use_uncropped_image"],
                roi_names=parameters["roi_list"],
                uncrop_output=parameters["uncropped_roi"],
            )
            logger.debug(f"    ROI extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_roi)(file)
                                for file in input_files)

    else:
        raise NotImplementedError(
            f"Extraction is not implemented for mode {parameters['mode']}.")

    # Save parameters dictionary
    preprocessing_json_path = write_preprocessing(parameters, caps_directory)
    logger.info(f"Preprocessing JSON saved at {preprocessing_json_path}.")
Example #8
    def __init__(
        self,
        bids_directory=None,
        caps_directory=None,
        tsv_file=None,
        overwrite_caps=False,
        base_dir=None,
        parameters={},
        name=None,
    ):
        """Init a Pipeline object.

        Args:
            bids_directory (str, optional): Path to a BIDS directory. Defaults to None.
            caps_directory (str, optional): Path to a CAPS directory. Defaults to None.
            tsv_file (str, optional): Path to a subjects-sessions `.tsv` file. Defaults to None.
            overwrite_caps (bool, optional): Whether to overwrite the output directory. Defaults to False.
            base_dir (str, optional): Working directory (attribute of Nipype::Workflow class). Defaults to None.
            parameters (dict, optional): Pipeline parameters. Defaults to {}.
            name (str, optional): Pipeline name. Defaults to None.

        Raises:
            RuntimeError: If neither a BIDS nor a CAPS directory is given at initialization.
        """
        import inspect
        import os
        from tempfile import mkdtemp

        from colorama import Fore

        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.inputs import check_bids_folder, check_caps_folder
        from clinica.utils.participant import get_subject_session_list

        self._is_built = False
        self._overwrite_caps = overwrite_caps
        self._bids_directory = bids_directory
        self._caps_directory = caps_directory
        self._verbosity = "debug"
        self._tsv_file = tsv_file
        self._info_file = os.path.join(
            os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))),
            "info.json",
        )
        self._info = {}

        if base_dir is None:
            self.base_dir = mkdtemp()
            self._base_dir_was_specified = False
        else:
            self.base_dir = base_dir
            self._base_dir_was_specified = True

        if name:
            self._name = name
        else:
            self._name = self.__class__.__name__
        self._parameters = parameters

        if self._bids_directory is None:
            if self._caps_directory is None:
                raise RuntimeError(
                    f"{Fore.RED}[Error] The {self._name} pipeline does not contain "
                    f"BIDS nor CAPS directory at the initialization.{Fore.RESET}"
                )

            check_caps_folder(self._caps_directory)
            input_dir = self._caps_directory
            is_bids_dir = False
        else:
            check_bids_folder(self._bids_directory)
            input_dir = self._bids_directory
            is_bids_dir = True
        self._sessions, self._subjects = get_subject_session_list(
            input_dir, tsv_file, is_bids_dir, False, base_dir
        )

        self.init_nodes()
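
A minimal sketch of how a concrete pipeline could build on this constructor; the subclass name and the body of init_nodes are assumptions for illustration only.

class MyPipeline(Pipeline):    # "Pipeline" stands for the (unnamed) class whose __init__ is shown above
    def init_nodes(self):      # called at the end of __init__
        pass                   # a real pipeline would create its Nipype input/output nodes here


pipeline = MyPipeline(
    caps_directory="/data/my_study/CAPS",
    tsv_file="/data/my_study/subjects_sessions.tsv",
    name="my-pipeline",
)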
Example #9
def extract_dl_t1w(caps_directory,
                   tsv,
                   working_directory=None,
                   extract_method='whole',
                   patch_size=50,
                   stride_size=50,
                   slice_direction=0,
                   slice_mode='original'):
    """ This is a preprocessing pipeline to convert the MRIs in nii.gz format
    into tensor versions (using the PyTorch format). It also prepares the
    slice-level and patch-level data from the entire MRI and saves them on disk.
    This enables the training process:
        - For slice-level CNN, all slices are extracted from the entire
          MRI along three different axes. The first and last 15 slices are
          discarded due to the lack of information.
        - For patch-level CNN, 3D patches (with a specific patch size)
          are extracted with a sliding 3D window.

    Parameters
    ----------

    caps_directory: str
      CAPS directory where the output of the preprocessing is stored.
    tsv: str
      TSV file with the subject list (participant_id and session_id).
    extract_method:
      Select which extraction method will be applied to the outputs:
      - 'slice' to get slices from the MRI,
      - 'patch' to get 3D patches from the MRI,
      - 'whole' to get the complete MRI.
    patch_size: int
      Size of the extracted 3D patches (only 'patch' method).
    stride_size: int
      Stride of the sliding window used when extracting the patches (only 'patch' method).
    slice_direction: int
      Direction along which the slices are extracted (only 'slice' method):
      - 0: Sagittal plane
      - 1: Coronal plane
      - 2: Axial plane
    slice_mode: str
      Mode in which slices are stored (only 'slice' method):
      - original: saves one single channel (intensity)
      - rgb: saves with three channels (red, green, blue)
    working_directory: str
      Folder containing a temporary space to save intermediate results.

    Returns
    -------
    wf: class nipype.pipeline.engine.workflows.Workflow
      A Nipype workflow used to control, set up, and execute the process
      as a Nipype pipeline.

    """

    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe
    from nipype.interfaces.io import DataSink
    from nipype import config
    import tempfile
    from clinica.utils.inputs import check_caps_folder
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.nipype import fix_join
    from .T1_preparedl_utils import (extract_slices, extract_patches,
                                     save_as_pt, container_from_filename,
                                     get_data_datasink)

    T1W_LINEAR = {
        'pattern': '*space-MNI152NLin2009cSym_res-1x1x1_T1w.nii.gz',
        'description': 'T1W Image registered using T1_Linear'
    }

    if working_directory is None:
        working_directory = tempfile.mkdtemp()

    check_caps_folder(caps_directory)
    is_bids_dir = False
    use_session_tsv = False

    sessions, subjects = get_subject_session_list(caps_directory, tsv,
                                                  is_bids_dir, use_session_tsv,
                                                  working_directory)

    # Use hash instead of parameters for iterables folder names
    # Otherwise path will be too long and generate OSError
    cfg = dict(execution={'parameterize_dirs': False})
    config.update_config(cfg)

    # Inputs from t1_linear folder
    # ========================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects, sessions, caps_directory,
                                        T1W_LINEAR)
    except ClinicaException as e:
        err = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n' + str(
            e)
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input field names.
        """
        return ['t1w']

    # Read node
    # ----------------------
    read_node = npe.Node(
        name="ReadingFiles",
        iterables=[
            ('t1w', t1w_files),
        ],
        synchronize=True,
        interface=nutil.IdentityInterface(fields=get_input_fields()))

    # Get subject ID node
    # ----------------------
    image_id_node = npe.Node(interface=nutil.Function(
        input_names=['bids_or_caps_file'],
        output_names=['image_id'],
        function=get_subject_id),
                             name='ImageID')

    # The processing nodes

    # Node to save MRI in nii.gz format into pytorch .pt format
    # ----------------------
    save_as_pt = npe.MapNode(name='save_as_pt',
                             iterfield=['input_img'],
                             interface=nutil.Function(
                                 function=save_as_pt,
                                 input_names=['input_img'],
                                 output_names=['output_file']))

    # Extract slices node (options: 3 directions, mode)
    # ----------------------
    extract_slices = npe.MapNode(
        name='extract_slices',
        iterfield=['preprocessed_T1'],
        interface=nutil.Function(
            function=extract_slices,
            input_names=['preprocessed_T1', 'slice_direction', 'slice_mode'],
            output_names=['output_file_rgb', 'output_file_original']))

    extract_slices.inputs.slice_direction = slice_direction
    extract_slices.inputs.slice_mode = slice_mode

    # Extract patches node (options, patch size and stride size)
    # ----------------------
    extract_patches = npe.MapNode(
        name='extract_patches',
        iterfield=['preprocessed_T1'],
        interface=nutil.Function(
            function=extract_patches,
            input_names=['preprocessed_T1', 'patch_size', 'stride_size'],
            output_names=['output_patch']))

    extract_patches.inputs.patch_size = patch_size
    extract_patches.inputs.stride_size = stride_size

    # Output node
    # ----------------------
    outputnode = npe.Node(nutil.IdentityInterface(fields=['preprocessed_T1']),
                          name='outputnode')

    # Node
    # ----------------------
    get_ids = npe.Node(interface=nutil.Function(
        input_names=['image_id'],
        output_names=['image_id_out', 'subst_ls'],
        function=get_data_datasink),
                       name="GetIDs")

    # Find container path from t1w filename
    # ----------------------
    container_path = npe.Node(nutil.Function(
        input_names=['bids_or_caps_filename'],
        output_names=['container'],
        function=container_from_filename),
                              name='ContainerPath')

    # Write node
    # ----------------------
    write_node = npe.Node(name="WriteCaps", interface=DataSink())
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    subfolder = 'image_based'
    wf = npe.Workflow(name='dl_prepare_data', base_dir=working_directory)

    # Connections
    # ----------------------
    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (read_node, save_as_pt, [('t1w', 'input_img')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        # Connect to DataSink
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')])
    ])

    if extract_method == 'slice':
        subfolder = 'slice_based'
        wf.connect([(save_as_pt, extract_slices, [('output_file',
                                                   'preprocessed_T1')]),
                    (extract_slices, write_node, [('output_file_rgb',
                                                   '@slices_rgb_T1')]),
                    (extract_slices, write_node, [('output_file_original',
                                                   '@slices_original_T1')])])
    elif extract_method == 'patch':
        subfolder = 'patch_based'
        wf.connect([(save_as_pt, extract_patches, [
            ('output_file', 'preprocessed_T1')
        ]), (extract_patches, write_node, [('output_patch', '@patches_T1')])])
    else:
        wf.connect([(save_as_pt, write_node, [('output_file',
                                               '@output_pt_file')])])

    wf.connect([(container_path, write_node,
                 [(('container', fix_join, 'deeplearning_prepare_data',
                    subfolder, 't1_linear'), 'container')])])

    return wf
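
A minimal sketch of how the returned workflow could be built and run for patch extraction (paths are placeholders):

wf = extract_dl_t1w(
    caps_directory='/data/my_study/CAPS',
    tsv='/data/my_study/subjects_sessions.tsv',
    extract_method='patch',   # or 'slice' / 'whole'
    patch_size=50,
    stride_size=50,
)
wf.run()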
Example #10
    def __init__(self,
                 bids_directory=None,
                 caps_directory=None,
                 tsv_file=None,
                 overwrite_caps=False,
                 base_dir=None,
                 parameters={},
                 name=None):
        """Init a Pipeline object.

        Args:
            bids_directory (optional): Path to a BIDS directory.
            caps_directory (optional): Path to a CAPS directory.
            tsv_file (optional): Path to a subjects-sessions `.tsv` file.
            overwrite_caps (optional): Boolean specifying whether to overwrite the output directory.
            base_dir (optional): Working directory (attribute of Nipype::Workflow class).
            parameters (optional): Pipeline parameters.
            name (optional): Pipeline name.
        """
        import inspect
        import os
        from tempfile import mkdtemp
        from colorama import Fore
        from clinica.utils.inputs import check_caps_folder
        from clinica.utils.inputs import check_bids_folder
        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.participant import get_subject_session_list

        self._is_built = False
        self._overwrite_caps = overwrite_caps
        self._bids_directory = bids_directory
        self._caps_directory = caps_directory
        self._verbosity = 'debug'
        self._tsv_file = tsv_file
        self._info_file = os.path.join(
            os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))),
            'info.json')
        self._info = {}

        if base_dir is None:
            self.base_dir = mkdtemp()
            self._base_dir_was_specified = False
        else:
            self.base_dir = base_dir
            self._base_dir_was_specified = True

        if name:
            self._name = name
        else:
            self._name = self.__class__.__name__
        self._parameters = parameters

        if self._bids_directory is None:
            if self._caps_directory is None:
                raise RuntimeError(
                    '%s[Error] The %s pipeline does not contain BIDS nor CAPS directory at the initialization.%s'
                    % (Fore.RED, self._name, Fore.RESET))

            check_caps_folder(self._caps_directory)
            input_dir = self._caps_directory
            is_bids_dir = False
        else:
            check_bids_folder(self._bids_directory)
            input_dir = self._bids_directory
            is_bids_dir = True
        self._sessions, self._subjects = get_subject_session_list(
            input_dir, tsv_file, is_bids_dir, False, base_dir)

        self.init_nodes()
Example #11
def preprocessing_t1w(bids_directory,
                      caps_directory,
                      tsv,
                      working_directory=None):
    """
    This preprocessing pipeline includes the following steps:
    1) N4 bias correction (performed with ANTS).
    2) Linear registration to MNI (MNI icbm152 non-linear symmetric template)
       (performed with ANTS) - RegistrationSynQuick.
    3) Cropping of the background (in order to save computational power).
    4) Histogram-based intensity normalization. This is a custom function
       performed by the binary ImageMath included with ANTS.

    Parameters
    ----------
    bids_directory: str
       Folder with BIDS structure.
    caps_directory: str
       Folder where the CAPS structure will be stored.
    tsv: str
       TSV file with the subject list (participant_id and session_id).
    working_directory: str
       Folder containing a temporary space to save intermediate results.
    """

    from os.path import dirname, join, abspath, split, exists
    from os import pardir, makedirs
    from pathlib import Path
    from clinica.utils.inputs import check_bids_folder
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.input_files import T1W_NII
    from clinica.utils.check_dependency import check_ants
    from clinicadl.tools.inputs.input import fetch_file
    from clinicadl.tools.inputs.input import RemoteFileStructure
    import nipype.pipeline.engine as npe
    import nipype.interfaces.utility as nutil
    from nipype.interfaces import ants

    check_ants()
    check_bids_folder(bids_directory)
    input_dir = abspath(bids_directory)
    caps_directory = abspath(caps_directory)
    is_bids_dir = True
    # working_directory may be None (default); only resolve it when provided.
    base_dir = abspath(working_directory) if working_directory else None

    home = str(Path.home())
    cache_clinicadl = join(home, '.cache', 'clinicadl', 'ressources', 'masks')
    url_aramis = 'https://aramislab.paris.inria.fr/files/data/img_t1_linear/'
    FILE1 = RemoteFileStructure(
        filename='ref_cropped_template.nii.gz',
        url=url_aramis,
        checksum='67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3')
    FILE2 = RemoteFileStructure(
        filename='mni_icbm152_t1_tal_nlin_sym_09c.nii',
        url=url_aramis,
        checksum='93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34')

    if not exists(cache_clinicadl):
        makedirs(cache_clinicadl)

    ref_template = join(cache_clinicadl, FILE2.filename)
    ref_crop = join(cache_clinicadl, FILE1.filename)

    if not exists(ref_template):
        try:
            ref_template = fetch_file(FILE2, cache_clinicadl)
        except IOError as err:
            print(
                'Unable to download required template (mni_icbm152) for processing:',
                err)

    if not exists(ref_crop):
        try:
            ref_crop = fetch_file(FILE1, cache_clinicadl)
        except IOError as err:
            print(
                'Unable to download required template (ref_crop) for processing:',
                err)

    sessions, subjects = get_subject_session_list(input_dir, tsv, is_bids_dir,
                                                  False, base_dir)

    # Use hash instead of parameters for iterables folder names
    # Otherwise path will be too long and generate OSError
    from nipype import config
    cfg = dict(execution={'parameterize_dirs': False})
    config.update_config(cfg)

    # Inputs from anat/ folder
    # ========================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects, sessions, bids_directory,
                                        T1W_NII)
    except ClinicaException as e:
        err = ('Clinica faced error(s) while trying to read files in your '
               'BIDS directory.\n' + str(e))
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input field names.
        """
        return ['t1w']

    read_node = npe.Node(
        name="ReadingFiles",
        iterables=[
            ('t1w', t1w_files),
        ],
        synchronize=True,
        interface=nutil.IdentityInterface(fields=get_input_fields()))

    image_id_node = npe.Node(interface=nutil.Function(
        input_names=['bids_or_caps_file'],
        output_names=['image_id'],
        function=get_subject_id),
                             name='ImageID')

    # The core (processing) nodes

    # 1. N4biascorrection by ANTS. It uses nipype interface.
    n4biascorrection = npe.Node(name='n4biascorrection',
                                interface=ants.N4BiasFieldCorrection(
                                    dimension=3,
                                    save_bias=True,
                                    bspline_fitting_distance=600))

    # 2. `RegistrationSynQuick` by *ANTS*. It uses nipype interface.
    ants_registration_node = npe.Node(name='antsRegistrationSynQuick',
                                      interface=ants.RegistrationSynQuick())
    ants_registration_node.inputs.fixed_image = ref_template
    ants_registration_node.inputs.transform_type = 'a'
    ants_registration_node.inputs.dimension = 3

    # 3. Crop image (using nifti). It uses custom interface, from utils file
    from .T1_linear_utils import crop_nifti

    cropnifti = npe.Node(name='cropnifti',
                         interface=nutil.Function(
                             function=crop_nifti,
                             input_names=['input_img', 'ref_crop'],
                             output_names=['output_img', 'crop_template']))
    cropnifti.inputs.ref_crop = ref_crop

    # ********* Deprecated ********** #
    # ** This step was not used in the final version ** #
    # 4. Histogram-based intensity normalization. This is a custom function
    #    performed by the binary `ImageMath` included with *ANTS*.

    #   from .T1_linear_utils import ants_histogram_intensity_normalization
    #
    #   # histogram-based intensity normalization
    #   intensitynorm = npe.Node(
    #           name='intensitynormalization',
    #           interface=nutil.Function(
    #               input_names=['image_dimension', 'crop_template', 'input_img'],
    #               output_names=['output_img'],
    #               function=ants_histogram_intensity_normalization
    #               )
    #           )
    #   intensitynorm.inputs.image_dimension = 3

    # DataSink and the output node

    from .T1_linear_utils import (container_from_filename, get_data_datasink)
    # Create node to write selected files into the CAPS
    from nipype.interfaces.io import DataSink

    get_ids = npe.Node(interface=nutil.Function(
        input_names=['image_id'],
        output_names=['image_id_out', 'subst_ls'],
        function=get_data_datasink),
                       name="GetIDs")

    # Find container path from t1w filename
    # =====================================
    container_path = npe.Node(nutil.Function(
        input_names=['bids_or_caps_filename'],
        output_names=['container'],
        function=container_from_filename),
                              name='ContainerPath')

    write_node = npe.Node(name="WriteCaps", interface=DataSink())
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    # Connecting the workflow
    from clinica.utils.nipype import fix_join

    wf = npe.Workflow(name='t1_linear_dl', base_dir=working_directory)

    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (image_id_node, ants_registration_node, [('image_id', 'output_prefix')]),
        (read_node, n4biascorrection, [("t1w", "input_image")]),
        (n4biascorrection, ants_registration_node, [('output_image', 'moving_image')]),
        (ants_registration_node, cropnifti, [('warped_image', 'input_img')]),
        (ants_registration_node, write_node, [('out_matrix', '@affine_mat')]),
        # Connect to DataSink
        (container_path, write_node, [(('container', fix_join, 't1_linear'),
                                       'container')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')]),
        # (get_ids, write_node, [('regexp_subst_ls', 'regexp_substitutions')]),
        (n4biascorrection, write_node, [('output_image', '@outfile_corr')]),
        (ants_registration_node, write_node, [('warped_image', '@outfile_reg')]),
        (cropnifti, write_node, [('output_img', '@outfile_crop')]),
    ])

    return wf
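A hedged usage sketch of the factory above: the dataset paths and TSV file are placeholders, and the MultiProc settings are just one reasonable choice for local execution, not part of the original example.

# Hypothetical usage of preprocessing_t1w; adapt the paths to your dataset.
if __name__ == '__main__':
    wf = preprocessing_t1w(
        bids_directory='/data/my_study/bids',            # placeholder path
        caps_directory='/data/my_study/caps',            # placeholder path
        tsv='participants.tsv',                          # placeholder TSV
        working_directory='/tmp/t1_linear_working_dir')  # placeholder path
    # Run with nipype's MultiProc plugin; n_procs is a local tuning choice.
    wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})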
Example #12
def cli(
    ctx: click.Context,
    bids_directory: str,
    caps_directory: str,
    group_label: str,
    smooth: List[int] = (8,),
    tissue_classes: List[int] = (1, 2, 3),
    tissue_probability_maps: Optional[str] = None,
    dont_save_warped_unmodulated: bool = False,
    save_warped_modulated: bool = False,
    dartel_tissues: List[int] = (1, 2, 3),
    tissues: List[int] = (1, 2, 3),
    modulate: bool = True,
    voxel_size: Tuple[float, float, float] = (1.5, 1.5, 1.5),
    subjects_sessions_tsv: Optional[str] = None,
    working_directory: Optional[str] = None,
    n_procs: Optional[int] = None,
    yes: bool = False,
) -> None:
    """Volume-based processing of T1-weighted MR images.

       GROUP_LABEL is a user-defined identifier to target a specific group of subjects.

    https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/T1_Volume/
    """
    import datetime
    import os

    from clinica.utils.filemanip import save_participants_sessions
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.stream import cprint

    from ..t1_volume_create_dartel import t1_volume_create_dartel_cli
    from ..t1_volume_dartel2mni import t1_volume_dartel2mni_cli
    from ..t1_volume_parcellation import t1_volume_parcellation_cli
    from ..t1_volume_tissue_segmentation import t1_volume_tissue_segmentation_cli

    cprint(
        "The t1-volume pipeline is divided into 4 parts:\n"
        "\tt1-volume-tissue-segmentation pipeline: "
        "Tissue segmentation, bias correction and spatial normalization to MNI space\n"
        "\tt1-volume-create-dartel pipeline: "
        "Inter-subject registration with the creation of a new DARTEL template\n"
        "\tt1-volume-dartel2mni pipeline: "
        "DARTEL template to MNI\n"
        "\tt1-volume-parcellation pipeline: "
        "Atlas statistics"
    )

    if not subjects_sessions_tsv:
        session_ids, participant_ids = get_subject_session_list(
            bids_directory, None, True, False
        )
        now = datetime.datetime.now().strftime("%H%M%S")
        subjects_sessions_tsv = now + "_participants.tsv"
        save_participants_sessions(
            participant_ids, session_ids, os.getcwd(), subjects_sessions_tsv
        )

    cprint("Part 1/4: Running t1-volume-segmentation pipeline.")
    ctx.invoke(
        t1_volume_tissue_segmentation_cli.cli,
        bids_directory=bids_directory,
        caps_directory=caps_directory,
        tissue_classes=tissue_classes,
        dartel_tissues=dartel_tissues,
        tissue_probability_maps=tissue_probability_maps,
        dont_save_warped_unmodulated=dont_save_warped_unmodulated,
        save_warped_modulated=save_warped_modulated,
        subjects_sessions_tsv=subjects_sessions_tsv,
        working_directory=working_directory,
        n_procs=n_procs,
        yes=yes,
    )

    cprint("Part 2/4: Running t1-volume-create-dartel pipeline.")
    ctx.invoke(
        t1_volume_create_dartel_cli.cli,
        bids_directory=bids_directory,
        caps_directory=caps_directory,
        group_label=group_label,
        dartel_tissues=dartel_tissues,
        subjects_sessions_tsv=subjects_sessions_tsv,
        working_directory=working_directory,
        n_procs=n_procs,
    )

    cprint("Part 3/4: Running t1-volume-dartel2mni pipeline.")
    ctx.invoke(
        t1_volume_dartel2mni_cli.cli,
        bids_directory=bids_directory,
        caps_directory=caps_directory,
        group_label=group_label,
        smooth=smooth,
        tissues=tissues,
        modulate=modulate,
        voxel_size=voxel_size,
        subjects_sessions_tsv=subjects_sessions_tsv,
        working_directory=working_directory,
        n_procs=n_procs,
    )

    cprint("Part 4/4: Running t1-volume-parcellation pipeline.")
    ctx.invoke(
        t1_volume_parcellation_cli.cli,
        caps_directory=caps_directory,
        group_label=group_label,
        subjects_sessions_tsv=subjects_sessions_tsv,
        working_directory=working_directory,
        n_procs=n_procs,
    )
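Assuming the `cli` function above is registered as a click command elsewhere in the module (its decorators are not shown in this excerpt), here is a minimal sketch of driving it programmatically with click's test runner. Only the three positional arguments are passed; the paths and group label are placeholders.

# Hypothetical invocation through click's CliRunner; assumes `cli` is
# decorated as a click command with the positional arguments shown above.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, ['/data/my_study/bids',   # placeholder BIDS folder
                             '/data/my_study/caps',   # placeholder CAPS folder
                             'MYGROUP'])              # placeholder group label
print(result.exit_code)
print(result.output)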
Example #13
    def run_command(self, args):
        """Run the pipeline with defined args."""
        import datetime
        import os

        from colorama import Fore

        from clinica.utils.filemanip import save_participants_sessions
        from clinica.utils.participant import get_subject_session_list
        from clinica.utils.stream import cprint

        from ..t1_volume_create_dartel.t1_volume_create_dartel_cli import (
            T1VolumeCreateDartelCLI,
        )
        from ..t1_volume_dartel2mni.t1_volume_dartel2mni_cli import (
            T1VolumeDartel2MNICLI,
        )
        from ..t1_volume_parcellation.t1_volume_parcellation_cli import (
            T1VolumeParcellationCLI,
        )
        from ..t1_volume_tissue_segmentation.t1_volume_tissue_segmentation_cli import (
            T1VolumeTissueSegmentationCLI,
        )

        cprint(
            f"The t1-volume pipeline is divided into 4 parts:\n"
            f"\t{Fore.BLUE}t1-volume-tissue-segmentation pipeline{Fore.RESET}: "
            f"Tissue segmentation, bias correction and spatial normalization to MNI space\n"
            f"\t{Fore.BLUE}t1-volume-create-dartel pipeline{Fore.RESET}: "
            f"Inter-subject registration with the creation of a new DARTEL template\n"
            f"\t{Fore.BLUE}t1-volume-dartel2mni pipeline{Fore.RESET}: "
            f"DARTEL template to MNI\n"
            f"\t{Fore.BLUE}t1-volume-parcellation pipeline{Fore.RESET}: "
            f"Atlas statistics")

        if not self.absolute_path(args.subjects_sessions_tsv):
            session_ids, participant_ids = get_subject_session_list(
                self.absolute_path(args.bids_directory), None, True, False)
            now = datetime.datetime.now().strftime("%H%M%S")
            args.subjects_sessions_tsv = now + "_participants.tsv"
            save_participants_sessions(participant_ids, session_ids,
                                       os.getcwd(), args.subjects_sessions_tsv)

        cprint(
            f"{Fore.BLUE}\nPart 1/4: Running t1-volume-segmentation pipeline{Fore.RESET}"
        )
        tissue_segmentation_cli = T1VolumeTissueSegmentationCLI()
        tissue_segmentation_cli.run_command(args)

        cprint(
            f"{Fore.BLUE}\nPart 2/4: Running t1-volume-create-dartel pipeline{Fore.RESET}"
        )
        create_dartel_cli = T1VolumeCreateDartelCLI()
        create_dartel_cli.run_command(args)

        cprint(
            f"{Fore.BLUE}\nPart 3/4: Running t1-volume-dartel2mni pipeline{Fore.RESET}"
        )
        dartel2mni_cli = T1VolumeDartel2MNICLI()
        dartel2mni_cli.run_command(args)

        cprint(
            f"{Fore.BLUE}\nPart 4/4: Running t1-volume-parcellation pipeline{Fore.RESET}"
        )
        parcellation_cli = T1VolumeParcellationCLI()
        parcellation_cli.run_command(args)
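Both CLI variants above fall back to a timestamped participants TSV when no subjects/sessions file is supplied. The following standalone sketch reproduces that default-naming step; the plain csv writer stands in for Clinica's save_participants_sessions helper, and the column names are the usual BIDS ones, assumed here for illustration.

# Standalone sketch of the timestamped default TSV used above; the csv
# writing replaces Clinica's save_participants_sessions purely for
# illustration.
import csv
import datetime
import os


def default_participants_tsv(participant_ids, session_ids, out_dir=None):
    """Write <HHMMSS>_participants.tsv in out_dir and return its path."""
    out_dir = out_dir or os.getcwd()
    tsv_name = datetime.datetime.now().strftime('%H%M%S') + '_participants.tsv'
    tsv_path = os.path.join(out_dir, tsv_name)
    with open(tsv_path, 'w', newline='') as fp:
        writer = csv.writer(fp, delimiter='\t')
        writer.writerow(['participant_id', 'session_id'])  # assumed column names
        writer.writerows(zip(participant_ids, session_ids))
    return tsv_path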