Code example #1
File: data_handling_cli.py  Project: ghisvail/clinica
    def run_command(self, args):
        from clinica.iotools.utils import data_handling as dt
        from clinica.utils.inputs import check_caps_folder

        check_caps_folder(args.caps_directory)
        dt.compute_missing_processing(args.bids_directory, args.caps_directory,
                                      args.out_file)
Code example #2
def check_missing_processing(
    bids_directory: str,
    caps_directory: str,
    output_file: str,
) -> None:
    """Check missing processing in a CAPS dataset."""
    from clinica.iotools.utils.data_handling import compute_missing_processing
    from clinica.utils.inputs import check_caps_folder

    check_caps_folder(caps_directory)
    compute_missing_processing(bids_directory, caps_directory, output_file)
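As a usage note, this wrapper can be called directly with plain string paths. The sketch below is only an illustration: the BIDS/CAPS paths and the output filename are placeholders, not part of the original snippet.

# Hypothetical paths; check_caps_folder will raise if the CAPS folder does not exist.
check_missing_processing(
    bids_directory="/path/to/bids",
    caps_directory="/path/to/caps",
    output_file="missing_processing.tsv",
)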
Code example #3
File: split_manager.py  Project: 14thibea/AD-DL
    def _create_caps_dict(caps_directory, multi_cohort):
        if multi_cohort:
            if not caps_directory.endswith(".tsv"):
                raise ValueError(
                    "If multi_cohort is given, the caps_dir argument should be a path to a TSV file."
                )
            else:
                caps_df = pd.read_csv(caps_directory, sep="\t")
                SplitManager._check_multi_cohort_tsv(caps_df, "CAPS")
                caps_dict = dict()
                for idx in range(len(caps_df)):
                    cohort = caps_df.loc[idx, "cohort"]
                    caps_path = caps_df.loc[idx, "path"]
                    check_caps_folder(caps_path)
                    caps_dict[cohort] = caps_path
        else:
            check_caps_folder(caps_directory)
            caps_dict = {"single": caps_directory}

        return caps_dict
Code example #4
    def create_caps_dict(caps_directory, multi_cohort):

        from clinica.utils.inputs import check_caps_folder

        if multi_cohort:
            if not caps_directory.endswith('.tsv'):
                raise ValueError(
                    'If multi_cohort is given, the caps_dir argument should be a path to a TSV file.'
                )
            else:
                caps_df = pd.read_csv(caps_directory, sep="\t")
                check_multi_cohort_tsv(caps_df, 'CAPS')
                caps_dict = dict()
                for idx in range(len(caps_df)):
                    cohort = caps_df.loc[idx, 'cohort']
                    caps_path = caps_df.loc[idx, 'path']
                    check_caps_folder(caps_path)
                    caps_dict[cohort] = caps_path
        else:
            check_caps_folder(caps_directory)
            caps_dict = {'single': caps_directory}

        return caps_dict
Code example #5
File: data.py  Project: ravih18/AD-DL
    def create_caps_dict(caps_directory: str,
                         multi_cohort: bool) -> Dict[str, str]:

        from clinica.utils.inputs import check_caps_folder

        if multi_cohort:
            if not caps_directory.endswith(".tsv"):
                raise ClinicaDLArgumentError(
                    "If multi_cohort is True, the CAPS_DIRECTORY argument should be a path to a TSV file."
                )
            else:
                caps_df = pd.read_csv(caps_directory, sep="\t")
                check_multi_cohort_tsv(caps_df, "CAPS")
                caps_dict = dict()
                for idx in range(len(caps_df)):
                    cohort = caps_df.loc[idx, "cohort"]
                    caps_path = caps_df.loc[idx, "path"]
                    check_caps_folder(caps_path)
                    caps_dict[cohort] = caps_path
        else:
            check_caps_folder(caps_directory)
            caps_dict = {"single": caps_directory}

        return caps_dict
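For reference, a rough sketch of calling this helper in multi-cohort mode follows. The cohort names and paths are hypothetical; only the required TSV columns ("cohort" and "path") are taken from the code above.

import pandas as pd

# Hypothetical multi-cohort TSV with the two columns read by create_caps_dict.
caps_tsv = pd.DataFrame({
    "cohort": ["ADNI", "AIBL"],
    "path": ["/path/to/caps_adni", "/path/to/caps_aibl"],
})
caps_tsv.to_csv("caps_directories.tsv", sep="\t", index=False)

# Multi-cohort: pass the TSV path. Single cohort: pass the CAPS folder directly,
# which yields {"single": caps_directory}.
caps_dict = create_caps_dict("caps_directories.tsv", multi_cohort=True)
# -> {"ADNI": "/path/to/caps_adni", "AIBL": "/path/to/caps_aibl"}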
Code example #6
File: extract.py  Project: mdiazmel/AD-DL
def DeepLearningPrepareData(caps_directory, tsv_file, parameters):
    import os
    from os import path

    from clinica.utils.exceptions import (
        ClinicaBIDSError,
        ClinicaCAPSError,
        ClinicaException,
    )
    from clinica.utils.input_files import (
        T1W_EXTENSIVE,
        T1W_LINEAR,
        T1W_LINEAR_CROPPED,
        pet_linear_nii,
    )
    from clinica.utils.inputs import check_caps_folder, clinica_file_reader
    from clinica.utils.nipype import container_from_filename
    from clinica.utils.participant import get_subject_session_list
    from torch import save as save_tensor

    from clinicadl.utils.preprocessing import write_preprocessing

    from .extract_utils import (
        check_mask_list,
        extract_images,
        extract_patches,
        extract_roi,
        extract_slices,
    )

    logger = getLogger("clinicadl")

    # Get subject and session list
    check_caps_folder(caps_directory)
    input_dir = caps_directory
    logger.debug(f"CAPS directory : {input_dir}.")
    is_bids_dir = False
    sessions, subjects = get_subject_session_list(input_dir, tsv_file,
                                                  is_bids_dir, False, None)
    logger.info(
        f"{parameters['mode']}s will be extracted in Pytorch tensor from {len(sessions)} images."
    )
    logger.debug(f"List of subjects: \n{subjects}.")
    logger.debug(f"List of sessions: \n{sessions}.")

    # Select the correct filetype corresponding to modality
    # and select the right folder output name corresponding to modality
    logger.debug(
        f"Selected images are preprocessed with {parameters['preprocessing']} pipeline`."
    )
    if parameters["preprocessing"] == "t1-linear":
        mod_subfolder = "t1_linear"
        if parameters["use_uncropped_image"]:
            FILE_TYPE = T1W_LINEAR
        else:
            FILE_TYPE = T1W_LINEAR_CROPPED
    if parameters["preprocessing"] == "t1-extensive":
        mod_subfolder = "t1_extensive"
        FILE_TYPE = T1W_EXTENSIVE
        parameters["uncropped_image"] = None
    if parameters["preprocessing"] == "pet-linear":
        mod_subfolder = "pet_linear"
        FILE_TYPE = pet_linear_nii(
            parameters["acq_label"],
            parameters["suvr_reference_region"],
            parameters["use_uncropped_image"],
        )
    if parameters["preprocessing"] == "custom":
        mod_subfolder = "custom"
        FILE_TYPE = {
            "pattern": f"*{parameters['custom_suffix']}",
            "description": "Custom suffix",
        }
        parameters["use_uncropped_image"] = None
    parameters["file_type"] = FILE_TYPE

    # Input file:
    input_files = clinica_file_reader(subjects, sessions, caps_directory,
                                      FILE_TYPE)

    # Loop on the images
    for file in input_files:
        logger.debug(f"  Processing of {file}.")
        container = container_from_filename(file)
        # Extract the wanted tensor
        if parameters["mode"] == "image":
            subfolder = "image_based"
            output_mode = extract_images(file)
            logger.debug(f"    Image extracted.")
        elif parameters["mode"] == "slice":
            subfolder = "slice_based"
            output_mode = extract_slices(
                file,
                slice_direction=parameters["slice_direction"],
                slice_mode=parameters["slice_mode"],
                discarded_slices=parameters["discarded_slices"],
            )
            logger.debug(f"    {len(output_mode)} slices extracted.")
        elif parameters["mode"] == "patch":
            subfolder = "patch_based"
            output_mode = extract_patches(
                file,
                patch_size=parameters["patch_size"],
                stride_size=parameters["stride_size"],
            )
            logger.debug(f"    {len(output_mode)} patches extracted.")
        elif parameters["mode"] == "roi":
            subfolder = "roi_based"
            if parameters["preprocessing"] == "custom":
                parameters["roi_template"] = parameters["roi_custom_template"]
                if parameters["roi_custom_template"] is None:
                    raise ValueError(
                        "A custom template must be defined when the modality is set to custom."
                    )
            else:
                from .extract_utils import TEMPLATE_DICT

                parameters["roi_template"] = TEMPLATE_DICT[
                    parameters["preprocessing"]]
            parameters["masks_location"] = path.join(
                caps_directory, "masks", f"tpl-{parameters['roi_template']}")
            if len(parameters["roi_list"]) == 0:
                raise ValueError("A list of regions must be given.")
            else:
                check_mask_list(
                    parameters["masks_location"],
                    parameters["roi_list"],
                    parameters["roi_custom_mask_pattern"],
                    None if parameters["use_uncropped_image"] is None else
                    not parameters["use_uncropped_image"],
                )
            output_mode = extract_roi(
                file,
                masks_location=parameters["masks_location"],
                mask_pattern=parameters["roi_custom_mask_pattern"],
                cropped_input=None if parameters["use_uncropped_image"] is None
                else not parameters["use_uncropped_image"],
                roi_list=parameters["roi_list"],
                uncrop_output=parameters["uncropped_roi"],
            )
            logger.debug(f"    ROI extracted.")
        # Write the extracted tensor on a .pt file
        for tensor in output_mode:
            output_file_dir = path.join(
                caps_directory,
                container,
                "deeplearning_prepare_data",
                subfolder,
                mod_subfolder,
            )
            if not path.exists(output_file_dir):
                os.makedirs(output_file_dir)
            output_file = path.join(output_file_dir, tensor[0])
            save_tensor(tensor[1], output_file)
            logger.debug(f"    Output tensor saved at {output_file}")

    # Save parameters dictionary
    preprocessing_json_path = write_preprocessing(parameters, caps_directory)
    logger.info(f"Preprocessing JSON saved at {preprocessing_json_path}.")
Code example #7
File: extract.py  Project: ravih18/AD-DL
def DeepLearningPrepareData(caps_directory, tsv_file, n_proc, parameters):
    import os
    from os import path

    from clinica.utils.inputs import check_caps_folder, clinica_file_reader
    from clinica.utils.nipype import container_from_filename
    from clinica.utils.participant import get_subject_session_list
    from joblib import Parallel, delayed
    from torch import save as save_tensor

    from clinicadl.utils.exceptions import ClinicaDLArgumentError
    from clinicadl.utils.preprocessing import write_preprocessing

    from .extract_utils import check_mask_list, compute_folder_and_file_type

    logger = getLogger("clinicadl")

    # Get subject and session list
    check_caps_folder(caps_directory)
    logger.debug(f"CAPS directory : {caps_directory}.")
    is_bids_dir = False
    sessions, subjects = get_subject_session_list(caps_directory, tsv_file,
                                                  is_bids_dir, False, None)
    if parameters["prepare_dl"]:
        logger.info(
            f"{parameters['mode']}s will be extracted in Pytorch tensor from {len(sessions)} images."
        )
    else:
        logger.info(
            f"Images will be extracted in Pytorch tensor from {len(sessions)} images."
        )
        logger.info(
            f"Information for {parameters['mode']} will be saved in output JSON file and will be used "
            f"during training for on-the-fly extraction.")
    logger.debug(f"List of subjects: \n{subjects}.")
    logger.debug(f"List of sessions: \n{sessions}.")

    # Select the correct filetype corresponding to modality
    # and select the right folder output name corresponding to modality
    logger.debug(
        f"Selected images are preprocessed with {parameters['preprocessing']} pipeline`."
    )
    mod_subfolder, file_type = compute_folder_and_file_type(parameters)
    parameters["file_type"] = file_type

    # Input file:
    input_files = clinica_file_reader(subjects, sessions, caps_directory,
                                      file_type)[0]

    def write_output_imgs(output_mode, container, subfolder):
        # Write the extracted tensor on a .pt file
        for filename, tensor in output_mode:
            output_file_dir = path.join(
                caps_directory,
                container,
                "deeplearning_prepare_data",
                subfolder,
                mod_subfolder,
            )
            if not path.exists(output_file_dir):
                os.makedirs(output_file_dir)
            output_file = path.join(output_file_dir, filename)
            save_tensor(tensor, output_file)
            logger.debug(f"    Output tensor saved at {output_file}")

    if parameters["mode"] == "image" or not parameters["prepare_dl"]:

        def prepare_image(file):
            from .extract_utils import extract_images

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "image_based"
            output_mode = extract_images(file)
            logger.debug(f"    Image extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_image)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "slice":

        def prepare_slice(file):
            from .extract_utils import extract_slices

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "slice_based"
            output_mode = extract_slices(
                file,
                slice_direction=parameters["slice_direction"],
                slice_mode=parameters["slice_mode"],
                discarded_slices=parameters["discarded_slices"],
            )
            logger.debug(f"    {len(output_mode)} slices extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_slice)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "patch":

        def prepare_patch(file):
            from .extract_utils import extract_patches

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "patch_based"
            output_mode = extract_patches(
                file,
                patch_size=parameters["patch_size"],
                stride_size=parameters["stride_size"],
            )
            logger.debug(f"    {len(output_mode)} patches extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_patch)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "roi":

        def prepare_roi(file):
            from .extract_utils import extract_roi

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "roi_based"
            if parameters["preprocessing"] == "custom":
                if not parameters["roi_custom_template"]:
                    raise ClinicaDLArgumentError(
                        "A custom template must be defined when the modality is set to custom."
                    )
                parameters["roi_template"] = parameters["roi_custom_template"]
                parameters["roi_mask_pattern"] = parameters[
                    "roi_custom_mask_pattern"]
            else:
                from .extract_utils import PATTERN_DICT, TEMPLATE_DICT

                parameters["roi_template"] = TEMPLATE_DICT[
                    parameters["preprocessing"]]
                parameters["roi_mask_pattern"] = PATTERN_DICT[
                    parameters["preprocessing"]]

            parameters["masks_location"] = path.join(
                caps_directory, "masks", f"tpl-{parameters['roi_template']}")
            if len(parameters["roi_list"]) == 0:
                raise ClinicaDLArgumentError(
                    "A list of regions of interest must be given.")
            else:
                check_mask_list(
                    parameters["masks_location"],
                    parameters["roi_list"],
                    parameters["roi_mask_pattern"],
                    None if parameters["use_uncropped_image"] is None else
                    not parameters["use_uncropped_image"],
                )
            output_mode = extract_roi(
                file,
                masks_location=parameters["masks_location"],
                mask_pattern=parameters["roi_mask_pattern"],
                cropped_input=None if parameters["use_uncropped_image"] is None
                else not parameters["use_uncropped_image"],
                roi_names=parameters["roi_list"],
                uncrop_output=parameters["uncropped_roi"],
            )
            logger.debug(f"    ROI extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_roi)(file)
                                for file in input_files)

    else:
        raise NotImplementedError(
            f"Extraction is not implemented for mode {parameters['mode']}.")

    # Save parameters dictionary
    preprocessing_json_path = write_preprocessing(parameters, caps_directory)
    logger.info(f"Preprocessing JSON saved at {preprocessing_json_path}.")
Code example #8
File: engine.py  Project: ghisvail/clinica
    def __init__(
        self,
        bids_directory=None,
        caps_directory=None,
        tsv_file=None,
        overwrite_caps=False,
        base_dir=None,
        parameters={},
        name=None,
    ):
        """Init a Pipeline object.

        Args:
            bids_directory (str, optional): Path to a BIDS directory. Defaults to None.
            caps_directory (str, optional): Path to a CAPS directory. Defaults to None.
            tsv_file (str, optional): Path to a subjects-sessions `.tsv` file. Defaults to None.
            overwrite_caps (bool, optional): Whether to overwrite the output directory. Defaults to False.
            base_dir (str, optional): Working directory (attribute of Nipype::Workflow class). Defaults to None.
            parameters (dict, optional): Pipeline parameters. Defaults to {}.
            name (str, optional): Pipeline name. Defaults to None.

        Raises:
            RuntimeError: If neither a BIDS nor a CAPS directory is provided at initialization.
        """
        import inspect
        import os
        from tempfile import mkdtemp

        from colorama import Fore

        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.inputs import check_bids_folder, check_caps_folder
        from clinica.utils.participant import get_subject_session_list

        self._is_built = False
        self._overwrite_caps = overwrite_caps
        self._bids_directory = bids_directory
        self._caps_directory = caps_directory
        self._verbosity = "debug"
        self._tsv_file = tsv_file
        self._info_file = os.path.join(
            os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))),
            "info.json",
        )
        self._info = {}

        if base_dir is None:
            self.base_dir = mkdtemp()
            self._base_dir_was_specified = False
        else:
            self.base_dir = base_dir
            self._base_dir_was_specified = True

        if name:
            self._name = name
        else:
            self._name = self.__class__.__name__
        self._parameters = parameters

        if self._bids_directory is None:
            if self._caps_directory is None:
                raise RuntimeError(
                    f"{Fore.RED}[Error] The {self._name} pipeline does not contain "
                    f"BIDS nor CAPS directory at the initialization.{Fore.RESET}"
                )

            check_caps_folder(self._caps_directory)
            input_dir = self._caps_directory
            is_bids_dir = False
        else:
            check_bids_folder(self._bids_directory)
            input_dir = self._bids_directory
            is_bids_dir = True
        self._sessions, self._subjects = get_subject_session_list(
            input_dir, tsv_file, is_bids_dir, False, base_dir
        )

        self.init_nodes()
Code example #9
def extract_dl_t1w(caps_directory,
                   tsv,
                   working_directory=None,
                   extract_method='whole',
                   patch_size=50,
                   stride_size=50,
                   slice_direction=0,
                   slice_mode='original'):
    """ This is a preprocessing pipeline to convert the MRIs in nii.gz format
    into tensor versions (using pytorch format). It also prepares the
    slice-level and patch-level data from the entire MRI and save them on disk.
    This enables the training process:
        - For slice-level CNN, all slices were extracted from the entire
          MRI from three different axis. The first and last 15 slice were
          discarded due to the lack of information.
        - For patch-level CNN, the 3D patch (with specific patch size)
          were extracted by a 3D window.

    Parameters
    ----------

    caps_directory: str
      CAPS directory where the output of the preprocessing is stored.
    tsv: str
      TSV file with the subject list (participant_id and session_id).
    extract_method: str
      Select which extraction method will be applied to the outputs:
      - 'slice' to get slices from the MRI,
      - 'patch' to get 3D patches from the MRI,
      - 'whole' to get the complete MRI
    patch_size: int
      Size of the extracted 3D patches (only 'patch' method).
    stride_size: int
      Size of the sliding window used when extracting the patches (only 'patch' method).
    slice_direction: int
      Direction along which the slices will be extracted (only 'slice' method):
      - 0: Sagittal plane
      - 1: Coronal plane
      - 2: Axial plane
    slice_mode: str
      Mode in which slices are stored (only 'slice' method):
      - original: saves one single channel (intensity)
      - rgb: saves with three channels (red, green, blue)
    working_directory: str
      Folder providing a temporary space to save intermediate results.

    Returns
    -------
    wf: nipype.pipeline.engine.workflows.Workflow
      A nipype Workflow object to control, set up, and execute the process
      as a nipype pipeline.

    """

    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe
    from nipype.interfaces.io import DataSink
    from nipype import config
    import tempfile
    from clinica.utils.inputs import check_caps_folder
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.nipype import fix_join
    from .T1_preparedl_utils import (extract_slices, extract_patches,
                                     save_as_pt, container_from_filename,
                                     get_data_datasink)

    T1W_LINEAR = {
        'pattern': '*space-MNI152NLin2009cSym_res-1x1x1_T1w.nii.gz',
        'description': 'T1W Image registered using T1_Linear'
    }

    if working_directory is None:
        working_directory = tempfile.mkdtemp()

    check_caps_folder(caps_directory)
    is_bids_dir = False
    use_session_tsv = False

    sessions, subjects = get_subject_session_list(caps_directory, tsv,
                                                  is_bids_dir, use_session_tsv,
                                                  working_directory)

    # Use hash instead of parameters for iterables folder names
    # Otherwise path will be too long and generate OSError
    cfg = dict(execution={'parameterize_dirs': False})
    config.update_config(cfg)

    # Inputs from t1_linear folder
    # ========================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects, sessions, caps_directory,
                                        T1W_LINEAR)
    except ClinicaException as e:
        err = 'Clinica faced error(s) while trying to read files in your CAPS directory.\n' + str(
            e)
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """"Specify the list of possible inputs of this pipelines.
        Returns:
        A list of (string) input fields name.
        """
        return ['t1w']

    # Read node
    # ----------------------
    read_node = npe.Node(
        name="ReadingFiles",
        iterables=[
            ('t1w', t1w_files),
        ],
        synchronize=True,
        interface=nutil.IdentityInterface(fields=get_input_fields()))

    # Get subject ID node
    # ----------------------
    image_id_node = npe.Node(interface=nutil.Function(
        input_names=['bids_or_caps_file'],
        output_names=['image_id'],
        function=get_subject_id),
                             name='ImageID')

    # The processing nodes

    # Node to save MRI in nii.gz format into pytorch .pt format
    # ----------------------
    save_as_pt = npe.MapNode(name='save_as_pt',
                             iterfield=['input_img'],
                             interface=nutil.Function(
                                 function=save_as_pt,
                                 input_names=['input_img'],
                                 output_names=['output_file']))

    # Extract slices node (options: 3 directions, mode)
    # ----------------------
    extract_slices = npe.MapNode(
        name='extract_slices',
        iterfield=['preprocessed_T1'],
        interface=nutil.Function(
            function=extract_slices,
            input_names=['preprocessed_T1', 'slice_direction', 'slice_mode'],
            output_names=['output_file_rgb', 'output_file_original']))

    extract_slices.inputs.slice_direction = slice_direction
    extract_slices.inputs.slice_mode = slice_mode

    # Extract patches node (options, patch size and stride size)
    # ----------------------
    extract_patches = npe.MapNode(
        name='extract_patches',
        iterfield=['preprocessed_T1'],
        interface=nutil.Function(
            function=extract_patches,
            input_names=['preprocessed_T1', 'patch_size', 'stride_size'],
            output_names=['output_patch']))

    extract_patches.inputs.patch_size = patch_size
    extract_patches.inputs.stride_size = stride_size

    # Output node
    # ----------------------
    outputnode = npe.Node(nutil.IdentityInterface(fields=['preprocessed_T1']),
                          name='outputnode')

    # Node
    # ----------------------
    get_ids = npe.Node(interface=nutil.Function(
        input_names=['image_id'],
        output_names=['image_id_out', 'subst_ls'],
        function=get_data_datasink),
                       name="GetIDs")

    # Find container path from t1w filename
    # ----------------------
    container_path = npe.Node(nutil.Function(
        input_names=['bids_or_caps_filename'],
        output_names=['container'],
        function=container_from_filename),
                              name='ContainerPath')

    # Write node
    # ----------------------
    write_node = npe.Node(name="WriteCaps", interface=DataSink())
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    subfolder = 'image_based'
    wf = npe.Workflow(name='dl_prepare_data', base_dir=working_directory)

    # Connections
    # ----------------------
    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (read_node, save_as_pt, [('t1w', 'input_img')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        # Connect to DataSink
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')])
    ])

    if extract_method == 'slice':
        subfolder = 'slice_based'
        wf.connect([(save_as_pt, extract_slices, [('output_file',
                                                   'preprocessed_T1')]),
                    (extract_slices, write_node, [('output_file_rgb',
                                                   '@slices_rgb_T1')]),
                    (extract_slices, write_node, [('output_file_original',
                                                   '@slices_original_T1')])])
    elif extract_method == 'patch':
        subfolder = 'patch_based'
        wf.connect([(save_as_pt, extract_patches, [
            ('output_file', 'preprocessed_T1')
        ]), (extract_patches, write_node, [('output_patch', '@patches_T1')])])
    else:
        wf.connect([(save_as_pt, write_node, [('output_file',
                                               '@output_pt_file')])])

    wf.connect([(container_path, write_node,
                 [(('container', fix_join, 'deeplearning_prepare_data',
                    subfolder, 't1_linear'), 'container')])])

    return wf
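Since the function returns a nipype Workflow, a hedged usage sketch is to build it and run it with one of nipype's execution plugins; the paths below are placeholders.

# Hypothetical usage: build the workflow for patch extraction and execute it
# with nipype's MultiProc plugin.
wf = extract_dl_t1w(
    caps_directory="/path/to/caps",
    tsv="/path/to/subjects_sessions.tsv",
    extract_method="patch",
    patch_size=50,
    stride_size=50,
)
wf.run(plugin="MultiProc", plugin_args={"n_procs": 4})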
Code example #10
    def __init__(self,
                 bids_directory=None,
                 caps_directory=None,
                 tsv_file=None,
                 overwrite_caps=False,
                 base_dir=None,
                 parameters={},
                 name=None):
        """Init a Pipeline object.

        Args:
            bids_directory (optional): Path to a BIDS directory.
            caps_directory (optional): Path to a CAPS directory.
            tsv_file (optional): Path to a subjects-sessions `.tsv` file.
            overwrite_caps (optional): Boolean which specifies whether to overwrite the output directory.
            base_dir (optional): Working directory (attribute of Nipype::Workflow class).
            name (optional): Pipeline name.
        """
        import inspect
        import os
        from tempfile import mkdtemp
        from colorama import Fore
        from clinica.utils.inputs import check_caps_folder
        from clinica.utils.inputs import check_bids_folder
        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.participant import get_subject_session_list

        self._is_built = False
        self._overwrite_caps = overwrite_caps
        self._bids_directory = bids_directory
        self._caps_directory = caps_directory
        self._verbosity = 'debug'
        self._tsv_file = tsv_file
        self._info_file = os.path.join(
            os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))),
            'info.json')
        self._info = {}

        if base_dir is None:
            self.base_dir = mkdtemp()
            self._base_dir_was_specified = False
        else:
            self.base_dir = base_dir
            self._base_dir_was_specified = True

        if name:
            self._name = name
        else:
            self._name = self.__class__.__name__
        self._parameters = parameters

        if self._bids_directory is None:
            if self._caps_directory is None:
                raise RuntimeError(
                    '%s[Error] The %s pipeline does not contain BIDS nor CAPS directory at the initialization.%s'
                    % (Fore.RED, self._name, Fore.RESET))

            check_caps_folder(self._caps_directory)
            input_dir = self._caps_directory
            is_bids_dir = False
        else:
            check_bids_folder(self._bids_directory)
            input_dir = self._bids_directory
            is_bids_dir = True
        self._sessions, self._subjects = get_subject_session_list(
            input_dir, tsv_file, is_bids_dir, False, base_dir)

        self.init_nodes()