def get_processed_images(caps_directory, subjects, sessions):
    """Return IDs of longitudinal FreeSurfer outputs already present in CAPS.

    Args:
        caps_directory: Path to the CAPS folder to inspect.
        subjects: List of participant IDs.
        sessions: List of session IDs (parallel to ``subjects``).

    Returns:
        List of ``sub-<label>_long-<label>`` identifiers found in the CAPS
        folder (empty when ``caps_directory`` does not exist).
    """
    import os
    import re

    from clinica.utils.input_files import T1_FS_T_DESTRIEUX
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.longitudinal import get_long_id
    from clinica.utils.participant import get_unique_subjects

    participant_ids, sessions_per_participant = get_unique_subjects(
        subjects, sessions
    )
    long_ids = [
        get_long_id(session_ids) for session_ids in sessions_per_participant
    ]

    if not os.path.isdir(caps_directory):
        return []

    found_files = clinica_file_reader(
        participant_ids,
        long_ids,
        caps_directory,
        T1_FS_T_DESTRIEUX,
        False,
    )
    # Extract the "sub-XXX_long-YYY" identifier from each filename.
    id_pattern = re.compile(r"(sub-[a-zA-Z0-9]+)_(long-[a-zA-Z0-9]+)")
    return [id_pattern.search(filename).group() for filename in found_files]
# Example 2
    def _get_full_image(self) -> torch.Tensor:
        """Return one example full image from the dataset.

        Reads the first row of ``self.df``: first tries to load the
        pre-extracted tensor (*.pt); if that lookup raises ``IndexError``,
        falls back to reading the NIfTI file through ``clinica_file_reader``
        and converting it to a tensor. Useful to compute the number of
        elements when mode != image.

        Returns:
            Tensor holding the whole first image.
        """
        import nibabel as nib
        from clinica.utils.inputs import clinica_file_reader

        first_row = self.df.loc[0]
        participant_id = first_row["participant_id"]
        session_id = first_row["session_id"]
        cohort = first_row["cohort"]

        try:
            # Preferred path: load the already-extracted tensor.
            tensor_path = self._get_image_path(participant_id, session_id, cohort)
            image = torch.load(tensor_path)
        except IndexError:
            # Fall back to the raw NIfTI image described by the file type.
            file_type = self.preprocessing_dict["file_type"]
            nii_results = clinica_file_reader(
                [participant_id], [session_id], self.caps_dict[cohort], file_type
            )
            image_np = nib.load(nii_results[0]).get_fdata()
            image = ToTensor()(image_np)

        return image
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Reads the Dartel input tissues and the six Dartel iteration templates
        from the CAPS folder, raising ``ClinicaCAPSError`` with all collected
        messages if anything is missing.
        """
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.exceptions import ClinicaException, ClinicaCAPSError
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.input_files import t1_volume_i_th_iteration_group_template, t1_volume_dartel_input_tissue
        from clinica.utils.ux import print_images_to_process

        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(
                fields=self.get_input_fields(), mandatory_inputs=True
            ),
        )

        errors = []

        # One file list per requested Dartel input tissue.
        dartel_inputs = []
        for tissue_number in self.parameters['tissues']:
            try:
                dartel_inputs.append(
                    clinica_file_reader(
                        self.subjects,
                        self.sessions,
                        self.caps_directory,
                        t1_volume_dartel_input_tissue(tissue_number),
                    )
                )
            except ClinicaException as exc:
                errors.append(exc)

        # Dartel templates for iterations 1..6.
        templates = []
        for iteration in range(1, 7):
            try:
                templates.append(
                    clinica_group_reader(
                        self.caps_directory,
                        t1_volume_i_th_iteration_group_template(
                            self.parameters['group_id'], iteration
                        ),
                    )
                )
            except ClinicaException as exc:
                errors.append(exc)

        # Report every reading error at once instead of failing on the first.
        if errors:
            message = 'Clinica faced error(s) while trying to read files in your CAPS/BIDS directories.\n'
            message += ''.join(str(err) for err in errors)
            raise ClinicaCAPSError(message)

        read_input_node.inputs.dartel_input_images = dartel_inputs
        read_input_node.inputs.dartel_iteration_templates = templates

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)

        self.connect([
            (read_input_node, self.input_node,
             [('dartel_input_images', 'dartel_input_images')]),
            (read_input_node, self.input_node,
             [('dartel_iteration_templates', 'dartel_iteration_templates')]),
        ])
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Checks that the requested group exists in the CAPS folder, reads the
        gray-matter maps in MNI space for every subject/session, and feeds
        them (plus the atlas list) into the reading node.

        Raises:
            ClinicaException: If the group folder does not exist.
            ClinicaCAPSError: If the gray-matter maps cannot be read.
        """
        import os

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from colorama import Fore

        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.input_files import t1_volume_template_tpm_in_mni
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import (
            print_groups_in_caps_directory,
            print_images_to_process,
        )

        # Check that the group already exists (created by a previous pipeline).
        if not os.path.exists(
                os.path.join(self.caps_directory, "groups",
                             f"group-{self.parameters['group_label']}")):
            print_groups_in_caps_directory(self.caps_directory)
            # Fix: removed the stray '%' that was printed literally before the
            # color code (leftover from an old %-style format string).
            raise ClinicaException(
                f"{Fore.RED}Group {self.parameters['group_label']} does not exist. "
                f"Did you run t1-volume or t1-volume-create-dartel pipeline?{Fore.RESET}"
            )

        try:
            # Modulated gray-matter (tissue 1) probability maps in MNI space.
            gm_mni = clinica_file_reader(
                self.subjects,
                self.sessions,
                self.caps_directory,
                t1_volume_template_tpm_in_mni(self.parameters["group_label"],
                                              1, True),
            )
        except ClinicaException as e:
            final_error_str = "Clinica faced error(s) while trying to read files in your CAPS directory.\n"
            final_error_str += str(e)
            raise ClinicaCAPSError(final_error_str)

        read_parameters_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                              mandatory_inputs=True),
        )
        read_parameters_node.inputs.file_list = gm_mni
        read_parameters_node.inputs.atlas_list = self.parameters["atlases"]

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last a few seconds per image.")

        self.connect([
            (read_parameters_node, self.input_node, [("file_list", "file_list")
                                                     ]),
            (read_parameters_node, self.input_node, [("atlas_list",
                                                      "atlas_list")]),
        ])
    def get_to_process_with_atlases(caps_directory: str, subjects: list,
                                    sessions: list,
                                    atlas_dir_path: str) -> list:
        """Return (atlas, image_id) pairs whose stats file is still missing.

        Atlas names are discovered from the ``*rh*6p0.gcs`` files found under
        ``atlas_dir_path``. For each atlas, image IDs that have a FreeSurfer
        output but no atlas stats file yet are scheduled for processing.
        """
        import itertools
        import os
        from pathlib import Path

        from clinica.utils.filemanip import extract_image_ids
        from clinica.utils.input_files import T1_FS_DESTRIEUX
        from clinica.utils.inputs import clinica_file_reader

        # Atlas name is the middle token of e.g. "rh.<atlas>_...6p0.gcs".
        atlas_names = [
            gcs_path.name.split(".")[1].split("_")[0]
            for gcs_path in Path(atlas_dir_path).rglob("*rh*6p0.gcs")
        ]

        pending = []
        if os.path.isdir(caps_directory):
            for atlas in atlas_names:
                atlas_info = {
                    "pattern":
                    "t1/freesurfer_cross_sectional/sub-*_ses-*/stats/rh." +
                    atlas + ".stats",
                    "description":
                    atlas + "-based segmentation",
                    "needed_pipeline":
                    "t1-freesurfer",
                }
                freesurfer_outputs = clinica_file_reader(
                    subjects, sessions, caps_directory, T1_FS_DESTRIEUX, False)
                atlas_stats_files = clinica_file_reader(
                    subjects, sessions, caps_directory, atlas_info, False)

                done_ids = set(extract_image_ids(atlas_stats_files))
                available_ids = set(extract_image_ids(freesurfer_outputs))
                pending.append(([atlas], list(available_ids - done_ids)))

        # Flatten to a list of (atlas, image_id) tuples, skipping empty sets.
        to_process = []
        for atlases, image_ids in pending:
            pairs = list(itertools.product(atlases, image_ids))
            if pairs:
                to_process = to_process + pairs
        return to_process
# Example 6
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Reads the T1w images normalized to Ixi549Space (produced by
        t1-volume-tissue-segmentation) and wires them into the reading node,
        one pipeline iteration per file.
        """
        import os
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        errors = []
        # Query describing the normalized-space T1w files to look for.
        t1w_in_ixi549space = {
            "pattern": os.path.join(
                "t1",
                "spm",
                "segmentation",
                "normalized_space",
                "*_*_space-Ixi549Space_T1w.nii*",
            ),
            "description": "Tissue probability map in native space",
            "needed_pipeline": "t1-volume-tissue-segmentation",
        }
        try:
            t1w_files = clinica_file_reader(
                self.subjects,
                self.sessions,
                self.caps_directory,
                t1w_in_ixi549space,
            )
        except ClinicaException as exc:
            errors.append(exc)

        # Raise all collected errors at once, if any happened.
        if errors:
            message = "Clinica faced errors while trying to read files in your CAPS directory.\n"
            message += "".join(str(err) for err in errors)
            raise RuntimeError(message)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last a few seconds per image.")

        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[("norm_t1w", t1w_files)],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        self.connect([
            (read_node, self.input_node, [("norm_t1w", "norm_t1w")]),
        ])
# Example 7
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Reads the (cropped or uncropped) T1w-linear images from the CAPS
        folder, stores slice/patch extraction parameters on ``self``, and
        wires the files into the reading node.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.stream import cprint
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import T1W_LINEAR
        from clinica.utils.input_files import T1W_LINEAR_CROPPED
        from clinica.utils.ux import print_images_to_process

        # Choose which T1w-linear variant to read.
        if self.parameters.get('use_uncropped_image'):
            file_type = T1W_LINEAR
        else:
            file_type = T1W_LINEAR_CROPPED

        # T1w_Linear file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.caps_directory, file_type)
        except ClinicaException as e:
            raise ClinicaBIDSError(
                'Clinica faced error(s) while trying to read files in your CAPS directory.\n'
                + str(e))

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            # Replace by adequate computational time.
            cprint('The pipeline will last approximately 30 seconds per image.'
                   )

        # Slice extraction parameters (defaults when not extracting slices).
        if self.parameters.get('extract_method') == 'slice':
            self.slice_direction = self.parameters.get('slice_direction')
            self.slice_mode = self.parameters.get('slice_mode')
        else:
            self.slice_direction = 'axial'
            self.slice_mode = 'rgb'

        # Patch extraction parameters (defaults when not extracting patches).
        if self.parameters.get('extract_method') == 'patch':
            self.patch_size = self.parameters.get('patch_size')
            self.stride_size = self.parameters.get('stride_size')
        else:
            self.patch_size = 50
            self.stride_size = 50

        # The reading node: one iteration per input NIfTI.
        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[('input_nifti', t1w_files)],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()))

        self.connect([
            (read_node, self.input_node, [('input_nifti', 'input_nifti')]),
        ])
# Example 8
    def get_images(self):
        """Return the list of image filenames for the configured image type.

        For "T1w", paths are built directly from the CAPS layout and checked
        for existence; for "PET", they are resolved via
        ``clinica_file_reader``. The result is cached in ``self._images``.

        Returns: a list of filenames
        """
        from clinica.utils.input_files import pet_volume_normalized_suvr_pet
        from clinica.utils.inputs import clinica_file_reader

        # Return the cached list when it was already computed.
        if self._images is not None:
            return self._images

        image_type = self._input_params["image_type"]
        if image_type == "T1w":
            fwhm = self._input_params["fwhm"]
            fwhm_key_value = "" if fwhm == 0 else f"_fwhm-{fwhm}mm"

            self._images = []
            # Build one CAPS path per (subject, session) pair.
            for subject, session in zip(self._subjects, self._sessions):
                self._images.append(
                    path.join(
                        self._input_params["caps_directory"],
                        "subjects",
                        subject,
                        session,
                        "t1",
                        "spm",
                        "dartel",
                        f"group-{self._input_params['group_label']}",
                        f"{subject}_{session}_T1w"
                        f"_segm-graymatter_space-Ixi549Space_modulated-{self._input_params['modulated']}{fwhm_key_value}_probability.nii.gz",
                    ))

            for image in self._images:
                if not path.exists(image):
                    raise Exception("File %s doesn't exists." % image)

        elif image_type == "PET":
            caps_files_information = pet_volume_normalized_suvr_pet(
                acq_label=self._input_params["acq_label"],
                group_label=self._input_params["group_label"],
                suvr_reference_region=self.
                _input_params["suvr_reference_region"],
                use_brainmasked_image=True,
                use_pvc_data=self._input_params["use_pvc_data"],
                fwhm=self._input_params["fwhm"],
            )
            self._images = clinica_file_reader(
                self._subjects,
                self._sessions,
                self._input_params["caps_directory"],
                caps_files_information,
            )
        else:
            raise ValueError(
                f"Unknown image type (given value: {self._input_params['image_type']})"
            )

        return self._images
# Example 9
    def _get_image_path(self, participant: str, session: str,
                        cohort: str) -> str:
        """
        Gets the path to the tensor image (*.pt)

        First looks for the source .nii.gz file and derives the tensor path
        from it; if no NIfTI is found (``ClinicaCAPSError``), searches for the
        .pt file directly.

        Args:
            participant: ID of the participant.
            session: ID of the session.
            cohort: Name of the cohort.
        Returns:
            image_path: path to the tensor containing the whole image.
        """
        from clinica.utils.inputs import clinica_file_reader

        file_type = self.preprocessing_dict["file_type"]
        try:
            # Try to find the .nii.gz file and map it to its tensor location.
            results = clinica_file_reader([participant], [session],
                                          self.caps_dict[cohort], file_type)
            nii_paths = results[0]
            tensor_name = path.basename(nii_paths[0]).replace(".nii.gz", ".pt")
            folder, _ = compute_folder_and_file_type(self.preprocessing_dict)
            image_path = path.join(
                self.caps_dict[cohort],
                "subjects",
                participant,
                session,
                "deeplearning_prepare_data",
                "image_based",
                folder,
                tensor_name,
            )
        except ClinicaCAPSError:
            # No NIfTI found: search for the .pt file directly.
            file_type["pattern"] = file_type["pattern"].replace(
                ".nii.gz", ".pt")
            results = clinica_file_reader([participant], [session],
                                          self.caps_dict[cohort], file_type)
            image_path = results[0][0]

        return image_path
# Example 10
 def get_processed_images(caps_directory, subjects, sessions):
     """Return IDs of images with existing t1-freesurfer outputs in CAPS."""
     import os
     from clinica.utils.inputs import clinica_file_reader
     from clinica.utils.input_files import T1_FS_DESTRIEUX
     from clinica.utils.filemanip import extract_image_ids

     # Nothing processed yet when the CAPS folder does not exist.
     if not os.path.isdir(caps_directory):
         return []
     found_files = clinica_file_reader(subjects, sessions, caps_directory,
                                       T1_FS_DESTRIEUX, False)
     return extract_image_ids(found_files)
# Example 11
 def get_processed_images(caps_directory, subjects, sessions):
     """Return IDs of images with existing cropped T1w-linear outputs in CAPS."""
     import os
     from clinica.utils.inputs import clinica_file_reader
     from clinica.utils.input_files import T1W_LINEAR_CROPPED
     from clinica.utils.filemanip import extract_image_ids

     # Nothing processed yet when the CAPS folder does not exist.
     if not os.path.isdir(caps_directory):
         return []
     found_files = clinica_file_reader(subjects, sessions, caps_directory,
                                       T1W_LINEAR_CROPPED, False)
     return extract_image_ids(found_files)
    def get_processed_images(caps_directory, subjects, sessions):
        """Return IDs of images with existing preprocessed DWI outputs in CAPS."""
        import os

        from clinica.utils.filemanip import extract_image_ids
        from clinica.utils.input_files import DWI_PREPROC_NII
        from clinica.utils.inputs import clinica_file_reader

        # Nothing processed yet when the CAPS folder does not exist.
        if not os.path.isdir(caps_directory):
            return []
        found_files = clinica_file_reader(subjects, sessions, caps_directory,
                                          DWI_PREPROC_NII, False)
        return extract_image_ids(found_files)
# Example 13
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Reads the T1w files from the anat/ folder of the BIDS directory,
        checks their volume location in the world coordinate system, and
        wires them into the reading node.

        Raise:
            ClinicaBIDSError: If there are duplicated files or missing files for any subject
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.iotools.utils.data_handling import (
            check_volume_location_in_world_coordinate_system, )
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # T1w file from the anat/ folder:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as exc:
            raise ClinicaBIDSError(
                f"Clinica faced error(s) while trying to read files in your BIDS directory.\n{str(exc)}"
            )

        check_volume_location_in_world_coordinate_system(
            t1w_files,
            self.bids_directory,
            skip_question=self.parameters["skip_question"],
        )

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                "The pipeline will last approximately 10 minutes per image.")

        # One pipeline iteration per T1w file.
        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[("t1w", t1w_files)],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        self.connect([
            (read_node, self.input_node, [("t1w", "t1w")]),
        ])
# Example 14
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Checks the group folder exists, reads the gray-matter maps in MNI
        space, and feeds them (plus the atlas list) into the reading node.
        """
        import os
        from colorama import Fore
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import t1_volume_template_tpm_in_mni
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_groups_in_caps_directory, print_images_to_process

        group_id = self.parameters['group_id']

        # The group must have been created by a previous pipeline run.
        group_path = os.path.join(self.caps_directory, 'groups',
                                  'group-' + group_id)
        if not os.path.exists(group_path):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                '%sGroup %s does not exist. Did you run t1-volume or t1-volume-create-dartel pipeline?%s'
                % (Fore.RED, group_id, Fore.RESET))

        try:
            # Modulated gray-matter (tissue 1) probability maps in MNI space.
            gm_mni = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                t1_volume_template_tpm_in_mni(group_id, 1, True))
        except ClinicaException as e:
            raise ClinicaCAPSError(
                'Clinica faced error(s) while trying to read files in your CAPS directory.\n'
                + str(e))

        read_parameters_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                              mandatory_inputs=True))
        read_parameters_node.inputs.file_list = gm_mni
        read_parameters_node.inputs.atlas_list = self.parameters['atlases']

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last a few seconds per image.')

        self.connect([
            (read_parameters_node, self.input_node,
             [('file_list', 'file_list')]),
            (read_parameters_node, self.input_node,
             [('atlas_list', 'atlas_list')]),
        ])
# Example 15
    def __getitem__(self, idx):
        """Return the sample (image plus identifiers) at position ``idx``.

        Bug fix: ``participant_id``/``session_id`` are now read from
        ``self.df`` before the branch. Previously they were only bound in the
        non-tensor branch, so ``use_extracted_tensors=True`` raised a
        ``NameError`` when building the returned dict.
        """
        # Identifiers are needed in both branches and in the returned sample.
        subject = self.df.loc[idx, "participant_id"]
        session = self.df.loc[idx, "session_id"]

        if self.use_extracted_tensors:
            image = self.tensor_dataset[idx]
            image = self.pt_transform(image)
        else:
            image_path = clinica_file_reader([subject], [session],
                                             self.img_dir,
                                             T1W_LINEAR_CROPPED)[0]
            image = nib.load(image_path[0])
            image = self.nii_transform(image)

        sample = {
            "image": image,
            "participant_id": subject,
            "session_id": session
        }

        return sample
# Example 16
    def get_processed_images(caps_directory, subjects, sessions):
        """Return IDs of images whose skull-stripped T1w already exists in CAPS."""
        import os
        from clinica.utils.filemanip import extract_image_ids
        from clinica.utils.inputs import clinica_file_reader

        # Query describing the skull-stripped T1w files to look for.
        information = {
            "pattern": os.path.join(
                "t1_extensive",
                "*_*_space-Ixi549Space_desc-SkullStripped_T1w.nii*",
            ),
            "description": "Skull-stripped T1w in Ixi549Space space.",
            "needed_pipeline": "t1-volume-tissue-segmentation",
        }
        if not os.path.isdir(caps_directory):
            return []
        skull_stripped_files = clinica_file_reader(subjects, sessions,
                                                   caps_directory,
                                                   information, False)
        return extract_image_ids(skull_stripped_files)
    def build_input_node(self):
        """Build and connect an input node to the pipelines.

        Reads the T1w, DWI, bval and bvec files from the BIDS directory,
        raising ``ClinicaBIDSError`` with all collected messages if any of
        them is missing, then checks DWI volume consistency and wires the
        four file lists into the reading node.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        import clinica.utils.input_files as input_files
        from clinica.utils.dwi import check_dwi_volume
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.inputs import clinica_file_reader

        errors = []

        def _read(file_type):
            # Collect reading errors instead of failing on the first one.
            try:
                return clinica_file_reader(self.subjects, self.sessions,
                                           self.bids_directory, file_type)
            except ClinicaException as exc:
                errors.append(exc)
                return None

        t1w_files = _read(input_files.T1W_NII)
        dwi_files = _read(input_files.DWI_NII)
        bval_files = _read(input_files.DWI_BVAL)
        bvec_files = _read(input_files.DWI_BVEC)

        if errors:
            message = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n'
            message += ''.join(str(err) for err in errors)
            raise ClinicaBIDSError(message)

        # Perform the check only after all inputs were read successfully.
        for dwi, bvec, bval in zip(dwi_files, bvec_files, bval_files):
            check_dwi_volume(in_dwi=dwi, in_bvec=bvec, in_bval=bval)

        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                              mandatory_inputs=True),
            iterables=[('T1w', t1w_files), ('dwi', dwi_files),
                       ('bvec', bvec_files), ('bval', bval_files)],
            synchronize=True)

        self.connect([
            (read_input_node, self.input_node, [('T1w', 'T1w')]),
            (read_input_node, self.input_node, [('dwi', 'dwi')]),
            (read_input_node, self.input_node, [('bvec', 'bvec')]),
            (read_input_node, self.input_node, [('bval', 'bval')]),
        ])
# Example 18
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Downloads the MNI template and the cropped reference template when
        they are not already cached under resources/masks, removes
        subject/session pairs whose outputs already exist in the CAPS folder,
        then wires the remaining T1w files into the reading node.
        """
        from os import pardir
        from os.path import dirname, join, abspath, exists
        from colorama import Fore
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import fetch_file, RemoteFileStructure
        from clinica.utils.ux import print_images_to_process
        from clinica.utils.stream import cprint

        # Templates live two directories above this file, in resources/masks.
        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, 'resources', 'masks')
        url_aramis = 'https://aramislab.paris.inria.fr/files/data/img_t1_linear/'
        FILE1 = RemoteFileStructure(
            filename='ref_cropped_template.nii.gz',
            url=url_aramis,
            checksum=
            '67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3')
        FILE2 = RemoteFileStructure(
            filename='mni_icbm152_t1_tal_nlin_sym_09c.nii',
            url=url_aramis,
            checksum=
            '93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34')

        self.ref_template = join(path_to_mask, FILE2.filename)
        self.ref_crop = join(path_to_mask, FILE1.filename)

        # Download each template only when missing; a failed download is
        # logged but does not abort the pipeline build.
        if not (exists(self.ref_template)):
            try:
                fetch_file(FILE2, path_to_mask)
            except IOError as err:
                cprint(
                    'Unable to download required template (mni_icbm152) for processing:',
                    err)

        if not (exists(self.ref_crop)):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    'Unable to download required template (ref_crop) for processing:',
                    err)

        # Display image(s) already present in CAPS folder
        # ===============================================
        processed_ids = self.get_processed_images(self.caps_directory,
                                                  self.subjects, self.sessions)
        if len(processed_ids) > 0:
            cprint(
                "%sClinica found %s image(s) already processed in CAPS directory:%s"
                % (Fore.YELLOW, len(processed_ids), Fore.RESET))
            for image_id in processed_ids:
                cprint("%s\t%s%s" %
                       (Fore.YELLOW, image_id.replace('_', ' | '), Fore.RESET))
            cprint("%s\nImage(s) will be ignored by Clinica.\n%s" %
                   (Fore.YELLOW, Fore.RESET))
            # Keep only the subject/session pairs that still need processing;
            # note this mutates self.subjects and self.sessions in place.
            input_ids = [
                p_id + '_' + s_id
                for p_id, s_id in zip(self.subjects, self.sessions)
            ]
            to_process_ids = list(set(input_ids) - set(processed_ids))
            self.subjects, self.sessions = extract_subjects_sessions_from_filename(
                to_process_ids)

        # Inputs from anat/ folder
        # ========================
        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n' + str(
                e)
            raise ClinicaBIDSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last approximately 6 minutes per image.')

        # One pipeline iteration per remaining T1w file.
        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ('t1w', t1w_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()))
        self.connect([
            (read_node, self.input_node, [('t1w', 't1w')]),
        ])
Ejemplo n.º 19
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Aborts early when the DARTEL template of the group already exists,
        requires at least two subjects, then grabs the DARTEL input tissues
        from the CAPS directory and wires them into the input node.
        """
        import os
        import sys

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.input_files import t1_volume_dartel_input_tissue
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import (
            print_begin_image,
            print_groups_in_caps_directory,
            print_images_to_process,
        )

        group_label = self.parameters["group_label"]
        # A pre-existing group template means the pipeline already ran:
        # warn the user and stop instead of overwriting outputs.
        template_path = os.path.join(
            self.caps_directory,
            "groups",
            f"group-{group_label}",
            "t1",
            f"group-{group_label}_template.nii.gz",
        )
        if os.path.exists(template_path):
            cprint(
                msg=(
                    f"DARTEL template for {group_label} already exists. "
                    "Currently, Clinica does not propose to overwrite outputs for this pipeline."
                ),
                lvl="warning",
            )
            print_groups_in_caps_directory(self.caps_directory)
            sys.exit(0)

        # DARTEL cannot build a template from a single image.
        if len(self.subjects) <= 1:
            raise ClinicaException(
                "This pipeline needs at least 2 images to create DARTEL "
                f"template but Clinica only found {len(self.subjects)}."
            )

        read_parameters_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(
                fields=self.get_input_fields(), mandatory_inputs=True
            ),
        )

        # Accumulate reading errors so every missing file is reported at once.
        errors = []
        tissue_inputs = []
        for tissue_number in self.parameters["dartel_tissues"]:
            try:
                tissue_inputs.append(
                    clinica_file_reader(
                        self.subjects,
                        self.sessions,
                        self.caps_directory,
                        t1_volume_dartel_input_tissue(tissue_number),
                    )
                )
            except ClinicaException as e:
                errors.append(e)

        if errors:
            raise RuntimeError(
                "Clinica faced errors while trying to read files in your BIDS or CAPS directories.\n"
                + "".join(str(msg) for msg in errors)
            )

        # tissue_inputs holds one list of len(self.subjects) files per
        # requested DARTEL tissue.
        read_parameters_node.inputs.dartel_inputs = tissue_inputs

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                "Computational time for DARTEL creation will depend on the number of images."
            )
            print_begin_image(f"group-{group_label}")

        # fmt: off
        self.connect([(read_parameters_node, self.input_node,
                       [("dartel_inputs", "dartel_input_images")])])
        # fmt: on
Ejemplo n.º 20
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Gathers, for every subject/session pair, the PET image and the native
        T1w image from the BIDS folder, the tissue probability maps, DARTEL
        flow fields and final group template from the CAPS folder, plus
        (optionally) PSF information for partial volume correction, and
        exposes them through an IdentityInterface node connected to the
        pipeline's input node.

        Raises:
            ClinicaException: if the group folder does not exist in the CAPS
                directory, or if any required input file could not be read.
        """
        import os
        from os.path import join, exists
        from colorama import Fore
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.input_files import (
            t1_volume_final_group_template, t1_volume_native_tpm,
            t1_volume_native_tpm_in_mni, t1_volume_deformation_to_template,
            bids_pet_nii, T1W_NII)
        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.ux import print_groups_in_caps_directory, print_images_to_process
        from clinica.iotools.utils.data_handling import check_relative_volume_location_in_world_coordinate_system
        from clinica.utils.filemanip import save_participants_sessions
        from clinica.utils.pet import read_psf_information, get_suvr_mask
        from clinica.utils.stream import cprint

        # Check that the group already exists (created by t1-volume or
        # t1-volume-create-dartel); otherwise list available groups and abort.
        if not exists(
                join(self.caps_directory, 'groups',
                     'group-' + self.parameters['group_label'])):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                '%sGroup %s does not exist. Did you run t1-volume or t1-volume-create-dartel pipeline?%s'
                % (Fore.RED, self.parameters['group_label'], Fore.RESET))

        # Tissues DataGrabber
        # ====================
        # Reading errors are accumulated so that every missing file is
        # reported at once, instead of failing on the first one.
        all_errors = []

        # Grab reference mask (single file used for SUVR normalization)
        reference_mask_file = get_suvr_mask(
            self.parameters['suvr_reference_region'])

        # PET from BIDS directory
        try:
            pet_bids = clinica_file_reader(
                self.subjects, self.sessions, self.bids_directory,
                bids_pet_nii(self.parameters['acq_label']))
        except ClinicaException as e:
            all_errors.append(e)

        # Native T1w-MRI
        try:
            t1w_bids = clinica_file_reader(self.subjects, self.sessions,
                                           self.bids_directory, T1W_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # mask_tissues
        tissues_input = []
        for tissue_number in self.parameters['mask_tissues']:
            try:
                current_file = clinica_file_reader(
                    self.subjects, self.sessions, self.caps_directory,
                    t1_volume_native_tpm_in_mni(tissue_number, False))
                tissues_input.append(current_file)
            except ClinicaException as e:
                all_errors.append(e)
        # Tissues_input has a length of len(self.parameters['mask_tissues']). Each of these elements has a size of
        # len(self.subjects). We want the opposite: a list of size len(self.subjects) whose elements have a size of
        # len(self.parameters['mask_tissues']. The trick is to iter on elements with zip(*my_list)
        tissues_input_final = []
        for subject_tissue_list in zip(*tissues_input):
            tissues_input_final.append(subject_tissue_list)
        tissues_input = tissues_input_final

        # Flowfields (DARTEL deformations towards the group template)
        try:
            flowfields_caps = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                t1_volume_deformation_to_template(
                    self.parameters['group_label']))
        except ClinicaException as e:
            all_errors.append(e)

        # Dartel Template (one single file shared by the whole group)
        try:
            final_template = clinica_group_reader(
                self.caps_directory,
                t1_volume_final_group_template(self.parameters['group_label']))
        except ClinicaException as e:
            all_errors.append(e)

        # PVC is only applied when a PSF TSV is given; otherwise each subject
        # gets an empty PSF entry so the iterables stay in lockstep.
        if self.parameters['pvc_psf_tsv'] is not None:
            iterables_psf = read_psf_information(
                self.parameters['pvc_psf_tsv'], self.subjects, self.sessions)
            self.parameters['apply_pvc'] = True
        else:
            iterables_psf = [[]] * len(self.subjects)
            self.parameters['apply_pvc'] = False

        if self.parameters['apply_pvc']:
            # pvc tissues input (native-space TPMs, transposed per subject
            # with the same zip(*...) trick as above)
            pvc_tissues_input = []
            for tissue_number in self.parameters['pvc_mask_tissues']:
                try:
                    current_file = clinica_file_reader(
                        self.subjects, self.sessions, self.caps_directory,
                        t1_volume_native_tpm(tissue_number))
                    pvc_tissues_input.append(current_file)
                except ClinicaException as e:
                    all_errors.append(e)

            if len(all_errors) == 0:
                pvc_tissues_input_final = []
                for subject_tissue_list in zip(*pvc_tissues_input):
                    pvc_tissues_input_final.append(subject_tissue_list)
                pvc_tissues_input = pvc_tissues_input_final
        else:
            pvc_tissues_input = []

        if len(all_errors) > 0:
            error_message = 'Clinica faced error(s) while trying to read files in your CAPS/BIDS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaException(error_message)

        # Ensure PET and T1w images share the same world coordinate system.
        check_relative_volume_location_in_world_coordinate_system(
            'T1w-MRI', t1w_bids, self.parameters['acq_label'] + ' PET',
            pet_bids, self.bids_directory, self.parameters['acq_label'])

        # Save subjects to process in <WD>/<Pipeline.name>/participants.tsv
        folder_participants_tsv = os.path.join(self.base_dir, self.name)
        save_participants_sessions(self.subjects, self.sessions,
                                   folder_participants_tsv)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('List available in %s' %
                   os.path.join(folder_participants_tsv, 'participants.tsv'))
            cprint(
                'The pipeline will last approximately 10 minutes per image.')

        # synchronize=True makes the iterables advance in lockstep, so each
        # iteration corresponds to one subject/session pair.
        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                              mandatory_inputs=True),
            iterables=[('pet_image', pet_bids), ('t1_image_native', t1w_bids),
                       ('mask_tissues', tissues_input), ('psf', iterables_psf),
                       ('flow_fields', flowfields_caps),
                       ('pvc_mask_tissues', pvc_tissues_input)],
            synchronize=True)

        # Reference mask and template are single files shared by all iterations.
        read_input_node.inputs.reference_mask = reference_mask_file
        read_input_node.inputs.dartel_template = final_template

        self.connect([(read_input_node, self.input_node,
                       [('pet_image', 'pet_image'),
                        ('t1_image_native', 't1_image_native'),
                        ('mask_tissues', 'mask_tissues'),
                        ('flow_fields', 'flow_fields'),
                        ('dartel_template', 'dartel_template'),
                        ('reference_mask', 'reference_mask'), ('psf', 'psf'),
                        ('pvc_mask_tissues', 'pvc_mask_tissues')])])
Ejemplo n.º 21
0
def DeepLearningPrepareData(caps_directory, tsv_file, n_proc, parameters):
    """Extract Pytorch tensors from the preprocessed images of a CAPS directory.

    Depending on parameters["mode"] ("image", "slice", "patch" or "roi") and
    parameters["prepare_dl"], either whole images or pre-extracted elements
    are written as .pt files under each subject's
    deeplearning_prepare_data/<mode>_based/<modality> folder. The extraction
    is parallelized over images with joblib.

    Args:
        caps_directory: path to the CAPS folder holding the preprocessed images.
        tsv_file: TSV file listing the participant/session pairs to process.
        n_proc: number of parallel jobs used for the extraction.
        parameters: extraction options; mutated in place (file_type and ROI
            keys are added) and saved as a preprocessing JSON at the end.
    """
    import os
    from os import path

    from clinica.utils.inputs import check_caps_folder, clinica_file_reader
    from clinica.utils.nipype import container_from_filename
    from clinica.utils.participant import get_subject_session_list
    from joblib import Parallel, delayed
    from torch import save as save_tensor

    from clinicadl.utils.exceptions import ClinicaDLArgumentError
    from clinicadl.utils.preprocessing import write_preprocessing

    from .extract_utils import check_mask_list, compute_folder_and_file_type

    logger = getLogger("clinicadl")

    # Get subject and session list
    check_caps_folder(caps_directory)
    logger.debug(f"CAPS directory : {caps_directory}.")
    is_bids_dir = False
    sessions, subjects = get_subject_session_list(caps_directory, tsv_file,
                                                  is_bids_dir, False, None)
    if parameters["prepare_dl"]:
        logger.info(
            f"{parameters['mode']}s will be extracted in Pytorch tensor from {len(sessions)} images."
        )
    else:
        # Without prepare_dl only full images are stored; the mode-specific
        # extraction happens on the fly at training time from the saved JSON.
        logger.info(
            f"Images will be extracted in Pytorch tensor from {len(sessions)} images."
        )
        logger.info(
            f"Information for {parameters['mode']} will be saved in output JSON file and will be used "
            f"during training for on-the-fly extraction.")
    logger.debug(f"List of subjects: \n{subjects}.")
    logger.debug(f"List of sessions: \n{sessions}.")

    # Select the correct filetype corresponding to modality
    # and select the right folder output name corresponding to modality
    logger.debug(
        f"Selected images are preprocessed with {parameters['preprocessing']} pipeline`."
    )
    mod_subfolder, file_type = compute_folder_and_file_type(parameters)
    parameters["file_type"] = file_type

    # Input file:
    # NOTE(review): assumes clinica_file_reader returns the file list as its
    # first element — confirm against the installed clinica version.
    input_files = clinica_file_reader(subjects, sessions, caps_directory,
                                      file_type)[0]

    def write_output_imgs(output_mode, container, subfolder):
        """Save each (filename, tensor) pair of output_mode as a .pt file."""
        # Write the extracted tensor on a .pt file
        for filename, tensor in output_mode:
            output_file_dir = path.join(
                caps_directory,
                container,
                "deeplearning_prepare_data",
                subfolder,
                mod_subfolder,
            )
            if not path.exists(output_file_dir):
                os.makedirs(output_file_dir)
            output_file = path.join(output_file_dir, filename)
            save_tensor(tensor, output_file)
            logger.debug(f"    Output tensor saved at {output_file}")

    if parameters["mode"] == "image" or not parameters["prepare_dl"]:

        def prepare_image(file):
            """Save the full image of one input file as a tensor."""
            from .extract_utils import extract_images

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "image_based"
            output_mode = extract_images(file)
            logger.debug(f"    Image extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_image)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "slice":

        def prepare_slice(file):
            """Extract and save the slices of one input file."""
            from .extract_utils import extract_slices

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "slice_based"
            output_mode = extract_slices(
                file,
                slice_direction=parameters["slice_direction"],
                slice_mode=parameters["slice_mode"],
                discarded_slices=parameters["discarded_slices"],
            )
            logger.debug(f"    {len(output_mode)} slices extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_slice)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "patch":

        def prepare_patch(file):
            """Extract and save the patches of one input file."""
            from .extract_utils import extract_patches

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "patch_based"
            output_mode = extract_patches(
                file,
                patch_size=parameters["patch_size"],
                stride_size=parameters["stride_size"],
            )
            logger.debug(f"    {len(output_mode)} patches extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_patch)(file)
                                for file in input_files)

    elif parameters["prepare_dl"] and parameters["mode"] == "roi":

        def prepare_roi(file):
            """Extract and save the regions of interest of one input file."""
            from .extract_utils import extract_roi

            logger.debug(f"  Processing of {file}.")
            container = container_from_filename(file)
            subfolder = "roi_based"
            if parameters["preprocessing"] == "custom":
                if not parameters["roi_custom_template"]:
                    raise ClinicaDLArgumentError(
                        "A custom template must be defined when the modality is set to custom."
                    )
                # NOTE(review): these writes to `parameters` happen inside a
                # joblib worker; with a process-based backend they do not
                # propagate back to the dict saved by write_preprocessing
                # below — confirm.
                parameters["roi_template"] = parameters["roi_custom_template"]
                parameters["roi_mask_pattern"] = parameters[
                    "roi_custom_mask_pattern"]
            else:
                from .extract_utils import PATTERN_DICT, TEMPLATE_DICT

                parameters["roi_template"] = TEMPLATE_DICT[
                    parameters["preprocessing"]]
                parameters["roi_mask_pattern"] = PATTERN_DICT[
                    parameters["preprocessing"]]

            parameters["masks_location"] = path.join(
                caps_directory, "masks", f"tpl-{parameters['roi_template']}")
            if len(parameters["roi_list"]) == 0:
                raise ClinicaDLArgumentError(
                    "A list of regions of interest must be given.")
            else:
                check_mask_list(
                    parameters["masks_location"],
                    parameters["roi_list"],
                    parameters["roi_mask_pattern"],
                    None if parameters["use_uncropped_image"] is None else
                    not parameters["use_uncropped_image"],
                )
            output_mode = extract_roi(
                file,
                masks_location=parameters["masks_location"],
                mask_pattern=parameters["roi_mask_pattern"],
                cropped_input=None if parameters["use_uncropped_image"] is None
                else not parameters["use_uncropped_image"],
                roi_names=parameters["roi_list"],
                uncrop_output=parameters["uncropped_roi"],
            )
            logger.debug(f"    ROI extracted.")
            write_output_imgs(output_mode, container, subfolder)

        Parallel(n_jobs=n_proc)(delayed(prepare_roi)(file)
                                for file in input_files)

    else:
        raise NotImplementedError(
            f"Extraction is not implemented for mode {parameters['mode']}.")

    # Save parameters dictionary
    preprocessing_json_path = write_preprocessing(parameters, caps_directory)
    logger.info(f"Preprocessing JSON saved at {preprocessing_json_path}.")
Ejemplo n.º 22
0
    def build_input_node_longitudinal(self):
        """Build and connect an input node to the pipeline (longitudinal data).

        Reads, for every subject/session pair, the PET image from the BIDS
        directory and the longitudinal FreeSurfer outputs (orig_nu volume,
        white surfaces, Destrieux and Desikan parcellations) from the CAPS
        directory, checks that PET and T1w share the same world coordinate
        system, then connects everything to the pipeline's input node.

        Raises:
            ClinicaException: if any required file could not be read (all
                reading errors are reported at once).
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        import clinica.utils.input_files as input_files
        from clinica.iotools.utils.data_handling import (
            check_relative_volume_location_in_world_coordinate_system,
        )
        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.inputs import clinica_file_reader

        read_parameters_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(
                fields=self.get_input_fields(), mandatory_inputs=True
            ),
            synchronize=True,
        )

        # One (input field, source directory, file type) triple per input;
        # PET comes from BIDS, the FreeSurfer outputs come from CAPS.
        # Replaces eight copy-pasted try/except blocks with a single loop.
        file_queries = [
            ("pet", self.bids_directory,
             input_files.bids_pet_nii(self.parameters["acq_label"])),
            ("orig_nu", self.caps_directory, input_files.T1_FS_LONG_ORIG_NU),
            ("white_surface_right", self.caps_directory,
             input_files.T1_FS_LONG_SURF_R),
            ("white_surface_left", self.caps_directory,
             input_files.T1_FS_LONG_SURF_L),
            ("destrieux_left", self.caps_directory,
             input_files.T1_FS_LONG_DESTRIEUX_PARC_L),
            ("destrieux_right", self.caps_directory,
             input_files.T1_FS_LONG_DESTRIEUX_PARC_R),
            ("desikan_left", self.caps_directory,
             input_files.T1_FS_LONG_DESIKAN_PARC_L),
            ("desikan_right", self.caps_directory,
             input_files.T1_FS_LONG_DESIKAN_PARC_R),
        ]

        # Accumulate reading errors so all missing files are reported at once.
        all_errors = []
        for field, directory, file_type in file_queries:
            try:
                setattr(
                    read_parameters_node.inputs,
                    field,
                    clinica_file_reader(
                        self.subjects, self.sessions, directory, file_type
                    ),
                )
            except ClinicaException as e:
                all_errors.append(e)

        if len(all_errors) > 0:
            error_message = "Clinica faced errors while trying to read files in your BIDS or CAPS directories.\n"
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaException(error_message)

        # Ensure PET and T1w images share the same world coordinate system.
        check_relative_volume_location_in_world_coordinate_system(
            "T1w-MRI (orig_nu.mgz)",
            read_parameters_node.inputs.orig_nu,
            self.parameters["acq_label"] + " PET",
            read_parameters_node.inputs.pet,
            self.bids_directory,
            self.parameters["acq_label"],
        )

        # fmt: off
        self.connect(
            [
                (read_parameters_node, self.input_node, [("pet", "pet"),
                                                         ("orig_nu", "orig_nu"),
                                                         ("white_surface_left", "white_surface_left"),
                                                         ("white_surface_right", "white_surface_right"),
                                                         ("destrieux_left", "destrieux_left"),
                                                         ("destrieux_right", "destrieux_right"),
                                                         ("desikan_left", "desikan_left"),
                                                         ("desikan_right", "desikan_right")])
            ]
        )
        # fmt: on
Ejemplo n.º 23
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Stops early if the DARTEL template of the group already exists,
        checks that at least two images are available, then reads the
        DARTEL input tissues from the CAPS folder.
        """
        import os
        import sys

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from colorama import Fore

        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.input_files import t1_volume_dartel_input_tissue
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import (
            print_begin_image,
            print_groups_in_caps_directory,
            print_images_to_process,
        )

        group_id = 'group-' + self.parameters['group_label']
        # A pre-existing group template means the pipeline already ran:
        # warn the user and stop instead of overwriting outputs.
        expected_template = os.path.join(
            self.caps_directory, 'groups', group_id, 't1',
            group_id + '_template.nii.gz')
        if os.path.exists(expected_template):
            cprint(
                "%sDARTEL template for %s already exists. Currently, Clinica does not propose "
                "to overwrite outputs for this pipeline.%s"
                % (Fore.YELLOW, self.parameters['group_label'], Fore.RESET))
            print_groups_in_caps_directory(self.caps_directory)
            sys.exit(0)

        # DARTEL cannot build a template from a single image.
        if len(self.subjects) <= 1:
            raise ClinicaException(
                '%sThis pipeline needs at least 2 images to create DARTEL template '
                'but Clinica only found %s.%s'
                % (Fore.RED, len(self.subjects), Fore.RESET))

        read_parameters_node = npe.Node(name="LoadingCLIArguments",
                                        interface=nutil.IdentityInterface(
                                            fields=self.get_input_fields(),
                                            mandatory_inputs=True))

        # Accumulate reading errors so every missing file is reported at once.
        errors, tissue_files = [], []
        for tissue in self.parameters['dartel_tissues']:
            try:
                tissue_files.append(
                    clinica_file_reader(self.subjects, self.sessions,
                                        self.caps_directory,
                                        t1_volume_dartel_input_tissue(tissue)))
            except ClinicaException as e:
                errors.append(e)

        if errors:
            raise RuntimeError(
                'Clinica faced errors while trying to read files in your BIDS or CAPS directories.\n'
                + ''.join(str(err) for err in errors))

        # tissue_files holds one list of len(self.subjects) files per
        # requested DARTEL tissue.
        read_parameters_node.inputs.dartel_inputs = tissue_files

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                'Computational time for DARTEL creation will depend on the number of images.'
            )
            print_begin_image(group_id)

        self.connect([(read_parameters_node, self.input_node,
                       [('dartel_inputs', 'dartel_input_images')])])
Ejemplo n.º 24
0
def DeepLearningPrepareData(caps_directory, tsv_file, parameters):
    """Extract Pytorch tensors (image, slices, patches or ROIs) from a CAPS directory.

    Args:
        caps_directory: path to the CAPS folder holding the preprocessed images.
        tsv_file: TSV file listing the participant/session pairs to process.
        parameters: dict of extraction options (preprocessing, mode and
            mode-specific settings); mutated in place and saved as a
            preprocessing JSON at the end.

    Raises:
        NotImplementedError: if the preprocessing or the extraction mode is
            unknown (previously an unknown value crashed later with NameError
            on the unbound FILE_TYPE / output_mode variables).
        ValueError: for an invalid ROI configuration.

    Side effects:
        Writes one .pt file per extracted tensor under each subject's
        deeplearning_prepare_data folder and saves the parameters dictionary
        as a preprocessing JSON file in the CAPS folder.
    """
    import os
    from os import path

    from clinica.utils.exceptions import (
        ClinicaBIDSError,
        ClinicaCAPSError,
        ClinicaException,
    )
    from clinica.utils.input_files import (
        T1W_EXTENSIVE,
        T1W_LINEAR,
        T1W_LINEAR_CROPPED,
        pet_linear_nii,
    )
    from clinica.utils.inputs import check_caps_folder, clinica_file_reader
    from clinica.utils.nipype import container_from_filename
    from clinica.utils.participant import get_subject_session_list
    from torch import save as save_tensor

    from clinicadl.utils.preprocessing import write_preprocessing

    from .extract_utils import (
        check_mask_list,
        extract_images,
        extract_patches,
        extract_roi,
        extract_slices,
    )

    logger = getLogger("clinicadl")

    # Get subject and session list
    check_caps_folder(caps_directory)
    input_dir = caps_directory
    logger.debug(f"CAPS directory : {input_dir}.")
    is_bids_dir = False
    sessions, subjects = get_subject_session_list(input_dir, tsv_file,
                                                  is_bids_dir, False, None)
    logger.info(
        f"{parameters['mode']}s will be extracted in Pytorch tensor from {len(sessions)} images."
    )
    logger.debug(f"List of subjects: \n{subjects}.")
    logger.debug(f"List of sessions: \n{sessions}.")

    # Select the correct filetype corresponding to modality
    # and select the right folder output name corresponding to modality.
    # The branches are mutually exclusive, so use elif and fail explicitly
    # on an unknown preprocessing instead of a NameError later on.
    logger.debug(
        f"Selected images are preprocessed with {parameters['preprocessing']} pipeline`."
    )
    if parameters["preprocessing"] == "t1-linear":
        mod_subfolder = "t1_linear"
        if parameters["use_uncropped_image"]:
            FILE_TYPE = T1W_LINEAR
        else:
            FILE_TYPE = T1W_LINEAR_CROPPED
    elif parameters["preprocessing"] == "t1-extensive":
        mod_subfolder = "t1_extensive"
        FILE_TYPE = T1W_EXTENSIVE
        # NOTE(review): every other branch writes "use_uncropped_image";
        # this key looks like a typo — confirm which key is read downstream.
        parameters["uncropped_image"] = None
    elif parameters["preprocessing"] == "pet-linear":
        mod_subfolder = "pet_linear"
        FILE_TYPE = pet_linear_nii(
            parameters["acq_label"],
            parameters["suvr_reference_region"],
            parameters["use_uncropped_image"],
        )
    elif parameters["preprocessing"] == "custom":
        mod_subfolder = "custom"
        FILE_TYPE = {
            "pattern": f"*{parameters['custom_suffix']}",
            "description": "Custom suffix",
        }
        parameters["use_uncropped_image"] = None
    else:
        raise NotImplementedError(
            f"Extraction is not implemented for preprocessing {parameters['preprocessing']}."
        )
    parameters["file_type"] = FILE_TYPE

    # Input file:
    input_files = clinica_file_reader(subjects, sessions, caps_directory,
                                      FILE_TYPE)

    # Loop on the images
    for file in input_files:
        logger.debug(f"  Processing of {file}.")
        container = container_from_filename(file)
        # Extract the wanted tensor
        if parameters["mode"] == "image":
            subfolder = "image_based"
            output_mode = extract_images(file)
            logger.debug(f"    Image extracted.")
        elif parameters["mode"] == "slice":
            subfolder = "slice_based"
            output_mode = extract_slices(
                file,
                slice_direction=parameters["slice_direction"],
                slice_mode=parameters["slice_mode"],
                discarded_slices=parameters["discarded_slices"],
            )
            logger.debug(f"    {len(output_mode)} slices extracted.")
        elif parameters["mode"] == "patch":
            subfolder = "patch_based"
            output_mode = extract_patches(
                file,
                patch_size=parameters["patch_size"],
                stride_size=parameters["stride_size"],
            )
            logger.debug(f"    {len(output_mode)} patches extracted.")
        elif parameters["mode"] == "roi":
            subfolder = "roi_based"
            if parameters["preprocessing"] == "custom":
                parameters["roi_template"] = parameters["roi_custom_template"]
                if parameters["roi_custom_template"] is None:
                    raise ValueError(
                        "A custom template must be defined when the modality is set to custom."
                    )
            else:
                from .extract_utils import TEMPLATE_DICT

                parameters["roi_template"] = TEMPLATE_DICT[
                    parameters["preprocessing"]]
            parameters["masks_location"] = path.join(
                caps_directory, "masks", f"tpl-{parameters['roi_template']}")
            if len(parameters["roi_list"]) == 0:
                raise ValueError("A list of regions must be given.")
            else:
                check_mask_list(
                    parameters["masks_location"],
                    parameters["roi_list"],
                    parameters["roi_custom_mask_pattern"],
                    None if parameters["use_uncropped_image"] is None else
                    not parameters["use_uncropped_image"],
                )
            output_mode = extract_roi(
                file,
                masks_location=parameters["masks_location"],
                mask_pattern=parameters["roi_custom_mask_pattern"],
                cropped_input=None if parameters["use_uncropped_image"] is None
                else not parameters["use_uncropped_image"],
                roi_list=parameters["roi_list"],
                uncrop_output=parameters["uncropped_roi"],
            )
            logger.debug(f"    ROI extracted.")
        else:
            # Guard against unknown modes: previously this fell through and
            # crashed with NameError on the unbound `output_mode` below.
            raise NotImplementedError(
                f"Extraction is not implemented for mode {parameters['mode']}."
            )
        # Write the extracted tensor on a .pt file
        for tensor in output_mode:
            output_file_dir = path.join(
                caps_directory,
                container,
                "deeplearning_prepare_data",
                subfolder,
                mod_subfolder,
            )
            if not path.exists(output_file_dir):
                os.makedirs(output_file_dir)
            output_file = path.join(output_file_dir, tensor[0])
            save_tensor(tensor[1], output_file)
            logger.debug(f"    Output tensor saved at {output_file}")

    # Save parameters dictionary
    preprocessing_json_path = write_preprocessing(parameters, caps_directory)
    logger.info(f"Preprocessing JSON saved at {preprocessing_json_path}.")
Ejemplo n.º 25
0
def preprocessing_t1w(bids_directory,
                      caps_directory,
                      tsv,
                      working_directory=None):
    """Build the T1w linear preprocessing workflow.

    The pipeline includes four steps:
    1) N4 bias field correction (performed with ANTS).
    2) Linear registration to MNI (MNI icbm152 nlin sym template)
       (performed with ANTS RegistrationSynQuick).
    3) Cropping of the background (in order to save computational power).
    4) Histogram-based intensity normalization with the binary `ImageMath`
       included with ANTS. NOTE: this step is deprecated and is NOT
       connected in the final workflow (see commented-out node below).

    Parameters
    ----------
    bids_directory: str
        Folder with BIDS structure.
    caps_directory: str
        Folder where CAPS structure will be stored.
    tsv: str
        Path to a TSV file listing the participant/session pairs to process.
    working_directory: str, optional
        Folder containing a temporary space to save intermediate results.

    Returns
    -------
    wf: nipype.pipeline.engine.Workflow
        The configured (unrun) workflow; call ``wf.run()`` to execute it.

    Raises
    ------
    ClinicaBIDSError
        If the T1w files cannot be read from the BIDS directory.
    """
    from clinica.utils.inputs import check_bids_folder
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.input_files import T1W_NII
    from clinica.utils.filemanip import get_subject_id
    from clinicadl.tools.inputs.input import fetch_file
    from os.path import dirname, join, abspath, exists
    from os import pardir

    import nipype.pipeline.engine as npe
    import nipype.interfaces.utility as nutil
    from nipype.interfaces import ants

    check_bids_folder(bids_directory)
    input_dir = bids_directory
    is_bids_dir = True
    base_dir = working_directory

    # Locate the reference templates bundled in (or fetched into) the
    # package's resources/masks folder, next to this module.
    root = dirname(abspath(join(abspath(__file__), pardir)))
    path_to_mask = join(root, 'resources', 'masks')
    ref_template = join(path_to_mask, 'mni_icbm152_t1_tal_nlin_sym_09c.nii')
    ref_crop = join(path_to_mask, 'ref_cropped_template.nii.gz')
    url1 = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/ref_cropped_template.nii.gz"
    url2 = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/mni_icbm152_t1_tal_nlin_sym_09c.nii"

    # Download any missing template. Failures are only reported here:
    # the workflow will fail later if the files are actually required
    # and still absent.
    if not exists(ref_template):
        try:
            fetch_file(url2, ref_template)
        except IOError as err:
            print(
                'Unable to download required template (mni_icbm152) for processing:',
                err)

    if not exists(ref_crop):
        try:
            fetch_file(url1, ref_crop)
        except IOError as err:
            print(
                'Unable to download required template (ref_crop) for processing:',
                err)

    sessions, subjects = get_subject_session_list(input_dir, tsv, is_bids_dir,
                                                  False, base_dir)

    # Inputs from anat/ folder
    # ========================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects, sessions, bids_directory,
                                        T1W_NII)
    except ClinicaException as e:
        err = ('Clinica faced error(s) while trying to read files in your '
               'BIDS directory.\n' + str(e))
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input fields name.
        """
        return ['t1w']

    # Iterate synchronously over the T1w files, one workflow run per image.
    read_node = npe.Node(
        name="ReadingFiles",
        iterables=[
            ('t1w', t1w_files),
        ],
        synchronize=True,
        interface=nutil.IdentityInterface(fields=get_input_fields()))

    # Extract the image identifier (sub-XXX_ses-YYY) from the file path.
    image_id_node = npe.Node(interface=nutil.Function(
        input_names=['bids_or_caps_file'],
        output_names=['image_id'],
        function=get_subject_id),
                             name='ImageID')

    # The core (processing) nodes

    # 1. N4 bias correction by ANTS. It uses the nipype interface.
    n4biascorrection = npe.Node(name='n4biascorrection',
                                interface=ants.N4BiasFieldCorrection(
                                    dimension=3,
                                    save_bias=True,
                                    bspline_fitting_distance=600))

    # 2. `RegistrationSynQuick` by ANTS. It uses the nipype interface.
    #    Affine-only ('a') 3D registration onto the MNI template.
    ants_registration_node = npe.Node(name='antsRegistrationSynQuick',
                                      interface=ants.RegistrationSynQuick())
    ants_registration_node.inputs.fixed_image = ref_template
    ants_registration_node.inputs.transform_type = 'a'
    ants_registration_node.inputs.dimension = 3

    # 3. Crop image (using nifti). It uses a custom interface, from utils file.
    from .T1_linear_utils import crop_nifti

    cropnifti = npe.Node(name='cropnifti',
                         interface=nutil.Function(
                             function=crop_nifti,
                             input_names=['input_img', 'ref_crop'],
                             output_names=['output_img', 'crop_template']))
    cropnifti.inputs.ref_crop = ref_crop

    #### Deprecated ####
    #### This step was not used in the final version ####
    # 4. Histogram-based intensity normalization. This is a custom function
    #    performed by the binary `ImageMath` included with *ANTS*.

    #   from .T1_linear_utils import ants_histogram_intensity_normalization
    #
    #   ## histogram-based intensity normalization
    #   intensitynorm = npe.Node(
    #           name='intensitynormalization',
    #           interface=nutil.Function(
    #               input_names=['image_dimension', 'crop_template', 'input_img'],
    #               output_names=['output_img'],
    #               function=ants_histogram_intensity_normalization
    #               )
    #           )
    #   intensitynorm.inputs.image_dimension = 3

    # DataSink and the output node

    from .T1_linear_utils import (container_from_filename, get_data_datasink)
    # Create node to write selected files into the CAPS
    from nipype.interfaces.io import DataSink

    # Build the DataSink substitutions from the image identifier.
    get_ids = npe.Node(interface=nutil.Function(
        input_names=['image_id'],
        output_names=['image_id_out', 'subst_ls'],
        function=get_data_datasink),
                       name="GetIDs")

    # Find container path from t1w filename
    # =====================================
    container_path = npe.Node(nutil.Function(
        input_names=['bids_or_caps_filename'],
        output_names=['container'],
        function=container_from_filename),
                              name='ContainerPath')

    write_node = npe.Node(name="WriteCaps", interface=DataSink())
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    # Connecting the workflow
    from clinica.utils.nipype import fix_join

    wf = npe.Workflow(name='t1_linear_dl', base_dir=working_directory)

    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (image_id_node, ants_registration_node, [('image_id', 'output_prefix')
                                                 ]),
        (read_node, n4biascorrection, [("t1w", "input_image")]),
        (n4biascorrection, ants_registration_node, [('output_image',
                                                     'moving_image')]),
        (ants_registration_node, cropnifti, [('warped_image', 'input_img')]),
        (ants_registration_node, write_node, [('out_matrix', 'affine_mat')]),
        # Connect to DataSink
        (container_path, write_node, [(('container', fix_join, 't1_linear'),
                                       'container')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')]),
        #(get_ids, write_node, [('regexp_subst_ls', 'regexp_substitutions')]),
        (n4biascorrection, write_node, [('output_image', '@outfile_corr')]),
        (ants_registration_node, write_node, [('warped_image', '@outfile_reg')
                                              ]),
        (cropnifti, write_node, [('output_img', '@outfile_crop')]),
    ])

    return wf
Ejemplo n.º 26
0
    def build_input_node(self):
        """Build and connect an input node to the pipelines.

        Reads the required BIDS files (fieldmaps when unwarping, BOLD,
        T1w) and their JSON sidecars, extracts the SPM slice-timing and
        unwarping parameters, and connects everything to ``self.input_node``.

        All read errors are accumulated and raised as a single
        ClinicaBIDSError at the end, so the user sees every missing
        file at once.

        References:
            https://lcni.uoregon.edu/kb-articles/kb-0003

        """

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        import json
        import numpy as np
        from clinica.utils.stream import cprint
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        import clinica.utils.input_files as input_files

        # Reading BIDS files
        # ==================
        read_node = npe.Node(name="ReadingBIDS",
                             interface=nutil.IdentityInterface(
                                 fields=self.get_input_fields(),
                                 mandatory_inputs=True))

        # Store all the potentials errors
        all_errors = []
        # Fieldmap files are only needed when unwarping was requested.
        if ('unwarping' in self.parameters) and self.parameters['unwarping']:
            # Magnitude 1 file
            try:
                read_node.inputs.magnitude1 = clinica_file_reader(
                    self.subjects, self.sessions, self.bids_directory,
                    input_files.FMAP_MAGNITUDE1_NII)
            except ClinicaException as e:
                all_errors.append(e)

            # Phasediff file
            try:
                read_node.inputs.phasediff = clinica_file_reader(
                    self.subjects, self.sessions, self.bids_directory,
                    input_files.FMAP_PHASEDIFF_NII)
            except ClinicaException as e:
                all_errors.append(e)

        # Bold files
        try:
            read_node.inputs.bold = clinica_file_reader(
                self.subjects, self.sessions, self.bids_directory,
                input_files.FMRI_BOLD_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # T1w-MRI files
        try:
            read_node.inputs.T1w = clinica_file_reader(self.subjects,
                                                       self.sessions,
                                                       self.bids_directory,
                                                       input_files.T1W_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # Reading BIDS json
        # =================
        # One entry per subject/session is appended to each list below,
        # in the same order as the files returned by clinica_file_reader.

        read_node.inputs.et = []
        read_node.inputs.blipdir = []
        read_node.inputs.tert = []
        read_node.inputs.time_repetition = []
        read_node.inputs.num_slices = []
        read_node.inputs.slice_order = []
        read_node.inputs.ref_slice = []
        read_node.inputs.time_acquisition = []

        if self.parameters['unwarping']:
            # From phasediff json file
            try:
                phasediff_json = clinica_file_reader(
                    self.subjects, self.sessions, self.bids_directory,
                    input_files.FMAP_PHASEDIFF_JSON)
                for json_f in phasediff_json:
                    with open(json_f) as json_file:
                        data = json.load(json_file)
                        # SPM echo times
                        read_node.inputs.et.append(
                            [data['EchoTime1'], data['EchoTime2']])
                        # SPM blip direction
                        # TODO: Verifiy that it is the correct way to get the
                        # blipdir
                        # A '-' in the second character (e.g. 'j-') is taken
                        # to mean a negative phase-encoding direction.
                        blipdir_raw = data['PhaseEncodingDirection']
                        if len(blipdir_raw) > 1 and blipdir_raw[1] == '-':
                            read_node.inputs.blipdir.append(-1)
                        else:
                            read_node.inputs.blipdir.append(1)
            except ClinicaException as e:
                all_errors.append(e)

        # From func json file
        try:
            func_json = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory,
                                            input_files.FMRI_BOLD_JSON)

            for json_f in func_json:
                with open(json_f) as json_file:
                    data = json.load(json_file)
                    # SPM Total readout time
                    read_node.inputs.tert.append(
                        1 / data['BandwidthPerPixelPhaseEncode'])
                    # SPM Repetition time
                    read_node.inputs.time_repetition.append(
                        data['RepetitionTime'])
                    # Number of slices
                    slice_timing = data['SliceTiming']
                    read_node.inputs.num_slices.append(len(slice_timing))
                    # Slice order (1-based ranks of acquisition times)
                    slice_order = np.argsort(slice_timing) + 1
                    read_node.inputs.slice_order.append(slice_order.tolist())
                    # Reference slice: the earliest-acquired one (1-based)
                    read_node.inputs.ref_slice.append(
                        np.argmin(slice_timing) + 1)
                    # Acquisition time: TR minus one slice's share of the TR
                    read_node.inputs.time_acquisition.append(
                        data['RepetitionTime'] -
                        data['RepetitionTime'] / float(len(slice_timing)))
        except ClinicaException as e:
            all_errors.append(e)

        # Report every accumulated read error at once.
        if len(all_errors) > 0:
            error_message = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaBIDSError(error_message)

        # Fieldmap-related connections only exist when unwarping is enabled.
        if ('unwarping' in self.parameters) and self.parameters['unwarping']:
            self.connect([
                # Reading BIDS json
                (read_node, self.input_node, [('et', 'et')]),
                (read_node, self.input_node, [('blipdir', 'blipdir')]),
                (read_node, self.input_node, [('tert', 'tert')]),
                # Reading BIDS files
                (read_node, self.input_node, [('phasediff', 'phasediff')]),
                (read_node, self.input_node, [('magnitude1', 'magnitude1')]),
            ])

        self.connect([
            # Reading BIDS json
            (read_node, self.input_node, [('time_repetition',
                                           'time_repetition')]),
            (read_node, self.input_node, [('num_slices', 'num_slices')]),
            (read_node, self.input_node, [('slice_order', 'slice_order')]),
            (read_node, self.input_node, [('ref_slice', 'ref_slice')]),
            (read_node, self.input_node, [('time_acquisition',
                                           'time_acquisition')]),
            # Reading BIDS files
            (read_node, self.input_node, [('bold', 'bold')]),
            (read_node, self.input_node, [('T1w', 'T1w')]),
        ])
Ejemplo n.º 27
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Reads the t1-freesurfer outputs (white-matter mask, Desikan and
        Destrieux parcellations) and the dwi-preprocessing outputs
        (preprocessed DWI, B0 brainmask, bvec, bval) from the CAPS
        directory, checks that all DWI images share the same space
        ('b0' or 'T1w'), and wires a synchronized iterable read node
        into ``self.input_node``.

        Raises:
            ClinicaCAPSError: if any input file is missing, if the DWI
                files are not all in the same space, or if the space is
                neither 'b0' nor 'T1w'.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.stream import cprint
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.inputs import clinica_file_reader
        import clinica.utils.input_files as input_files
        import re

        # Accumulate read errors so the user sees them all at once.
        all_errors = []

        # Inputs from t1-freesurfer pipeline
        # ==================================

        # White matter segmentation
        try:
            wm_mask_files = clinica_file_reader(self.subjects, self.sessions,
                                                self.caps_directory,
                                                input_files.T1_FS_WM)
        except ClinicaException as e:
            all_errors.append(e)

        # Desikan parcellation
        try:
            aparc_aseg_files = clinica_file_reader(self.subjects,
                                                   self.sessions,
                                                   self.caps_directory,
                                                   input_files.T1_FS_DESIKAN)
        except ClinicaException as e:
            all_errors.append(e)

        # Destrieux parcellation
        try:
            aparc_aseg_a2009s_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                input_files.T1_FS_DESTRIEUX)
        except ClinicaException as e:
            all_errors.append(e)

        # Inputs from dwi-preprocessing pipeline
        # ======================================
        # Preprocessed DWI
        try:
            dwi_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.caps_directory,
                                            input_files.DWI_PREPROC_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # B0 brainmask
        try:
            dwi_brainmask_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                input_files.DWI_PREPROC_BRAINMASK)
        except ClinicaException as e:
            all_errors.append(e)

        # Preprocessed bvec
        try:
            bvec_files = clinica_file_reader(self.subjects, self.sessions,
                                             self.caps_directory,
                                             input_files.DWI_PREPROC_BVEC)
        except ClinicaException as e:
            all_errors.append(e)

        # Preprocessed bval
        try:
            bval_files = clinica_file_reader(self.subjects, self.sessions,
                                             self.caps_directory,
                                             input_files.DWI_PREPROC_BVAL)
        except ClinicaException as e:
            all_errors.append(e)

        if len(all_errors) > 0:
            error_message = 'Clinica faced errors while trying to read files in your BIDS or CAPS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)

        # Check space of DWI dataset
        # The space is parsed from the '_space-<X>_preproc' part of each
        # preprocessed DWI filename.
        dwi_file_spaces = [
            re.search('.*_space-(.*)_preproc.nii.*', file,
                      re.IGNORECASE).group(1) for file in dwi_files
        ]

        # Return an error if all the DWI files are not in the same space
        if any(a != dwi_file_spaces[0] for a in dwi_file_spaces):
            raise ClinicaCAPSError(
                'Preprocessed DWI files are not all in the '
                'same space. Please process them separately '
                'using the appropriate subjects/sessions '
                '`.tsv` file (-tsv option).')

        # Used only for for T1-B0 registration
        if dwi_file_spaces[0] == 'b0':
            # Brain extracted T1w
            # NOTE(review): unlike the reads above, this one is not wrapped
            # in try/except, so a missing file raises the raw
            # ClinicaException here — confirm this is intended.
            t1_brain_files = clinica_file_reader(self.subjects, self.sessions,
                                                 self.caps_directory,
                                                 input_files.T1_FS_BRAIN)

        # Pair the Desikan and Destrieux parcellations per subject/session.
        list_atlas_files = [[aparc_aseg, aparc_aseg_a2009]
                            for aparc_aseg, aparc_aseg_a2009 in zip(
                                aparc_aseg_files, aparc_aseg_a2009s_files)]

        # FSL-style gradient input: one (bvec, bval) tuple per image.
        list_grad_fsl = [(bvec, bval)
                         for bvec, bval in zip(bvec_files, bval_files)]

        # Extract participant and session IDs from the DWI paths, purely
        # to display which images will be processed.
        p_id_images_to_process = [
            re.search(r'(sub-[a-zA-Z0-9]+)', caps_file).group()
            for caps_file in dwi_files
        ]
        s_id_images_to_process = [
            re.search(r'(ses-[a-zA-Z0-9]+)', caps_file).group()
            for caps_file in dwi_files
        ]
        images_to_process = ', '.join(
            p_id[4:] + '|' + s_id[4:] for p_id, s_id in zip(
                p_id_images_to_process, s_id_images_to_process))
        cprint('The pipeline will be run on the following subject(s): %s' %
               images_to_process)

        # Build the synchronized read node. The 'b0' variant carries the
        # extra brain-extracted T1w needed for T1-B0 registration.
        if dwi_file_spaces[0] == 'b0':
            self.parameters['dwi_space'] = 'b0'
            read_node = npe.Node(name="ReadingFiles",
                                 iterables=[('wm_mask_file', wm_mask_files),
                                            ('t1_brain_file', t1_brain_files),
                                            ('dwi_file', dwi_files),
                                            ('dwi_brainmask_file',
                                             dwi_brainmask_files),
                                            ('grad_fsl', list_grad_fsl),
                                            ('atlas_files', list_atlas_files)],
                                 synchronize=True,
                                 interface=nutil.IdentityInterface(
                                     fields=self.get_input_fields()))
            self.connect([
                (read_node, self.input_node, [('t1_brain_file',
                                               't1_brain_file')]),
                (read_node, self.input_node, [('wm_mask_file', 'wm_mask_file')
                                              ]),
                (read_node, self.input_node, [('dwi_file', 'dwi_file')]),
                (read_node, self.input_node, [('dwi_brainmask_file',
                                               'dwi_brainmask_file')]),
                (read_node, self.input_node, [('grad_fsl', 'grad_fsl')]),
                (read_node, self.input_node, [('atlas_files', 'atlas_files')]),
            ])

        elif dwi_file_spaces[0] == 'T1w':
            self.parameters['dwi_space'] = 'T1w'
            read_node = npe.Node(name="ReadingFiles",
                                 iterables=[('wm_mask_file', wm_mask_files),
                                            ('dwi_file', dwi_files),
                                            ('dwi_brainmask_file',
                                             dwi_brainmask_files),
                                            ('grad_fsl', list_grad_fsl),
                                            ('atlas_files', list_atlas_files)],
                                 synchronize=True,
                                 interface=nutil.IdentityInterface(
                                     fields=self.get_input_fields()))
            self.connect([
                (read_node, self.input_node, [('wm_mask_file', 'wm_mask_file')
                                              ]),
                (read_node, self.input_node, [('dwi_file', 'dwi_file')]),
                (read_node, self.input_node, [('dwi_brainmask_file',
                                               'dwi_brainmask_file')]),
                (read_node, self.input_node, [('grad_fsl', 'grad_fsl')]),
                (read_node, self.input_node, [('atlas_files', 'atlas_files')]),
            ])

        else:
            raise ClinicaCAPSError('Bad preprocessed DWI space. Please check '
                                   'your CAPS folder.')
Ejemplo n.º 28
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Fetches the MNI template and cropped reference (with checksum
        verification) if not present locally, reads the PET and T1w
        images from the BIDS directory and the T1w-to-MNI transformation
        from the CAPS directory (t1-linear output), and connects a
        synchronized iterable read node into ``self.input_node``.

        Raises:
            ClinicaBIDSError: if the PET or T1w files cannot be read.
            ClinicaCAPSError: if the transformation files cannot be read.
        """
        from os import pardir
        from os.path import abspath, dirname, exists, join

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import (
            ClinicaBIDSError,
            ClinicaCAPSError,
            ClinicaException,
        )
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.input_files import (
            T1W_NII,
            T1W_TO_MNI_TRANSFROM,
            bids_pet_nii,
        )
        from clinica.utils.inputs import (
            RemoteFileStructure,
            clinica_file_reader,
            fetch_file,
        )
        from clinica.utils.pet import get_suvr_mask
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # from clinica.iotools.utils.data_handling import check_volume_location_in_world_coordinate_system
        # Import references files
        # Reference templates live in the package's resources/masks
        # folder, two levels above this module.
        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, "resources", "masks")
        url_aramis = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/"
        # SHA-256 checksums let fetch_file verify the downloads.
        FILE1 = RemoteFileStructure(
            filename="mni_icbm152_t1_tal_nlin_sym_09c.nii",
            url=url_aramis,
            checksum=
            "93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34",
        )
        FILE2 = RemoteFileStructure(
            filename="ref_cropped_template.nii.gz",
            url=url_aramis,
            checksum=
            "67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3",
        )

        self.ref_template = join(path_to_mask, FILE1.filename)
        self.ref_crop = join(path_to_mask, FILE2.filename)
        self.ref_mask = get_suvr_mask(self.parameters["suvr_reference_region"])

        # Download missing templates. Failures are logged, not fatal here:
        # the pipeline fails later if the files are actually needed.
        if not (exists(self.ref_template)):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    msg=
                    f"Unable to download required template (mni_icbm152) for processing: {err}",
                    lvl="error",
                )
        if not (exists(self.ref_crop)):
            try:
                fetch_file(FILE2, path_to_mask)
            except IOError as err:
                cprint(
                    msg=
                    f"Unable to download required template (ref_crop) for processing: {err}",
                    lvl="error",
                )

        # Inputs from BIDS directory
        # pet file:
        PET_NII = bids_pet_nii(self.parameters["acq_label"])
        try:
            pet_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, PET_NII)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read pet files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err)

        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read t1w files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err)

        # Inputs from t1-linear pipeline
        # Transformation files from T1w files to MNI:
        try:
            t1w_to_mni_transformation_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                T1W_TO_MNI_TRANSFROM)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read transformation files in your CAPS directory.\n"
                + str(e))
            raise ClinicaCAPSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last approximately 3 minutes per image.")

        # One synchronized iteration per (t1w, pet, transform) triple.
        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            iterables=[
                ("t1w", t1w_files),
                ("pet", pet_files),
                ("t1w_to_mni", t1w_to_mni_transformation_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        # fmt: off
        self.connect([
            (read_input_node, self.input_node, [("t1w", "t1w")]),
            (read_input_node, self.input_node, [("pet", "pet")]),
            (read_input_node, self.input_node, [("t1w_to_mni", "t1w_to_mni")]),
        ])
Ejemplo n.º 29
0
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        from os.path import exists, join

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.iotools.utils.data_handling import (
            check_relative_volume_location_in_world_coordinate_system, )
        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.filemanip import save_participants_sessions
        from clinica.utils.input_files import (
            T1W_NII,
            bids_pet_nii,
            t1_volume_deformation_to_template,
            t1_volume_final_group_template,
            t1_volume_native_tpm,
            t1_volume_native_tpm_in_mni,
        )
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.pet import get_suvr_mask, read_psf_information
        from clinica.utils.stream import cprint
        from clinica.utils.ux import (
            print_groups_in_caps_directory,
            print_images_to_process,
        )

        # Check that group already exists
        if not exists(
                join(self.caps_directory, "groups",
                     f"group-{self.parameters['group_label']}")):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                f"Group {self.parameters['group_label']} does not exist. "
                "Did you run t1-volume or t1-volume-create-dartel pipeline?")

        # Tissues DataGrabber
        # ====================
        all_errors = []

        # Grab reference mask
        reference_mask_file = get_suvr_mask(
            self.parameters["suvr_reference_region"])

        # PET from BIDS directory
        try:
            pet_bids = clinica_file_reader(
                self.subjects,
                self.sessions,
                self.bids_directory,
                bids_pet_nii(self.parameters["acq_label"]),
            )
        except ClinicaException as e:
            all_errors.append(e)

        # Native T1w-MRI
        try:
            t1w_bids = clinica_file_reader(self.subjects, self.sessions,
                                           self.bids_directory, T1W_NII)
        except ClinicaException as e:
            all_errors.append(e)

        # mask_tissues
        tissues_input = []
        for tissue_number in self.parameters["mask_tissues"]:
            try:
                current_file = clinica_file_reader(
                    self.subjects,
                    self.sessions,
                    self.caps_directory,
                    t1_volume_native_tpm_in_mni(tissue_number, False),
                )
                tissues_input.append(current_file)
            except ClinicaException as e:
                all_errors.append(e)
        # Tissues_input has a length of len(self.parameters['mask_tissues']). Each of these elements has a size of
        # len(self.subjects). We want the opposite: a list of size len(self.subjects) whose elements have a size of
        # len(self.parameters['mask_tissues']. The trick is to iter on elements with zip(*my_list)
        tissues_input_final = []
        for subject_tissue_list in zip(*tissues_input):
            tissues_input_final.append(subject_tissue_list)
        tissues_input = tissues_input_final

        # Flowfields
        try:
            flowfields_caps = clinica_file_reader(
                self.subjects,
                self.sessions,
                self.caps_directory,
                t1_volume_deformation_to_template(
                    self.parameters["group_label"]),
            )
        except ClinicaException as e:
            all_errors.append(e)

        # Dartel Template
        try:
            final_template = clinica_group_reader(
                self.caps_directory,
                t1_volume_final_group_template(self.parameters["group_label"]),
            )
        except ClinicaException as e:
            all_errors.append(e)

        if self.parameters["pvc_psf_tsv"] is not None:
            iterables_psf = read_psf_information(
                self.parameters["pvc_psf_tsv"],
                self.subjects,
                self.sessions,
                self.parameters["acq_label"],
            )
            self.parameters["apply_pvc"] = True
        else:
            iterables_psf = [[]] * len(self.subjects)
            self.parameters["apply_pvc"] = False

        if self.parameters["apply_pvc"]:
            # pvc tissues input
            pvc_tissues_input = []
            for tissue_number in self.parameters["pvc_mask_tissues"]:
                try:
                    current_file = clinica_file_reader(
                        self.subjects,
                        self.sessions,
                        self.caps_directory,
                        t1_volume_native_tpm(tissue_number),
                    )
                    pvc_tissues_input.append(current_file)
                except ClinicaException as e:
                    all_errors.append(e)

            if len(all_errors) == 0:
                pvc_tissues_input_final = []
                for subject_tissue_list in zip(*pvc_tissues_input):
                    pvc_tissues_input_final.append(subject_tissue_list)
                pvc_tissues_input = pvc_tissues_input_final
        else:
            pvc_tissues_input = []

        if len(all_errors) > 0:
            error_message = "Clinica faced error(s) while trying to read files in your CAPS/BIDS directories.\n"
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaException(error_message)

        check_relative_volume_location_in_world_coordinate_system(
            "T1w-MRI",
            t1w_bids,
            self.parameters["acq_label"] + " PET",
            pet_bids,
            self.bids_directory,
            self.parameters["acq_label"],
            skip_question=self.parameters["skip_question"],
        )

        # Save subjects to process in <WD>/<Pipeline.name>/participants.tsv
        folder_participants_tsv = os.path.join(self.base_dir, self.name)
        save_participants_sessions(self.subjects, self.sessions,
                                   folder_participants_tsv)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("List available in %s" %
                   os.path.join(folder_participants_tsv, "participants.tsv"))
            cprint(
                "The pipeline will last approximately 10 minutes per image.")

        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                              mandatory_inputs=True),
            iterables=[
                ("pet_image", pet_bids),
                ("t1_image_native", t1w_bids),
                ("mask_tissues", tissues_input),
                ("psf", iterables_psf),
                ("flow_fields", flowfields_caps),
                ("pvc_mask_tissues", pvc_tissues_input),
            ],
            synchronize=True,
        )

        read_input_node.inputs.reference_mask = reference_mask_file
        read_input_node.inputs.dartel_template = final_template

        # fmt: off
        self.connect([(read_input_node, self.input_node,
                       [("pet_image", "pet_image"),
                        ("t1_image_native", "t1_image_native"),
                        ("mask_tissues", "mask_tissues"),
                        ("flow_fields", "flow_fields"),
                        ("dartel_template", "dartel_template"),
                        ("reference_mask", "reference_mask"), ("psf", "psf"),
                        ("pvc_mask_tissues", "pvc_mask_tissues")])])
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        from colorama import Fore
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinica.utils.input_files import (
            t1_volume_final_group_template, t1_volume_native_tpm,
            t1_volume_deformation_to_template)
        from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_groups_in_caps_directory, print_images_to_process

        # The DARTEL group must exist already (created by t1-volume or
        # t1-volume-create-dartel); otherwise list the available groups and abort.
        group_path = os.path.join(self.caps_directory, 'groups',
                                  'group-' + self.parameters['group_id'])
        if not os.path.exists(group_path):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                '%sGroup %s does not exist. Did you run t1-volume or t1-volume-create-dartel pipeline?%s'
                % (Fore.RED, self.parameters['group_id'], Fore.RESET))

        errors = []
        read_input_node = npe.Node(name="LoadingCLIArguments",
                                   interface=nutil.IdentityInterface(
                                       fields=self.get_input_fields(),
                                       mandatory_inputs=True))

        # Segmented Tissues
        # =================
        # One CAPS query per requested tissue; failed queries are collected in
        # `errors` so every missing input can be reported at once.
        per_tissue_files = []
        for tissue_number in self.parameters['tissues']:
            try:
                per_tissue_files.append(clinica_file_reader(
                    self.subjects, self.sessions, self.caps_directory,
                    t1_volume_native_tpm(tissue_number)))
            except ClinicaException as e:
                errors.append(e)
        # `per_tissue_files` is tissue-major: one sublist of len(self.subjects)
        # files per tissue. Transpose it so each element gathers all tissues
        # belonging to one subject.
        read_input_node.inputs.native_segmentations = list(zip(*per_tissue_files))

        # Flow Fields
        # ===========
        try:
            read_input_node.inputs.flowfield_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                t1_volume_deformation_to_template(self.parameters['group_id']))
        except ClinicaException as e:
            errors.append(e)

        # Dartel Template
        # ================
        try:
            read_input_node.inputs.template_file = clinica_group_reader(
                self.caps_directory,
                t1_volume_final_group_template(self.parameters['group_id']))
        except ClinicaException as e:
            errors.append(e)

        # Raise a single error aggregating everything that went wrong above.
        if errors:
            raise ClinicaCAPSError(
                'Clinica faced error(s) while trying to read files in your CAPS/BIDS directories.\n'
                + ''.join(str(msg) for msg in errors))

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last a few minutes per image.')

        self.connect([
            (read_input_node, self.input_node,
             [('native_segmentations', 'native_segmentations'),
              ('flowfield_files', 'flowfield_files'),
              ('template_file', 'template_file')]),
        ])