Example #1
    def get_data_dictionary(path_to_clinical_data_folder):
        """Temporary function to get DataDictionary_NIFD_2017.10.18.xlsx file.

        See https://github.com/aramis-lab/clinica/issues/122 for details.

        Args:
            path_to_clinical_data_folder: Path to clinical data folder.

        Returns:
            Path to 'DataDictionary_NIFD_2017.10.18.xlsx' file.
        """
        import os

        from clinica.utils.inputs import RemoteFileStructure, get_file_from_server

        local_nifd_dictionary = os.path.join(
            path_to_clinical_data_folder, "DataDictionary_NIFD_2017.10.18.xlsx")
        if os.path.exists(local_nifd_dictionary):
            path_to_nifd_dictionary = local_nifd_dictionary
        else:
            NIFD_DICTIONNARY = RemoteFileStructure(
                filename="DataDictionary_NIFD_2017.10.18.xlsx",
                url="https://aramislab.paris.inria.fr/files/data/databases/converters/",
                checksum="e75b23a9f4dad601463f48031cfc00e1180e4877d0bebbdfd340fdbcbacab5cb",
            )
            path_to_nifd_dictionary = get_file_from_server(NIFD_DICTIONNARY)

        return path_to_nifd_dictionary
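For context, RemoteFileStructure from clinica.utils.inputs behaves like a simple record holding a file name, a base URL and a checksum; the sketch below imitates it with a plain namedtuple (the local definition is only an illustration, not clinica's actual source).

from collections import namedtuple

# Illustrative stand-in for clinica.utils.inputs.RemoteFileStructure:
# one record per remote file, bundling everything needed to fetch and verify it.
RemoteFileStructure = namedtuple("RemoteFileStructure", ["filename", "url", "checksum"])

nifd_dictionary = RemoteFileStructure(
    filename="DataDictionary_NIFD_2017.10.18.xlsx",
    url="https://aramislab.paris.inria.fr/files/data/databases/converters/",
    checksum="e75b23a9f4dad601463f48031cfc00e1180e4877d0bebbdfd340fdbcbacab5cb",
)
# The full download URL is simply the base URL followed by the file name.
print(nifd_dictionary.url + nifd_dictionary.filename)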
Example #2
    def get_atlas_labels():
        from clinica.utils.inputs import RemoteFileStructure, get_file_from_server

        NEUROMORPHOMETRICS_PARC = RemoteFileStructure(
            filename="atlas-Neuromorphometrics_dseg.nii.gz",
            url="https://aramislab.paris.inria.fr/files/software/cat12/CAT12-Atlases/",
            checksum="19a50136cd2f8a14357a19ad8a1dc4a2ecb6beb3fc16cb5441f4f2ebaf64a9a5",
        )
        return get_file_from_server(NEUROMORPHOMETRICS_PARC)
Example #3
    def get_atlas_labels():
        from clinica.utils.inputs import RemoteFileStructure, get_file_from_server

        LPBA40_PARC = RemoteFileStructure(
            filename="atlas-LPBA40_dseg.nii.gz",
            url="https://aramislab.paris.inria.fr/files/software/cat12/CAT12-Atlases/",
            checksum="20826b572bbbdbcdbf28bbd3801dc0c2fed28d1e54bc4fd5027e64ccc6d50374",
        )
        return get_file_from_server(LPBA40_PARC)
Example #4
    def get_atlas_labels():
        from clinica.utils.inputs import RemoteFileStructure, get_file_from_server

        HAMMERS_PARC = RemoteFileStructure(
            filename="atlas-Hammers_dseg.nii.gz",
            url="https://aramislab.paris.inria.fr/files/software/cat12/CAT12-Atlases/",
            checksum="c034a7bce2dcab390a0b72f4e7d04769eb3fe5b990d0e18d89b0ce73339a5376",
        )
        return get_file_from_server(HAMMERS_PARC)
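The checksum passed to RemoteFileStructure is a 64-character hex digest, which matches SHA-256. As a hedged illustration of what that field protects against (this is not clinica's fetch_file/get_file_from_server implementation, and the local file name is a placeholder), a downloaded atlas could be verified like this:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Return the SHA-256 hex digest of a local file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder path to a local copy of the Hammers atlas fetched above.
expected = "c034a7bce2dcab390a0b72f4e7d04769eb3fe5b990d0e18d89b0ce73339a5376"
if sha256_of("atlas-Hammers_dseg.nii.gz") != expected:
    raise IOError("Checksum mismatch: the downloaded atlas is corrupted or incomplete.")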
Example #5
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline."""
        import os
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import RemoteFileStructure
        from .t1_extensive_utils import (get_caps_filename, apply_binary_mask,
                                         get_file_from_server)

        # Get CAPS Filename
        # =================
        caps_filename = npe.Node(
            name="0-GetCapsFilename",
            interface=nutil.Function(
                input_names="norm_t1w",
                output_names=self.get_output_fields(),
                function=get_caps_filename,
            ),
        )

        # Apply brainmask
        # ===============
        ICV_MASK = RemoteFileStructure(
            filename="tpl-IXI549Space_desc-ICV_mask.nii.gz",
            url="https://aramislab.paris.inria.fr/files/data/masks/tpl-IXI549Space/",
            checksum="1daebcae52218d48e4bd79328754d2e6415f80331c8b87f39ed289c4f4ec810a",
        )

        skull_stripping = npe.Node(
            name="1-SkullStripping",
            interface=nutil.Function(
                input_names=["input_img", "binary_img", "output_filename"],
                output_names=["masked_image_path"],
                function=apply_binary_mask,
            ),
        )
        skull_stripping.inputs.binary_img = get_file_from_server(
            ICV_MASK, os.path.join("clinicadl", "t1-extensive"))

        # Connection
        # ==========
        self.connect([
            (self.input_node, caps_filename, [("norm_t1w", "norm_t1w")]),
            (self.input_node, skull_stripping, [("norm_t1w", "input_img")]),
            (caps_filename, skull_stripping, [("skull_stripped_t1w",
                                               "output_filename")]),
            (skull_stripping, self.output_node, [("masked_image_path",
                                                  "skull_stripped_t1w")]),
        ])
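The surrounding pipeline code keeps reusing one nipype pattern: wrap a plain Python function in nutil.Function, put it in an npe.Node, and wire nodes together with connect. A minimal self-contained sketch of that pattern, independent of Clinica (node names, fields and functions are made up for illustration), is:

import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe

def double(x):
    # nipype serialises the function source, so keep it self-contained.
    return 2 * x

def describe(y):
    return "result=%s" % y

doubler = npe.Node(
    name="Doubler",
    interface=nutil.Function(input_names=["x"], output_names=["y"], function=double),
)
doubler.inputs.x = 21

reporter = npe.Node(
    name="Reporter",
    interface=nutil.Function(input_names=["y"], output_names=["msg"], function=describe),
)

workflow = npe.Workflow(name="demo_workflow")
workflow.connect([(doubler, reporter, [("y", "y")])])
workflow.run()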
Example #6
def quality_check(
    caps_dir,
    output_path,
    tsv_path=None,
    threshold=0.5,
    batch_size=1,
    num_workers=0,
    gpu=True,
):

    logger = getLogger("clinicadl")

    if splitext(output_path)[1] != ".tsv":
        raise ValueError("Please provide an output path to a tsv file")

    # Fetch QC model
    home = str(Path.home())
    cache_clinicadl = join(home, ".cache", "clinicadl", "models")
    url_aramis = "https://aramislab.paris.inria.fr/files/data/models/dl/qc/"
    logger.info("Downloading quality check model.")
    FILE1 = RemoteFileStructure(
        filename="resnet18.pth.tar",
        url=url_aramis,
        checksum="a97a781be3820b06424fe891ec405c78b87ad51a27b6b81614dbdb996ce60104",
    )

    makedirs(cache_clinicadl, exist_ok=True)

    model_file = join(cache_clinicadl, FILE1.filename)

    if not (exists(model_file)):
        try:
            model_file = fetch_file(FILE1, cache_clinicadl)
        except IOError as err:
            print("Unable to download required model for QC process:", err)

    # Load QC model
    logger.debug("Loading quality check model.")
    model = resnet_qc_18()
    model.load_state_dict(torch.load(model_file))
    model.eval()
    if gpu:
        logger.debug("Working on GPU.")
        model.cuda()

    # Transform caps_dir in dict
    caps_dict = CapsDataset.create_caps_dict(caps_dir, multi_cohort=False)

    # Load DataFrame
    logger.debug("Loading data to check.")
    df = load_and_check_tsv(tsv_path, caps_dict, dirname(abspath(output_path)))

    dataset = QCDataset(caps_dir, df)
    dataloader = DataLoader(dataset,
                            num_workers=num_workers,
                            batch_size=batch_size,
                            pin_memory=True)

    columns = ["participant_id", "session_id", "pass_probability", "pass"]
    qc_df = pd.DataFrame(columns=columns)
    softmax = torch.nn.Softmax(dim=1)
    logger.info(
        f"Quality check will be performed over {len(dataloader)} images.")

    for data in dataloader:
        logger.debug(f"Processing subject {data['participant_id']}.")
        inputs = data["image"]
        if gpu:
            inputs = inputs.cuda()
        outputs = softmax.forward(model(inputs))

        for idx, sub in enumerate(data["participant_id"]):
            pass_probability = outputs[idx, 1].item()
            row = [[
                sub,
                data["session_id"][idx],
                pass_probability,
                pass_probability > threshold,
            ]]
            logger.debug(f"Quality score is {pass_probability}.")
            row_df = pd.DataFrame(row, columns=columns)
            qc_df = qc_df.append(row_df)

    qc_df.sort_values("pass_probability", ascending=False, inplace=True)
    qc_df.to_csv(output_path, sep="\t", index=False)
    logger.info(f"Results are stored at {output_path}.")
Example #7
    def build_input_node(self):
        """Build and connect an input node to the pipeline.
        """
        from os import pardir
        from os.path import dirname, join, abspath, exists
        from colorama import Fore
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import fetch_file, RemoteFileStructure
        from clinica.utils.ux import print_images_to_process
        from clinica.utils.stream import cprint

        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, 'resources', 'masks')
        url_aramis = 'https://aramislab.paris.inria.fr/files/data/img_t1_linear/'
        FILE1 = RemoteFileStructure(
            filename='ref_cropped_template.nii.gz',
            url=url_aramis,
            checksum='67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3')
        FILE2 = RemoteFileStructure(
            filename='mni_icbm152_t1_tal_nlin_sym_09c.nii',
            url=url_aramis,
            checksum='93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34')

        self.ref_template = join(path_to_mask, FILE2.filename)
        self.ref_crop = join(path_to_mask, FILE1.filename)

        if not (exists(self.ref_template)):
            try:
                fetch_file(FILE2, path_to_mask)
            except IOError as err:
                cprint(
                    'Unable to download required template (mni_icbm152) for processing:',
                    err)

        if not (exists(self.ref_crop)):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    'Unable to download required template (ref_crop) for processing:',
                    err)

        # Display image(s) already present in CAPS folder
        # ===============================================
        processed_ids = self.get_processed_images(self.caps_directory,
                                                  self.subjects, self.sessions)
        if len(processed_ids) > 0:
            cprint(
                "%sClinica found %s image(s) already processed in CAPS directory:%s"
                % (Fore.YELLOW, len(processed_ids), Fore.RESET))
            for image_id in processed_ids:
                cprint("%s\t%s%s" %
                       (Fore.YELLOW, image_id.replace('_', ' | '), Fore.RESET))
            cprint("%s\nImage(s) will be ignored by Clinica.\n%s" %
                   (Fore.YELLOW, Fore.RESET))
            input_ids = [
                p_id + '_' + s_id
                for p_id, s_id in zip(self.subjects, self.sessions)
            ]
            to_process_ids = list(set(input_ids) - set(processed_ids))
            self.subjects, self.sessions = extract_subjects_sessions_from_filename(
                to_process_ids)

        # Inputs from anat/ folder
        # ========================
        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err = ('Clinica faced error(s) while trying to read files in your BIDS directory.\n'
                   + str(e))
            raise ClinicaBIDSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint('The pipeline will last approximately 6 minutes per image.')

        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ('t1w', t1w_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()))
        self.connect([
            (read_node, self.input_node, [('t1w', 't1w')]),
        ])
Example #8
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        from os import pardir
        from os.path import abspath, dirname, exists, join

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import (
            ClinicaBIDSError,
            ClinicaCAPSError,
            ClinicaException,
        )
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.input_files import (
            T1W_NII,
            T1W_TO_MNI_TRANSFROM,
            bids_pet_nii,
        )
        from clinica.utils.inputs import (
            RemoteFileStructure,
            clinica_file_reader,
            fetch_file,
        )
        from clinica.utils.pet import get_suvr_mask
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        # from clinica.iotools.utils.data_handling import check_volume_location_in_world_coordinate_system
        # Import references files
        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, "resources", "masks")
        url_aramis = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/"
        FILE1 = RemoteFileStructure(
            filename="mni_icbm152_t1_tal_nlin_sym_09c.nii",
            url=url_aramis,
            checksum="93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34",
        )
        FILE2 = RemoteFileStructure(
            filename="ref_cropped_template.nii.gz",
            url=url_aramis,
            checksum="67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3",
        )

        self.ref_template = join(path_to_mask, FILE1.filename)
        self.ref_crop = join(path_to_mask, FILE2.filename)
        self.ref_mask = get_suvr_mask(self.parameters["suvr_reference_region"])

        if not (exists(self.ref_template)):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    msg=f"Unable to download required template (mni_icbm152) for processing: {err}",
                    lvl="error",
                )
        if not (exists(self.ref_crop)):
            try:
                fetch_file(FILE2, path_to_mask)
            except IOError as err:
                cprint(
                    msg=f"Unable to download required template (ref_crop) for processing: {err}",
                    lvl="error",
                )

        # Inputs from BIDS directory
        # pet file:
        PET_NII = bids_pet_nii(self.parameters["acq_label"])
        try:
            pet_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, PET_NII)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read pet files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err)

        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read t1w files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err)

        # Inputs from t1-linear pipeline
        # Transformation files from T1w files to MNI:
        try:
            t1w_to_mni_transformation_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory,
                T1W_TO_MNI_TRANSFROM)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read transformation files in your CAPS directory.\n"
                + str(e))
            raise ClinicaCAPSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last approximately 3 minutes per image.")

        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            iterables=[
                ("t1w", t1w_files),
                ("pet", pet_files),
                ("t1w_to_mni", t1w_to_mni_transformation_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        # fmt: off
        self.connect([
            (read_input_node, self.input_node, [("t1w", "t1w")]),
            (read_input_node, self.input_node, [("pet", "pet")]),
            (read_input_node, self.input_node, [("t1w_to_mni", "t1w_to_mni")]),
        ])
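Examples #7, #8 and #14 all repeat the same "use the local copy if it exists, otherwise fetch_file" logic. A small helper along these lines could factor it out; ensure_local_copy is a hypothetical name, and the assumption that fetch_file downloads the file into the target directory is based only on how it is used in the examples above.

from os.path import exists, join

from clinica.utils.inputs import RemoteFileStructure, fetch_file

def ensure_local_copy(remote_file: RemoteFileStructure, target_dir: str) -> str:
    """Hypothetical helper: return the local path of remote_file,
    downloading it into target_dir only when it is missing."""
    local_path = join(target_dir, remote_file.filename)
    if not exists(local_path):
        # fetch_file is expected to place the file in target_dir (see examples above).
        fetch_file(remote_file, target_dir)
    return local_path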
Example #9
def quality_check(caps_dir,
                  output_path,
                  preprocessing,
                  tsv_path=None,
                  threshold=0.5,
                  batch_size=1,
                  num_workers=0,
                  gpu=True):
    if preprocessing != "t1-linear":
        raise NotImplementedError(
            "The quality check procedure implemented in clinicadl is meant to be run "
            "on t1-linear preprocessing only.")

    if splitext(output_path)[1] != ".tsv":
        raise ValueError("Please provide an output path to a tsv file")

    # Fetch QC model
    home = str(Path.home())
    cache_clinicadl = join(home, '.cache', 'clinicadl', 'models')
    url_aramis = 'https://aramislab.paris.inria.fr/files/data/models/dl/qc/'
    FILE1 = RemoteFileStructure(
        filename='resnet18.pth.tar',
        url=url_aramis,
        checksum='a97a781be3820b06424fe891ec405c78b87ad51a27b6b81614dbdb996ce60104')

    makedirs(cache_clinicadl, exist_ok=True)

    model_file = join(cache_clinicadl, FILE1.filename)

    if not (exists(model_file)):
        try:
            model_file = fetch_file(FILE1, cache_clinicadl)
        except IOError as err:
            print('Unable to download required model for QC process:', err)

    # Load QC model
    model = resnet_qc_18()
    model.load_state_dict(torch.load(model_file))
    model.eval()
    if gpu:
        model.cuda()

    # Load DataFrame
    df = load_and_check_tsv(tsv_path, caps_dir, dirname(abspath(output_path)))

    dataset = QCDataset(caps_dir, df)
    dataloader = DataLoader(dataset,
                            num_workers=num_workers,
                            batch_size=batch_size,
                            pin_memory=True)

    columns = ['participant_id', 'session_id', 'pass_probability', 'pass']
    qc_df = pd.DataFrame(columns=columns)
    softmax = torch.nn.Softmax(dim=1)

    for data in dataloader:
        inputs = data['image']
        if gpu:
            inputs = inputs.cuda()
        outputs = softmax.forward(model(inputs))

        for idx, sub in enumerate(data['participant_id']):
            pass_probability = outputs[idx, 1].item()
            row = [[
                sub, data['session_id'][idx], pass_probability,
                pass_probability > threshold
            ]]
            row_df = pd.DataFrame(row, columns=columns)
            qc_df = qc_df.append(row_df)

    qc_df.sort_values("pass_probability", ascending=False, inplace=True)
    qc_df.to_csv(output_path, sep='\t', index=False)
Example #10
def generate_trivial_dataset(caps_dir,
                             output_dir,
                             n_subjects,
                             tsv_path=None,
                             preprocessing="linear",
                             mask_path=None,
                             atrophy_percent=60):
    """
    Generates a fully separable dataset.

    Generates a dataset, based on the images of the CAPS directory, where half
    of the images are processed using a mask to occlude a specific region. This
    procedure creates a fully separable dataset (images with the right half
    processed and images with the left half processed).

    Args:
        caps_dir: (str) path to the CAPS directory.
        output_dir: (str) folder containing the synthetic dataset in CAPS format.
        n_subjects: (int) number of subjects in each class of the synthetic
            dataset.
        tsv_path: (str) path to tsv file of list of subjects/sessions.
        preprocessing: (str) preprocessing performed. Must be in ['linear', 'extensive'].
        mask_path: (str) path to the extracted masks to generate the two labels.
        atrophy_percent: (float) percentage of atrophy applied.

    Returns:
        Folder structure where images are stored in CAPS format.

    Raises:
    """
    from pathlib import Path

    # Read DataFrame
    data_df = load_and_check_tsv(tsv_path, caps_dir, output_dir)
    data_df = baseline_df(data_df, "None")

    home = str(Path.home())
    cache_clinicadl = join(home, '.cache', 'clinicadl', 'ressources', 'masks')
    url_aramis = 'https://aramislab.paris.inria.fr/files/data/masks/'
    FILE1 = RemoteFileStructure(
        filename='AAL2.tar.gz',
        url=url_aramis,
        checksum='89427970921674792481bffd2de095c8fbf49509d615e7e09e4bc6f0e0564471')
    makedirs(cache_clinicadl, exist_ok=True)

    if n_subjects > len(data_df):
        raise ValueError(
            "The number of subjects %i cannot be higher than the number of subjects in the baseline "
            "DataFrame extracted from %s" % (n_subjects, tsv_path))

    if mask_path is None:
        if not exists(join(cache_clinicadl, 'AAL2')):
            try:
                print('Try to download AAL2 masks')
                mask_path_tar = fetch_file(FILE1, cache_clinicadl)
                tar_file = tarfile.open(mask_path_tar)
                print('File: ' + mask_path_tar)
                try:
                    tar_file.extractall(cache_clinicadl)
                    tar_file.close()
                    mask_path = join(cache_clinicadl, 'AAL2')
                except RuntimeError:
                    print('Unable to extract downloaded files')
            except IOError as err:
                print('Unable to download required templates:', err)
                raise ValueError(
                    '''Unable to download masks, please download them
                                  manually at https://aramislab.paris.inria.fr/files/data/masks/
                                  and provide a valid path.''')
        else:
            mask_path = join(cache_clinicadl, 'AAL2')

    # Create subjects dir
    makedirs(join(output_dir, 'subjects'), exist_ok=True)

    # Output tsv file
    columns = ['participant_id', 'session_id', 'diagnosis', 'age_bl', 'sex']
    output_df = pd.DataFrame(columns=columns)
    diagnosis_list = ["AD", "CN"]

    for i in range(2 * n_subjects):
        data_idx = i // 2
        label = i % 2

        participant_id = data_df.loc[data_idx, "participant_id"]
        session_id = data_df.loc[data_idx, "session_id"]
        filename = 'sub-TRIV%i_ses-M00' % i + FILENAME_TYPE[
            'cropped'] + '.nii.gz'
        path_image = join(output_dir, 'subjects', 'sub-TRIV%i' % i, 'ses-M00',
                          't1_linear')

        makedirs(path_image, exist_ok=True)

        image_path = find_image_path(caps_dir, participant_id, session_id,
                                     preprocessing)
        image_nii = nib.load(image_path)
        image = image_nii.get_data()

        atlas_to_mask = nib.load(join(mask_path,
                                      'mask-%i.nii' % (label + 1))).get_data()

        # Create atrophied image
        trivial_image = im_loss_roi_gaussian_distribution(
            image, atlas_to_mask, atrophy_percent)
        trivial_image_nii = nib.Nifti1Image(trivial_image,
                                            affine=image_nii.affine)
        trivial_image_nii.to_filename(join(path_image, filename))

        # Append row to output tsv
        row = ['sub-TRIV%i' % i, 'ses-M00', diagnosis_list[label], 60, 'F']
        row_df = pd.DataFrame([row], columns=columns)
        output_df = output_df.append(row_df)

    output_df.to_csv(join(output_dir, 'data.tsv'), sep='\t', index=False)

    missing_path = join(output_dir, "missing_mods")
    makedirs(missing_path, exist_ok=True)

    sessions = data_df.session_id.unique()
    for session in sessions:
        session_df = data_df[data_df.session_id == session]
        out_df = copy(session_df[["participant_id"]])
        out_df["synthetic"] = [1] * len(out_df)
        out_df.to_csv(join(missing_path, "missing_mods_%s.tsv" % session),
                      sep="\t",
                      index=False)
Example #11
File: generate.py  Project: 14thibea/AD-DL
def generate_trivial_dataset(
    caps_directory: str,
    output_dir: str,
    n_subjects: int,
    tsv_path: Optional[str] = None,
    preprocessing: str = "t1-linear",
    mask_path: Optional[str] = None,
    atrophy_percent: float = 60,
    multi_cohort: bool = False,
    uncropped_image: bool = False,
    acq_label: str = "fdg",
    suvr_reference_region: str = "pons",
):
    """
    Generates a fully separable dataset.

    Generates a dataset, based on the images of the CAPS directory, where half
    of the images are processed using a mask to occlude a specific region. This
    procedure creates a fully separable dataset (images with the right half
    processed and images with the left half processed).

    Args:
        caps_directory: path to the CAPS directory.
        output_dir: folder containing the synthetic dataset in CAPS format.
        n_subjects: number of subjects in each class of the synthetic dataset.
        tsv_path: path to tsv file of list of subjects/sessions.
        preprocessing: preprocessing performed. Must be in ['linear', 'extensive'].
        mask_path: path to the extracted masks to generate the two labels.
        atrophy_percent: percentage of atrophy applied.
        multi_cohort: If True caps_directory is the path to a TSV file linking cohort names and paths.
        uncropped_image: If True the uncropped image of `t1-linear` or `pet-linear` will be used.
        acq_label: name of the tracer when using `pet-linear` preprocessing.
        suvr_reference_region: name of the reference region when using `pet-linear` preprocessing.

    Returns:
        Folder structure where images are stored in CAPS format.

    Raises:
        ValueError: if `n_subjects` is higher than the length of the TSV file at `tsv_path`.
    """
    from pathlib import Path

    commandline_to_json(
        {
            "output_dir": output_dir,
            "caps_dir": caps_directory,
            "preprocessing": preprocessing,
            "n_subjects": n_subjects,
            "atrophy_percent": atrophy_percent,
        }
    )

    # Transform caps_directory in dict
    caps_dict = CapsDataset.create_caps_dict(caps_directory, multi_cohort=multi_cohort)

    # Read DataFrame
    data_df = load_and_check_tsv(tsv_path, caps_dict, output_dir)
    data_df = extract_baseline(data_df)

    home = str(Path.home())
    cache_clinicadl = join(home, ".cache", "clinicadl", "ressources", "masks")
    url_aramis = "https://aramislab.paris.inria.fr/files/data/masks/"
    FILE1 = RemoteFileStructure(
        filename="AAL2.tar.gz",
        url=url_aramis,
        checksum="89427970921674792481bffd2de095c8fbf49509d615e7e09e4bc6f0e0564471",
    )
    makedirs(cache_clinicadl, exist_ok=True)

    if n_subjects > len(data_df):
        raise ValueError(
            f"The number of subjects {n_subjects} cannot be higher "
            f"than the number of subjects in the baseline dataset of size {len(data_df)}"
        )

    if mask_path is None:
        if not exists(join(cache_clinicadl, "AAL2")):
            try:
                print("Try to download AAL2 masks")
                mask_path_tar = fetch_file(FILE1, cache_clinicadl)
                tar_file = tarfile.open(mask_path_tar)
                print("File: " + mask_path_tar)
                try:
                    tar_file.extractall(cache_clinicadl)
                    tar_file.close()
                    mask_path = join(cache_clinicadl, "AAL2")
                except RuntimeError:
                    print("Unable to extract downloaded files")
            except IOError as err:
                print("Unable to download required templates:", err)
                raise ValueError(
                    """Unable to download masks, please download them
                                  manually at https://aramislab.paris.inria.fr/files/data/masks/
                                  and provide a valid path."""
                )
        else:
            mask_path = join(cache_clinicadl, "AAL2")

    # Create subjects dir
    makedirs(join(output_dir, "subjects"), exist_ok=True)

    # Output tsv file
    columns = ["participant_id", "session_id", "diagnosis", "age_bl", "sex"]
    output_df = pd.DataFrame(columns=columns)
    diagnosis_list = ["AD", "CN"]

    # Find appropriate preprocessing file type
    file_type = find_file_type(
        preprocessing, uncropped_image, acq_label, suvr_reference_region
    )

    for i in range(2 * n_subjects):
        data_idx = i // 2
        label = i % 2

        participant_id = data_df.loc[data_idx, "participant_id"]
        session_id = data_df.loc[data_idx, "session_id"]
        cohort = data_df.loc[data_idx, "cohort"]
        image_paths = clinica_file_reader(
            [participant_id], [session_id], caps_dict[cohort], file_type
        )
        image_nii = nib.load(image_paths[0])
        image = image_nii.get_data()

        input_filename = basename(image_paths[0])
        filename_pattern = "_".join(input_filename.split("_")[2::])

        trivial_image_nii_dir = join(
            output_dir, "subjects", f"sub-TRIV{i}", session_id, preprocessing
        )
        trivial_image_nii_filename = f"sub-TRIV{i}_{session_id}_{filename_pattern}"

        makedirs(trivial_image_nii_dir, exist_ok=True)

        atlas_to_mask = nib.load(join(mask_path, f"mask-{label + 1}.nii")).get_data()

        # Create atrophied image
        trivial_image = im_loss_roi_gaussian_distribution(
            image, atlas_to_mask, atrophy_percent
        )
        trivial_image_nii = nib.Nifti1Image(trivial_image, affine=image_nii.affine)
        trivial_image_nii.to_filename(
            join(trivial_image_nii_dir, trivial_image_nii_filename)
        )
        print(join(trivial_image_nii_dir, trivial_image_nii_filename))

        # Append row to output tsv
        row = [f"sub-TRIV{i}", session_id, diagnosis_list[label], 60, "F"]
        row_df = pd.DataFrame([row], columns=columns)
        output_df = output_df.append(row_df)

    output_df.to_csv(join(output_dir, "data.tsv"), sep="\t", index=False)

    write_missing_mods(output_dir, output_df)
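A hedged usage sketch of the function above; paths are placeholders and the argument values simply echo the defaults documented in the docstring.

generate_trivial_dataset(
    caps_directory="/path/to/caps",        # existing CAPS folder with t1-linear outputs
    output_dir="/path/to/synthetic_caps",  # synthetic dataset written here in CAPS format
    n_subjects=10,                         # 10 synthetic "AD" + 10 synthetic "CN" subjects
    tsv_path=None,                         # None: use every baseline session of the CAPS folder
    preprocessing="t1-linear",
    mask_path=None,                        # None: fall back to the downloaded AAL2 masks
    atrophy_percent=60,
)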
Example #12
File: utils.py  Project: ravih18/AD-DL
def extract_metrics(caps_dir, output_dir, group_label):
    if not path.exists(output_dir):
        os.makedirs(output_dir)

    # Load eyes segmentation
    home = str(Path.home())
    cache_clinicadl = path.join(home, ".cache", "clinicadl", "segmentation")
    url_aramis = "https://aramislab.paris.inria.fr/files/data/template/"
    FILE1 = RemoteFileStructure(
        filename="eyes_segmentation.nii.gz",
        url=url_aramis,
        checksum="56f699c06cafc62ad8bb5b41b188c7c412d684d810a11d6f4cbb441c0ce944ee",
    )

    if not (path.exists(cache_clinicadl)):
        os.makedirs(cache_clinicadl)

    segmentation_file = path.join(cache_clinicadl, FILE1.filename)

    if not (path.exists(segmentation_file)):
        try:
            segmentation_file = fetch_file(FILE1, cache_clinicadl)
        except IOError as err:
            raise IOError(
                "Unable to download required eyes segmentation for QC:", err)

    segmentation_nii = nib.load(segmentation_file)
    segmentation_np = segmentation_nii.get_fdata()

    # Get the GM template
    template_path = path.join(
        caps_dir,
        "groups",
        f"group-{group_label}",
        "t1",
        f"group-{group_label}_template.nii.gz",
    )
    template_nii = nib.load(template_path)
    template_np = template_nii.get_fdata()
    template_np = np.sum(template_np, axis=3)
    template_segmentation_np = template_np * segmentation_np

    # Get the data
    filename = path.join(output_dir, "QC_metrics.tsv")
    columns = [
        "participant_id",
        "session_id",
        "max_intensity",
        "non_zero_percentage",
        "frontal_similarity",
    ]
    results_df = pd.DataFrame()

    subjects = os.listdir(path.join(caps_dir, "subjects"))
    subjects = [subject for subject in subjects if subject[:4:] == "sub-"]
    for subject in subjects:
        subject_path = path.join(caps_dir, "subjects", subject)
        sessions = os.listdir(subject_path)
        sessions = [session for session in sessions if session[:4:] == "ses-"]
        for session in sessions:
            image_path = path.join(
                subject_path,
                session,
                "t1",
                "spm",
                "segmentation",
                "normalized_space",
                subject + "_" + session +
                "_T1w_segm-graymatter_space-Ixi549Space_modulated-off_probability.nii.gz",
            )

            if path.exists(image_path):
                # GM analysis
                image_nii = nib.load(image_path)
                image_np = image_nii.get_fdata()
                image_segmentation_np = image_np * segmentation_np
                eyes_nmi_value = nmi(
                    occlusion1=template_segmentation_np,
                    occlusion2=image_segmentation_np,
                )

                non_zero_percentage = np.count_nonzero(
                    image_np) / image_np.size

                row = [[
                    subject,
                    session,
                    np.max(image_np),
                    non_zero_percentage,
                    eyes_nmi_value,
                ]]
                row_df = pd.DataFrame(row, columns=columns)
                results_df = pd.concat([results_df, row_df])

    results_df.sort_values("max_intensity", inplace=True, ascending=True)
    results_df.to_csv(filename, sep="\t", index=False)
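A hedged usage sketch for extract_metrics; the CAPS path, output folder and group label are placeholders.

extract_metrics(
    caps_dir="/path/to/caps",         # CAPS folder holding the t1-volume outputs
    output_dir="/path/to/qc_output",  # QC_metrics.tsv is written here
    group_label="ADCN",               # name of the group whose group-<label>_template.nii.gz is read
)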
Example #13
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline."""
        import clinica.pipelines.statistics_volume_correction.statistics_volume_correction_utils as utils
        from clinica.utils.inputs import fetch_file, RemoteFileStructure
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from os.path import join, abspath, pardir, dirname, exists
        import numpy as np

        peak_correction_FWE = npe.Node(
            name='peak_correction_FWE',
            interface=nutil.Function(input_names=['t_map', 't_threshold'],
                                     output_names=['output'],
                                     function=utils.peak_correction))
        peak_correction_FWE.inputs.t_threshold = self.parameters['FWEp']

        peak_correction_FDR = peak_correction_FWE.clone(
            name='peak_correction_FDR')
        peak_correction_FDR.inputs.t_threshold = self.parameters['FDRp']

        cluster_correction_FWE = npe.Node(
            name='cluster_correction_FWE',
            interface=nutil.Function(
                input_names=['t_map', 't_thresh', 'c_thresh'],
                output_names=['output'],
                function=utils.cluster_correction))
        cluster_correction_FWE.inputs.t_thresh = self.parameters[
            'height_threshold']
        cluster_correction_FWE.inputs.c_thresh = self.parameters['FWEc']

        cluster_correction_FDR = cluster_correction_FWE.clone(
            name='cluster_correction_FDR')
        cluster_correction_FDR.inputs.t_thresh = self.parameters[
            'height_threshold']
        cluster_correction_FDR.inputs.c_thresh = self.parameters['FDRc']

        produce_fig_FWE_peak_correction = npe.Node(
            name='produce_figure_FWE_peak_correction',
            interface=nutil.Function(input_names=[
                'nii_file', 'template', 'type_of_correction', 't_thresh',
                'c_thresh', 'n_cuts'
            ],
                                     output_names=['figs'],
                                     function=utils.produce_figures))
        produce_fig_FWE_peak_correction.inputs.n_cuts = self.parameters[
            'n_cuts']

        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, 'resources', 'masks')
        url_aramis = 'https://aramislab.paris.inria.fr/files/data/img_t1_linear/'
        FILE1 = RemoteFileStructure(
            filename='mni_icbm152_t1_tal_nlin_sym_09a.nii.gz',
            url=url_aramis,
            checksum='3b244ee7e287319d36a25263744c468ef0ab2fe5a94b15a2138844db73b49adf')

        produce_fig_FWE_peak_correction.inputs.template = join(
            path_to_mask, FILE1.filename)
        if not (exists(produce_fig_FWE_peak_correction.inputs.template)):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    'Unable to download required template (mni_icbm152) for processing:',
                    err)

        produce_fig_FDR_peak_correction = produce_fig_FWE_peak_correction.clone(
            name='produce_figure_FDR_peak_correction')
        produce_fig_FWE_cluster_correction = produce_fig_FWE_peak_correction.clone(
            name='produce_figure_FWE_cluster_correction')
        produce_fig_FDR_cluster_correction = produce_fig_FWE_peak_correction.clone(
            name='produce_figure_FDR_cluster_correction')

        produce_fig_FWE_peak_correction.inputs.type_of_correction = 'FWE'
        produce_fig_FDR_peak_correction.inputs.type_of_correction = 'FDR'
        produce_fig_FWE_cluster_correction.inputs.type_of_correction = 'FWE'
        produce_fig_FDR_cluster_correction.inputs.type_of_correction = 'FDR'

        produce_fig_FWE_peak_correction.inputs.t_thresh = self.parameters[
            'FWEp']
        produce_fig_FDR_peak_correction.inputs.t_thresh = self.parameters[
            'FDRp']
        produce_fig_FWE_cluster_correction.inputs.t_thresh = self.parameters[
            'height_threshold']
        produce_fig_FDR_cluster_correction.inputs.t_thresh = self.parameters[
            'height_threshold']

        produce_fig_FWE_peak_correction.inputs.c_thresh = np.nan
        produce_fig_FDR_peak_correction.inputs.c_thresh = np.nan
        produce_fig_FWE_cluster_correction.inputs.c_thresh = self.parameters[
            'FWEc']
        produce_fig_FDR_cluster_correction.inputs.c_thresh = self.parameters[
            'FDRc']

        save_fig_peak_correction_FWE = npe.Node(
            name='save_figure_peak_correction_FWE',
            interface=nutil.Function(input_names=['t_map', 'figs', 'name'],
                                     output_names=[],
                                     function=utils.generate_output))
        save_fig_peak_correction_FWE.inputs.name = 'FWEp'

        save_fig_peak_correction_FDR = save_fig_peak_correction_FWE.clone(
            name='save_fig_peak_correction_FDR')
        save_fig_peak_correction_FDR.inputs.name = 'FDRp'

        save_fig_cluster_correction_FWE = save_fig_peak_correction_FWE.clone(
            name='save_fig_cluster_correction_FWE')
        save_fig_cluster_correction_FWE.inputs.name = 'FWEc'

        save_fig_cluster_correction_FDR = save_fig_peak_correction_FWE.clone(
            name='save_fig_cluster_correction_FDR')
        save_fig_cluster_correction_FDR.inputs.name = 'FDRc'

        # Connection
        # ==========
        self.connect([
            (self.input_node, peak_correction_FWE, [('t_map', 't_map')]),
            (self.input_node, peak_correction_FDR, [('t_map', 't_map')]),
            (self.input_node, cluster_correction_FWE, [('t_map', 't_map')]),
            (self.input_node, cluster_correction_FDR, [('t_map', 't_map')]),
            (peak_correction_FWE, produce_fig_FWE_peak_correction,
             [('output', 'nii_file')]),
            (peak_correction_FDR, produce_fig_FDR_peak_correction,
             [('output', 'nii_file')]),
            (cluster_correction_FWE, produce_fig_FWE_cluster_correction,
             [('output', 'nii_file')]),
            (cluster_correction_FDR, produce_fig_FDR_cluster_correction,
             [('output', 'nii_file')]),
            (produce_fig_FWE_peak_correction, save_fig_peak_correction_FWE,
             [('figs', 'figs')]),
            (produce_fig_FDR_peak_correction, save_fig_peak_correction_FDR,
             [('figs', 'figs')]),
            (produce_fig_FWE_cluster_correction,
             save_fig_cluster_correction_FWE, [('figs', 'figs')]),
            (produce_fig_FDR_cluster_correction,
             save_fig_cluster_correction_FDR, [('figs', 'figs')]),
            (self.input_node, save_fig_peak_correction_FWE, [('t_map', 't_map')
                                                             ]),
            (self.input_node, save_fig_peak_correction_FDR, [('t_map', 't_map')
                                                             ]),
            (self.input_node, save_fig_cluster_correction_FWE, [('t_map',
                                                                 't_map')]),
            (self.input_node, save_fig_cluster_correction_FDR, [('t_map',
                                                                 't_map')])
        ])
Example #14
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        from os import pardir
        from os.path import abspath, dirname, exists, join

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.inputs import (
            RemoteFileStructure,
            clinica_file_reader,
            fetch_file,
        )
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process

        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, "resources", "masks")
        url_aramis = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/"
        FILE1 = RemoteFileStructure(
            filename="ref_cropped_template.nii.gz",
            url=url_aramis,
            checksum="67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3",
        )
        FILE2 = RemoteFileStructure(
            filename="mni_icbm152_t1_tal_nlin_sym_09c.nii",
            url=url_aramis,
            checksum="93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34",
        )

        self.ref_template = join(path_to_mask, FILE2.filename)
        self.ref_crop = join(path_to_mask, FILE1.filename)

        if not (exists(self.ref_template)):
            try:
                fetch_file(FILE2, path_to_mask)
            except IOError as err:
                cprint(
                    msg=f"Unable to download required template (mni_icbm152) for processing: {err}",
                    lvl="error",
                )

        if not (exists(self.ref_crop)):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    msg=f"Unable to download required template (ref_crop) for processing: {err}",
                    lvl="error",
                )

        # Display image(s) already present in CAPS folder
        # ===============================================
        processed_ids = self.get_processed_images(self.caps_directory,
                                                  self.subjects, self.sessions)
        if len(processed_ids) > 0:
            cprint(
                msg=f"Clinica found {len(processed_ids)} image(s) already processed in CAPS directory:",
                lvl="warning",
            )
            for image_id in processed_ids:
                cprint(msg=f"{image_id.replace('_', ' | ')}", lvl="warning")
            cprint(msg="Image(s) will be ignored by Clinica.", lvl="warning")
            input_ids = [
                p_id + "_" + s_id
                for p_id, s_id in zip(self.subjects, self.sessions)
            ]
            to_process_ids = list(set(input_ids) - set(processed_ids))
            self.subjects, self.sessions = extract_subjects_sessions_from_filename(
                to_process_ids)

        # Inputs from anat/ folder
        # ========================
        # T1w file:
        try:
            t1w_files = clinica_file_reader(self.subjects, self.sessions,
                                            self.bids_directory, T1W_NII)
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read files in your BIDS directory.\n"
                + str(e))
            raise ClinicaBIDSError(err)

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last approximately 6 minutes per image.")

        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ("t1w", t1w_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        self.connect([
            (read_node, self.input_node, [("t1w", "t1w")]),
        ])
Example #15
File: generate.py  Project: mdiazmel/AD-DL
def generate_trivial_dataset(
    caps_directory,
    output_dir,
    n_subjects,
    tsv_path=None,
    preprocessing="t1-linear",
    mask_path=None,
    atrophy_percent=60,
    multi_cohort=False,
):
    """
    Generates a fully separable dataset.

    Generates a dataset, based on the images of the CAPS directory, where half
    of the images are processed using a mask to occlude a specific region. This
    procedure creates a fully separable dataset (images with the right half
    processed and images with the left half processed).

    Args:
        caps_directory: (str) path to the CAPS directory.
        output_dir: (str) folder containing the synthetic dataset in CAPS format.
        n_subjects: (int) number of subjects in each class of the synthetic
            dataset.
        tsv_path: (str) path to tsv file of list of subjects/sessions.
        preprocessing: (str) preprocessing performed. Must be in ['linear', 'extensive'].
        mask_path: (str) path to the extracted masks to generate the two labels.
        atrophy_percent: (float) percentage of atrophy applied.
        multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.

    Returns:
        Folder structure where images are stored in CAPS format.

    Raises:
        ValueError: if `n_subjects` is higher than the length of the TSV file at `tsv_path`.
    """
    from pathlib import Path

    commandline_to_json({
        "output_dir": output_dir,
        "caps_dir": caps_directory,
        "preprocessing": preprocessing,
        "n_subjects": n_subjects,
        "atrophy_percent": atrophy_percent,
    })

    # Transform caps_directory in dict
    caps_dict = CapsDataset.create_caps_dict(caps_directory,
                                             multi_cohort=multi_cohort)

    # Read DataFrame
    data_df = load_and_check_tsv(tsv_path, caps_dict, output_dir)
    data_df = extract_baseline(data_df)

    home = str(Path.home())
    cache_clinicadl = join(home, ".cache", "clinicadl", "ressources", "masks")
    url_aramis = "https://aramislab.paris.inria.fr/files/data/masks/"
    FILE1 = RemoteFileStructure(
        filename="AAL2.tar.gz",
        url=url_aramis,
        checksum="89427970921674792481bffd2de095c8fbf49509d615e7e09e4bc6f0e0564471",
    )
    makedirs(cache_clinicadl, exist_ok=True)

    if n_subjects > len(data_df):
        raise ValueError(
            f"The number of subjects {n_subjects} cannot be higher "
            f"than the number of subjects in the baseline dataset of size {len(data_df)}"
        )

    if mask_path is None:
        if not exists(join(cache_clinicadl, "AAL2")):
            try:
                print("Try to download AAL2 masks")
                mask_path_tar = fetch_file(FILE1, cache_clinicadl)
                tar_file = tarfile.open(mask_path_tar)
                print("File: " + mask_path_tar)
                try:
                    tar_file.extractall(cache_clinicadl)
                    tar_file.close()
                    mask_path = join(cache_clinicadl, "AAL2")
                except RuntimeError:
                    print("Unable to extract downloaded files")
            except IOError as err:
                print("Unable to download required templates:", err)
                raise ValueError(
                    """Unable to download masks, please download them
                                  manually at https://aramislab.paris.inria.fr/files/data/masks/
                                  and provide a valid path.""")
        else:
            mask_path = join(cache_clinicadl, "AAL2")

    # Create subjects dir
    makedirs(join(output_dir, "subjects"), exist_ok=True)

    # Output tsv file
    columns = ["participant_id", "session_id", "diagnosis", "age_bl", "sex"]
    output_df = pd.DataFrame(columns=columns)
    diagnosis_list = ["AD", "CN"]

    for i in range(2 * n_subjects):
        data_idx = i // 2
        label = i % 2

        participant_id = data_df.loc[data_idx, "participant_id"]
        session_id = data_df.loc[data_idx, "session_id"]
        cohort = data_df.loc[data_idx, "cohort"]
        filename = f"sub-TRIV{i}_ses-M00" + FILENAME_TYPE["cropped"] + ".nii.gz"
        path_image = join(output_dir, "subjects", f"sub-TRIV{i}", "ses-M00",
                          "t1_linear")

        makedirs(path_image, exist_ok=True)

        image_path = find_image_path(caps_dict, participant_id, session_id,
                                     cohort, preprocessing)
        image_nii = nib.load(image_path)
        image = image_nii.get_data()

        atlas_to_mask = nib.load(join(mask_path,
                                      f"mask-{label + 1}.nii")).get_data()

        # Create atrophied image
        trivial_image = im_loss_roi_gaussian_distribution(
            image, atlas_to_mask, atrophy_percent)
        trivial_image_nii = nib.Nifti1Image(trivial_image,
                                            affine=image_nii.affine)
        trivial_image_nii.to_filename(join(path_image, filename))

        # Append row to output tsv
        row = [f"sub-TRIV{i}", "ses-M00", diagnosis_list[label], 60, "F"]
        row_df = pd.DataFrame([row], columns=columns)
        output_df = output_df.append(row_df)

    output_df.to_csv(join(output_dir, "data.tsv"), sep="\t", index=False)

    missing_path = join(output_dir, "missing_mods")
    makedirs(missing_path, exist_ok=True)

    sessions = output_df.session_id.unique()
    for session in sessions:
        session_df = output_df[output_df.session_id == session]
        out_df = copy(session_df[["participant_id"]])
        out_df["synthetic"] = [1] * len(out_df)
        out_df.to_csv(join(missing_path, f"missing_mods_{session}.tsv"),
                      sep="\t",
                      index=False)
Example #16
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline."""
        from os.path import abspath, dirname, exists, join, pardir

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        import numpy as np

        import clinica.pipelines.statistics_volume_correction.statistics_volume_correction_utils as utils
        from clinica.utils.inputs import RemoteFileStructure, fetch_file
        from clinica.utils.stream import cprint

        peak_correction_FWE = npe.Node(
            name="peak_correction_FWE",
            interface=nutil.Function(
                input_names=["t_map", "t_threshold"],
                output_names=["output"],
                function=utils.peak_correction,
            ),
        )
        peak_correction_FWE.inputs.t_threshold = self.parameters["FWEp"]

        peak_correction_FDR = peak_correction_FWE.clone(
            name="peak_correction_FDR")
        peak_correction_FDR.inputs.t_threshold = self.parameters["FDRp"]

        cluster_correction_FWE = npe.Node(
            name="cluster_correction_FWE",
            interface=nutil.Function(
                input_names=["t_map", "t_thresh", "c_thresh"],
                output_names=["output"],
                function=utils.cluster_correction,
            ),
        )
        cluster_correction_FWE.inputs.t_thresh = self.parameters[
            "height_threshold"]
        cluster_correction_FWE.inputs.c_thresh = self.parameters["FWEc"]

        cluster_correction_FDR = cluster_correction_FWE.clone(
            name="cluster_correction_FDR")
        cluster_correction_FDR.inputs.t_thresh = self.parameters[
            "height_threshold"]
        cluster_correction_FDR.inputs.c_thresh = self.parameters["FDRc"]

        produce_fig_FWE_peak_correction = npe.Node(
            name="produce_figure_FWE_peak_correction",
            interface=nutil.Function(
                input_names=[
                    "nii_file",
                    "template",
                    "type_of_correction",
                    "t_thresh",
                    "c_thresh",
                    "n_cuts",
                ],
                output_names=["figs"],
                function=utils.produce_figures,
            ),
        )
        produce_fig_FWE_peak_correction.inputs.n_cuts = self.parameters[
            "n_cuts"]

        root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
        path_to_mask = join(root, "resources", "masks")
        url_aramis = "https://aramislab.paris.inria.fr/files/data/img_t1_linear/"
        FILE1 = RemoteFileStructure(
            filename="mni_icbm152_t1_tal_nlin_sym_09a.nii.gz",
            url=url_aramis,
            checksum="3b244ee7e287319d36a25263744c468ef0ab2fe5a94b15a2138844db73b49adf",
        )

        produce_fig_FWE_peak_correction.inputs.template = join(
            path_to_mask, FILE1.filename)
        if not (exists(produce_fig_FWE_peak_correction.inputs.template)):
            try:
                fetch_file(FILE1, path_to_mask)
            except IOError as err:
                cprint(
                    msg=f"Unable to download required template (mni_icbm152) for processing: {err}",
                    lvl="error",
                )

        produce_fig_FDR_peak_correction = produce_fig_FWE_peak_correction.clone(
            name="produce_figure_FDR_peak_correction")
        produce_fig_FWE_cluster_correction = produce_fig_FWE_peak_correction.clone(
            name="produce_figure_FWE_cluster_correction")
        produce_fig_FDR_cluster_correction = produce_fig_FWE_peak_correction.clone(
            name="produce_figure_FDR_cluster_correction")

        # fmt: off
        produce_fig_FWE_peak_correction.inputs.type_of_correction = "FWE"
        produce_fig_FDR_peak_correction.inputs.type_of_correction = "FDR"
        produce_fig_FWE_cluster_correction.inputs.type_of_correction = "FWE"
        produce_fig_FDR_cluster_correction.inputs.type_of_correction = "FDR"

        produce_fig_FWE_peak_correction.inputs.t_thresh = self.parameters[
            "FWEp"]
        produce_fig_FDR_peak_correction.inputs.t_thresh = self.parameters[
            "FDRp"]
        produce_fig_FWE_cluster_correction.inputs.t_thresh = self.parameters[
            "height_threshold"]
        produce_fig_FDR_cluster_correction.inputs.t_thresh = self.parameters[
            "height_threshold"]

        produce_fig_FWE_peak_correction.inputs.c_thresh = np.nan
        produce_fig_FDR_peak_correction.inputs.c_thresh = np.nan
        produce_fig_FWE_cluster_correction.inputs.c_thresh = self.parameters[
            "FWEc"]
        produce_fig_FDR_cluster_correction.inputs.c_thresh = self.parameters[
            "FDRc"]
        # fmt: on

        save_fig_peak_correction_FWE = npe.Node(
            name="save_figure_peak_correction_FWE",
            interface=nutil.Function(
                input_names=["t_map", "figs", "name"],
                output_names=[],
                function=utils.generate_output,
            ),
        )
        save_fig_peak_correction_FWE.inputs.name = "FWEp"

        save_fig_peak_correction_FDR = save_fig_peak_correction_FWE.clone(
            name="save_fig_peak_correction_FDR")
        save_fig_peak_correction_FDR.inputs.name = "FDRp"

        save_fig_cluster_correction_FWE = save_fig_peak_correction_FWE.clone(
            name="save_fig_cluster_correction_FWE")
        save_fig_cluster_correction_FWE.inputs.name = "FWEc"

        save_fig_cluster_correction_FDR = save_fig_peak_correction_FWE.clone(
            name="save_fig_cluster_correction_FDR")
        save_fig_cluster_correction_FDR.inputs.name = "FDRc"

        # Connection
        # ==========
        # fmt: off
        self.connect([
            (self.input_node, peak_correction_FWE, [("t_map", "t_map")]),
            (self.input_node, peak_correction_FDR, [("t_map", "t_map")]),
            (self.input_node, cluster_correction_FWE, [("t_map", "t_map")]),
            (self.input_node, cluster_correction_FDR, [("t_map", "t_map")]),
            (peak_correction_FWE, produce_fig_FWE_peak_correction,
             [("output", "nii_file")]),
            (peak_correction_FDR, produce_fig_FDR_peak_correction,
             [("output", "nii_file")]),
            (cluster_correction_FWE, produce_fig_FWE_cluster_correction,
             [("output", "nii_file")]),
            (cluster_correction_FDR, produce_fig_FDR_cluster_correction,
             [("output", "nii_file")]),
            (produce_fig_FWE_peak_correction, save_fig_peak_correction_FWE,
             [("figs", "figs")]),
            (produce_fig_FDR_peak_correction, save_fig_peak_correction_FDR,
             [("figs", "figs")]),
            (produce_fig_FWE_cluster_correction,
             save_fig_cluster_correction_FWE, [("figs", "figs")]),
            (produce_fig_FDR_cluster_correction,
             save_fig_cluster_correction_FDR, [("figs", "figs")]),
            (self.input_node, save_fig_peak_correction_FWE, [("t_map", "t_map")
                                                             ]),
            (self.input_node, save_fig_peak_correction_FDR, [("t_map", "t_map")
                                                             ]),
            (self.input_node, save_fig_cluster_correction_FWE, [("t_map",
                                                                 "t_map")]),
            (self.input_node, save_fig_cluster_correction_FDR, [("t_map",
                                                                 "t_map")]),
        ])