Example #1
def init_input_node(pet_nii):
    import datetime

    import nibabel as nib
    from colorama import Fore

    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.stream import cprint
    from clinica.utils.ux import print_begin_image

    # Extract image ID
    image_id = get_subject_id(pet_nii)

    # Check that the PET file is a 3D volume
    img = nib.load(pet_nii)
    if len(img.shape) == 4:
        now = datetime.datetime.now().strftime("%H:%M:%S")
        error_msg = (
            f"{Fore.RED}[{now}] Error: Clinica does not handle 4D volumes "
            f"for {image_id.replace('_', ' | ')}{Fore.RESET}")
        cprint(error_msg)
        raise NotImplementedError(error_msg)

    # Print begin message
    print_begin_image(image_id)

    return pet_nii
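
In Clinica, functions such as init_input_node are usually not called directly but wrapped in a Nipype Function interface so they can run as the first node of a workflow. The sketch below shows one plausible wrapping (the node name is illustrative, not taken from the Clinica sources):

# Minimal sketch (assumed usage; the node name is illustrative):
# run init_input_node from Example #1 as the entry node of a Nipype workflow.
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe

init_node = npe.Node(
    interface=nutil.Function(
        input_names=["pet_nii"],
        output_names=["pet_nii"],
        function=init_input_node,
    ),
    name="0-InitPipeline",
)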
Example #2
def init_input_node(t1w, recon_all_args, output_dir):
    """Initialize the pipeline.

    This function will:
        - Extract <image_id> (e.g. sub-CLNC01_ses-M00) from the T1w filename;
        - Check FOV of T1w;
        - Create SUBJECTS_DIR for recon-all (otherwise, the command won't run);
        - Print begin execution message.
    """
    import os
    import errno
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.freesurfer import check_flags
    from clinica.utils.ux import print_begin_image

    # Extract <image_id>
    image_id = get_subject_id(t1w)

    # Check flags for T1w
    flags = check_flags(t1w, recon_all_args)

    # Create SUBJECTS_DIR for recon-all (otherwise, the command won't run)
    subjects_dir = os.path.join(output_dir, image_id)
    try:
        os.makedirs(subjects_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:  # EEXIST: folder already exists
            raise e

    print_begin_image(image_id, ['ReconAllArgs'], [flags])

    return image_id, t1w, flags, subjects_dir
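
The try/except OSError guard above reproduces the pre-Python 3.2 idiom for creating a directory that may already exist. On Python 3.2+ the same behaviour fits in one call, which is exactly what the modernized version in Example #4 does:

# Equivalent to the try/except block above (Python 3.2+), as used in Example #4:
os.makedirs(subjects_dir, exist_ok=True)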
Example #3
def init_input_node(t1w, dwi, bvec, bval, dwi_json):
    """Initialize the pipeline."""
    from clinica.utils.dwi import bids_dir_to_fsl_dir, check_dwi_volume
    from clinica.utils.filemanip import extract_metadata_from_json, get_subject_id
    from clinica.utils.ux import print_begin_image

    # Extract image ID
    image_id = get_subject_id(t1w)

    # Check that the number of DWI, bvec & bval are the same
    check_dwi_volume(dwi, bvec, bval)

    # Read metadata from DWI JSON file:
    [total_readout_time, phase_encoding_direction] = extract_metadata_from_json(
        dwi_json, ["TotalReadoutTime", "PhaseEncodingDirection"]
    )
    phase_encoding_direction = bids_dir_to_fsl_dir(phase_encoding_direction)

    # Print begin message
    print_begin_image(
        image_id,
        ["TotalReadoutTime", "PhaseEncodingDirection"],
        [str(total_readout_time), phase_encoding_direction],
    )

    return (
        image_id,
        t1w,
        dwi,
        bvec,
        bval,
        total_readout_time,
        phase_encoding_direction,
    )
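
extract_metadata_from_json reads the listed keys from the BIDS sidecar of the DWI image. A hypothetical sidecar satisfying this node could look like the dictionary below; the field names are standard BIDS, but the values are made up for illustration:

# Hypothetical content of the dwi_json sidecar (BIDS field names; illustrative values):
dwi_sidecar = {
    "TotalReadoutTime": 0.0342,       # in seconds
    "PhaseEncodingDirection": "j-",   # converted to the FSL axis convention by bids_dir_to_fsl_dir
}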
Example #4
def init_input_node(t1w, recon_all_args, output_dir):
    """Initialize the pipeline.

    This function will:
        - Extract <image_id> (e.g. sub-CLNC01_ses-M00) from the T1w filename;
        - Check FOV of T1w;
        - Create SUBJECTS_DIR for recon-all (otherwise, the command won't run);
        - Print begin execution message.
    """
    import os

    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.freesurfer import check_flags
    from clinica.utils.ux import print_begin_image

    # Extract <image_id>
    image_id = get_subject_id(t1w)

    # Check flags for T1w
    flags = check_flags(t1w, recon_all_args)

    # Create SUBJECTS_DIR for recon-all (otherwise, the command won't run)
    subjects_dir = os.path.join(output_dir, image_id)
    os.makedirs(subjects_dir, exist_ok=True)

    print_begin_image(image_id, ["ReconAllArgs"], [flags])

    return image_id, t1w, flags, subjects_dir
Example #5
def init_input_node(pet):
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.ux import print_begin_image

    # Extract image ID
    image_id = get_subject_id(pet)
    print_begin_image(image_id)
    return pet
Example #6
def init_input_node(t1w):
    """Extract "sub-<participant_id>_ses-<session_label>" from <t1w> and print begin message."""
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.ux import print_begin_image

    subject_id = get_subject_id(t1w)
    print_begin_image(subject_id)

    return subject_id, t1w
Example #7
def init_input_node(caps_dir, participant_id, session_id, long_id, output_dir):
    """Initialize the pipeline."""
    import os
    import errno
    import datetime
    import platform
    from tempfile import mkdtemp
    from colorama import Fore
    from clinica.utils.longitudinal import read_sessions
    from clinica.utils.ux import print_begin_image
    from clinica.utils.stream import cprint

    # Extract <image_id>
    image_id = '{0}_{1}_{2}'.format(participant_id, session_id, long_id)

    # Create SUBJECTS_DIR for recon-all (otherwise, the command won't run)
    if platform.system().lower().startswith('darwin'):
        # Special case: On macOS, 'recon-all -long' can fail if the $SUBJECTS_DIR is too long.
        # To circumvent this issue, we create $SUBJECTS_DIR in a temporary folder so that its path stays short.
        subjects_dir = mkdtemp()
        now = datetime.datetime.now().strftime('%H:%M:%S')
        cprint(
            '%s[%s] Needs to create a $SUBJECTS_DIR folder in %s for %s (macOS case). %s'
            % (Fore.YELLOW, now, subjects_dir, image_id.replace(
                '_', ' | '), Fore.RESET))
    else:
        subjects_dir = os.path.join(output_dir, image_id)

    try:
        os.makedirs(subjects_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:  # EEXIST: folder already exists
            raise e

    # Create symbolic link containing cross-sectional segmentation(s) in SUBJECTS_DIR so that recon-all can run
    for s_id in read_sessions(caps_dir, participant_id, long_id):
        cross_sectional_path = os.path.join(caps_dir, 'subjects',
                                            participant_id, s_id, 't1',
                                            'freesurfer_cross_sectional',
                                            participant_id + '_' + s_id)
        os.symlink(cross_sectional_path,
                   os.path.join(subjects_dir, participant_id + '_' + s_id))

    # Create symbolic links containing unbiased template in SUBJECTS_DIR so that recon-all can run
    template_path = os.path.join(caps_dir, 'subjects', participant_id, long_id,
                                 'freesurfer_unbiased_template',
                                 participant_id + '_' + long_id)
    os.symlink(template_path,
               os.path.join(subjects_dir, participant_id + '_' + long_id))

    print_begin_image(image_id)

    return subjects_dir
Example #8
def init_input_node(parameters, base_dir, subjects_visits_tsv):
    """Initialize the pipeline.

    This function will:
        - Create `surfstat_results_dir` in `base_dir`/<group_id> for SurfStat;
        - Save pipeline parameters in JSON file;
        - Copy TSV file with covariates;
        - Print begin execution message.
    """
    import os
    import errno
    import json
    import shutil
    from clinica.utils.ux import print_begin_image
    from clinica.pipelines.statistics_surface.statistics_surface_utils import create_glm_info_dictionary

    group_id = 'group-' + parameters['group_label']

    # Create surfstat_results_dir for SurfStat
    surfstat_results_dir = os.path.join(base_dir, group_id)
    try:
        os.makedirs(surfstat_results_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:  # EEXIST: folder already exists
            raise e

    # Save pipeline parameters in JSON file
    glm_dict = create_glm_info_dictionary(subjects_visits_tsv, parameters)
    json_filename = os.path.join(surfstat_results_dir, group_id + '_glm.json')
    with open(json_filename, 'w') as json_file:
        json.dump(glm_dict, json_file, indent=4)

    # Copy TSV file with covariates
    tsv_filename = os.path.join(surfstat_results_dir,
                                group_id + '_covariates.tsv')
    shutil.copyfile(subjects_visits_tsv, tsv_filename)

    # Print begin message
    list_keys = [
        'AnalysisType', 'Covariates', 'Contrast', 'FWHM', 'ClusterThreshold'
    ]
    list_values = [
        parameters['glm_type'], parameters['covariates'],
        parameters['contrast'],
        str(parameters['full_width_at_half_maximum']),
        str(parameters['cluster_threshold'])
    ]
    print_begin_image(group_id, list_keys, list_values)

    return parameters['group_label'], surfstat_results_dir
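
After this node runs, the SurfStat results directory contains the parameters JSON and the covariates TSV written above. The sketch below is illustrative only; it assumes surfstat_results_dir and group_id as defined in Example #8, and the keys inside the file depend on create_glm_info_dictionary:

# Sketch: read back the GLM parameters written by init_input_node above.
# Assumes surfstat_results_dir and group_id as defined in Example #8;
# the keys of glm_info depend on create_glm_info_dictionary.
import json
import os

with open(os.path.join(surfstat_results_dir, group_id + '_glm.json')) as json_file:
    glm_info = json.load(json_file)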
Example #9
def init_input_node(parameters, base_dir, subjects_visits_tsv):
    """Initialize the pipeline.

    This function will:
        - Create `surfstat_results_dir` in `base_dir`/<group_id> for SurfStat;
        - Save pipeline parameters in JSON file;
        - Copy TSV file with covariates;
        - Print begin execution message.
    """
    import json
    import os
    import shutil

    from clinica.pipelines.statistics_surface.statistics_surface_utils import (
        create_glm_info_dictionary, )
    from clinica.utils.ux import print_begin_image

    group_id = "group-" + parameters["group_label"]

    # Create surfstat_results_dir for SurfStat
    surfstat_results_dir = os.path.join(base_dir, group_id)
    os.makedirs(surfstat_results_dir, exist_ok=True)

    # Save pipeline parameters in JSON file
    glm_dict = create_glm_info_dictionary(subjects_visits_tsv, parameters)
    json_filename = os.path.join(surfstat_results_dir, group_id + "_glm.json")
    with open(json_filename, "w") as json_file:
        json.dump(glm_dict, json_file, indent=4)

    # Copy TSV file with covariates
    tsv_filename = os.path.join(surfstat_results_dir,
                                group_id + "_covariates.tsv")
    shutil.copyfile(subjects_visits_tsv, tsv_filename)

    # Print begin message
    list_keys = [
        "AnalysisType", "Covariates", "Contrast", "FWHM", "ClusterThreshold"
    ]
    list_values = [
        parameters["glm_type"],
        parameters["covariates"],
        parameters["contrast"],
        str(parameters["full_width_at_half_maximum"]),
        str(parameters["cluster_threshold"]),
    ]
    group_id = "group-" + parameters["group_label"]
    print_begin_image(group_id, list_keys, list_values)

    return parameters["group_label"], surfstat_results_dir
Example #10
def init_input_node(pet_nii):
    import nibabel as nib

    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.stream import cprint
    from clinica.utils.ux import print_begin_image

    # Extract image ID
    image_id = get_subject_id(pet_nii)

    # Check that the PET file is a 3D volume
    img = nib.load(pet_nii)
    if len(img.shape) == 4:
        error_msg = f"Clinica does not handle 4D volumes for {image_id.replace('_', ' | ')}"
        cprint(error_msg, lvl="error")
        raise NotImplementedError(error_msg)

    # Print begin message
    print_begin_image(image_id)

    return pet_nii
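
The 4D guard above is easy to reuse outside the pipeline; a minimal standalone version of the same check, assuming only nibabel, could be:

# Minimal standalone sketch of the 3D-volume check performed above (assumes nibabel only):
import nibabel as nib

def is_3d_volume(nifti_path: str) -> bool:
    # Return True if the NIfTI image at nifti_path has exactly three dimensions.
    return len(nib.load(nifti_path).shape) == 3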
Example #11
def init_input_node(dwi, bvec, bval, dwi_json, fmap_magnitude, fmap_phasediff,
                    fmap_phasediff_json):
    """Initialize pipeline (read JSON, check files and print begin message)."""
    import datetime

    import nibabel as nib
    from clinica.utils.dwi import bids_dir_to_fsl_dir, check_dwi_volume
    from clinica.utils.filemanip import extract_metadata_from_json, get_subject_id
    from clinica.utils.stream import cprint
    from clinica.utils.ux import print_begin_image

    # Extract image ID
    image_id = get_subject_id(dwi)

    # Check that the number of DWI, bvec & bval are the same
    try:
        check_dwi_volume(dwi, bvec, bval)
    except ValueError as e:
        now = datetime.datetime.now().strftime("%H:%M:%S")
        error_msg = (
            f"[{now}] Error: Number of DWIs, b-vals and b-vecs mismatch for {image_id.replace('_', ' | ')}"
        )
        cprint(error_msg, lvl="error")
        raise ValueError(error_msg) from e

    # Check that PhaseDiff and magnitude1 have the same header
    # Otherwise, FSL in FugueExtrapolationFromMask will crash
    img_phasediff = nib.load(fmap_phasediff)
    img_magnitude = nib.load(fmap_magnitude)
    if img_phasediff.shape != img_magnitude.shape:
        now = datetime.datetime.now().strftime("%H:%M:%S")
        error_msg = (
            f"[{now}] Error: Headers of PhaseDiff and Magnitude1 are not the same "
            f"for {image_id.replace('_', ' | ')} ({img_phasediff.shape} vs {img_magnitude.shape})"
        )
        cprint(error_msg, lvl="error")
        raise NotImplementedError(error_msg)

    # Read metadata from DWI JSON file:
    [total_readout_time,
     phase_encoding_direction] = extract_metadata_from_json(
         dwi_json, ["TotalReadoutTime", "PhaseEncodingDirection"])
    phase_encoding_direction = bids_dir_to_fsl_dir(phase_encoding_direction)

    # Read metadata from PhaseDiff JSON file:
    [echo_time1,
     echo_time2] = extract_metadata_from_json(fmap_phasediff_json,
                                              ["EchoTime1", "EchoTime2"])
    delta_echo_time = abs(echo_time2 - echo_time1)

    # Print begin message
    print_begin_image(
        image_id,
        ["TotalReadoutTime", "PhaseEncodingDirection", "DeltaEchoTime"],
        [
            str(total_readout_time), phase_encoding_direction,
            str(delta_echo_time)
        ],
    )

    return (
        image_id,
        dwi,
        bvec,
        bval,
        total_readout_time,
        phase_encoding_direction,
        fmap_magnitude,
        fmap_phasediff,
        delta_echo_time,
    )
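
For context, EchoTime1 and EchoTime2 are standard BIDS fields of the phase-difference fieldmap sidecar. The values below are typical Siemens echo times and are purely illustrative of the delta_echo_time computation above:

# Illustrative values only (typical Siemens phasediff echo times, in seconds):
echo_time1, echo_time2 = 0.00492, 0.00738
delta_echo_time = abs(echo_time2 - echo_time1)  # 0.00246 s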
Example #12
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        import sys

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.input_files import t1_volume_dartel_input_tissue
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import (
            print_begin_image,
            print_groups_in_caps_directory,
            print_images_to_process,
        )

        representative_output = os.path.join(
            self.caps_directory,
            "groups",
            f"group-{self.parameters['group_label']}",
            "t1",
            f"group-{self.parameters['group_label']}_template.nii.gz",
        )
        if os.path.exists(representative_output):
            cprint(
                msg=(
                    f"DARTEL template for {self.parameters['group_label']} already exists. "
                    "Currently, Clinica does not propose to overwrite outputs for this pipeline."
                ),
                lvl="warning",
            )
            print_groups_in_caps_directory(self.caps_directory)
            sys.exit(0)

        # Check that there are at least 2 subjects
        if len(self.subjects) <= 1:
            raise ClinicaException(
                "This pipeline needs at least 2 images to create DARTEL "
                f"template but Clinica only found {len(self.subjects)}.")

        read_parameters_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(fields=self.get_input_fields(),
                                              mandatory_inputs=True),
        )
        all_errors = []
        d_input = []
        for tissue_number in self.parameters["dartel_tissues"]:
            try:
                current_file = clinica_file_reader(
                    self.subjects,
                    self.sessions,
                    self.caps_directory,
                    t1_volume_dartel_input_tissue(tissue_number),
                )
                d_input.append(current_file)
            except ClinicaException as e:
                all_errors.append(e)

        # Raise all errors if some happened
        if len(all_errors) > 0:
            error_message = "Clinica faced errors while trying to read files in your BIDS or CAPS directories.\n"
            for msg in all_errors:
                error_message += str(msg)
            raise RuntimeError(error_message)

        # d_input is a list of size len(self.parameters['dartel_tissues'])
        #     Each element of this list is a list of size len(self.subjects)
        read_parameters_node.inputs.dartel_inputs = d_input

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                "Computational time for DARTEL creation will depend on the number of images."
            )
            print_begin_image(f"group-{self.parameters['group_label']}")

        # fmt: off
        self.connect([(read_parameters_node, self.input_node,
                       [("dartel_inputs", "dartel_input_images")])])
Example #13
def init_input_node(caps_dir, participant_id, list_session_ids, output_dir):
    """Initialize the pipeline.

    This function will create folders and symbolic links in the SUBJECTS_DIR directory for the upcoming run of recon-all.

    Note (@alexis-g-icm):
        There currently (as of 22 Feb 2019) is a bug in FreeSurfer recon-all -base, which in some cases (e.g., only one
        time point), will crash as it's trying to write lines too long for the shell to handle. This is caused by
        the path to FreeSurfer SUBJECT_DIR being too long itself.

    The current function works around this issue by checking whether there is only one session associated with a subject
    and, in that case, putting the SUBJECTS_DIR inside the system temporary folder so that its path is as short as possible.
    """
    import os
    from tempfile import mkdtemp

    from clinica.utils.longitudinal import get_long_id
    from clinica.utils.stream import cprint
    from clinica.utils.ux import print_begin_image

    # Extract <image_id>
    long_id = get_long_id(list_session_ids)
    image_id = f"{participant_id}_{long_id}"

    # Create SUBJECTS_DIR for recon-all (otherwise, the command won't run)
    if len(list_session_ids) == 1:
        # Special case: When only one time point is used, 'recon-all -base' can fail
        # if the $SUBJECTS_DIR is too long ('Word too long.' error).
        # To circumvent this issue, we create $SUBJECTS_DIR in a temporary folder so that its path stays short.
        subjects_dir = mkdtemp()
        cprint(
            msg=(
                f"{image_id.replace('_', ' | ')} has only one time point. "
                f"Needs to create a $SUBJECTS_DIR folder in {subjects_dir}"
            ),
            lvl="warning",
        )
    else:
        subjects_dir = os.path.join(output_dir, image_id)

    os.makedirs(subjects_dir, exist_ok=True)

    # Create symbolic links containing cross-sectional segmentation(s) in SUBJECTS_DIR so that recon-all can run
    for session_id in list_session_ids:
        cross_sectional_path = os.path.join(
            caps_dir,
            "subjects",
            participant_id,
            session_id,
            "t1",
            "freesurfer_cross_sectional",
            f"{participant_id}_{session_id}",
        )
        try:
            os.symlink(
                cross_sectional_path,
                os.path.join(subjects_dir, f"{participant_id}_{session_id}"),
            )
        except FileExistsError:
            pass  # The symbolic link already exists; nothing to do.

    # Prepare arguments for recon-all.
    flags = ""
    for session_id in list_session_ids:
        flags += f" -tp {participant_id}_{session_id}"

    print_begin_image(image_id)

    return image_id, subjects_dir, flags
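
The -tp flag string accumulated in the loop above can equivalently be built with a join; this is only an idiom note, the behaviour is unchanged:

# Equivalent one-liner for the flags accumulation in Example #13:
flags = "".join(f" -tp {participant_id}_{session_id}" for session_id in list_session_ids)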
Example #14
def init_input_node(caps_dir, participant_id, list_session_ids, output_dir):
    """Initialize the pipeline.

    This function will create folders and symbolic links in the SUBJECTS_DIR directory for the upcoming run of recon-all.

    Note (@alexis-g-icm):
        There currently (as of 22 Feb 2019) is a bug in FreeSurfer recon-all -base, which in some cases (e.g., only one
        time point), will crash as it's trying to write lines too long for the shell to handle. This is caused by
        the path to FreeSurfer SUBJECT_DIR being too long itself.

    The current function works around this issue by checking whether there is only one session associated with a subject
    and, in that case, putting the SUBJECTS_DIR inside the system temporary folder so that its path is as short as possible.
    """
    import os
    import errno
    import datetime
    from tempfile import mkdtemp
    from colorama import Fore
    from clinica.utils.stream import cprint
    from clinica.utils.longitudinal import get_long_id
    from clinica.utils.ux import print_begin_image

    # Extract <image_id>
    long_id = get_long_id(list_session_ids)
    image_id = participant_id + '_' + long_id

    # Create SUBJECTS_DIR for recon-all (otherwise, the command won't run)
    if len(list_session_ids) == 1:
        # Special case: When only one time point is used, 'recon-all -base' can fail
        # if the $SUBJECTS_DIR is too long ('Word too long.' error).
        # To circumvent this issue, we create $SUBJECTS_DIR in a temporary folder so that its path stays short.
        subjects_dir = mkdtemp()
        now = datetime.datetime.now().strftime('%H:%M:%S')
        cprint(
            '%s[%s] %s has only one time point. Needs to create a $SUBJECTS_DIR folder in %s%s'
            % (Fore.YELLOW, now, image_id.replace(
                '_', ' | '), subjects_dir, Fore.RESET))
    else:
        subjects_dir = os.path.join(output_dir, image_id)
    try:
        os.makedirs(subjects_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:  # EEXIST: folder already exists
            raise e

    # Create symbolic links containing cross-sectional segmentation(s) in SUBJECTS_DIR so that recon-all can run
    for session_id in list_session_ids:
        cross_sectional_path = os.path.join(caps_dir, 'subjects',
                                            participant_id, session_id, 't1',
                                            'freesurfer_cross_sectional',
                                            participant_id + '_' + session_id)
        try:
            os.symlink(
                cross_sectional_path,
                os.path.join(subjects_dir, participant_id + '_' + session_id))
        except FileExistsError as e:
            if e.errno != errno.EEXIST:  # EEXIST: folder already exists
                raise e

    # Prepare arguments for recon-all.
    flags = ""
    for session_id in list_session_ids:
        flags += " -tp " + participant_id + "_" + session_id

    print_begin_image(image_id)

    return image_id, subjects_dir, flags
Example #15
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        from colorama import Fore
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import (t1_volume_template_tpm_in_mni,
                                               pet_volume_normalized_suvr_pet)
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process, print_begin_image

        all_errors = []
        if self.parameters['orig_input_data'] == 'pet-volume':
            if not (self.parameters["acq_label"]
                    and self.parameters["suvr_reference_region"]):
                raise ValueError(
                    f"Missing value(s) in parameters from pet-volume pipeline. Given values:\n"
                    f"- acq_label: {self.parameters['acq_label']}\n"
                    f"- suvr_reference_region: {self.parameters['suvr_reference_region']}\n"
                    f"- use_pvc_data: {self.parameters['use_pvc_data']}\n")

            self.parameters['measure_label'] = self.parameters['acq_label']
            information_dict = pet_volume_normalized_suvr_pet(
                acq_label=self.parameters["acq_label"],
                group_label=self.parameters["group_label_dartel"],
                suvr_reference_region=self.parameters["suvr_reference_region"],
                use_brainmasked_image=True,
                use_pvc_data=self.parameters["use_pvc_data"],
                fwhm=self.parameters['full_width_at_half_maximum'])
        elif self.parameters['orig_input_data'] == 't1-volume':
            self.parameters['measure_label'] = 'graymatter'
            information_dict = t1_volume_template_tpm_in_mni(
                self.parameters['group_label_dartel'], 0, True)

        elif self.parameters['orig_input_data'] == 'custom-pipeline':
            if self.parameters['custom_file'] is None:
                raise ClinicaException(
                    f"{Fore.RED}Custom pipeline was selected but no 'custom_file' was specified.{Fore.RESET}"
                )
            # If custom files are grabbed, the FWHM information is irrelevant and should not appear in final filenames
            self.parameters['full_width_at_half_maximum'] = None
            information_dict = {
                'pattern': self.parameters['custom_file'],
                'description': 'custom file provided by user'
            }
        else:
            raise ValueError(
                f"Input data {self.parameters['orig_input_data']} unknown.")

        try:
            input_files = clinica_file_reader(self.subjects, self.sessions,
                                              self.caps_directory,
                                              information_dict)
        except ClinicaException as e:
            all_errors.append(e)

        if len(all_errors) > 0:
            error_message = 'Clinica faced errors while trying to read files in your CAPS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaException(error_message)

        read_parameters_node = npe.Node(name="LoadingCLIArguments",
                                        interface=nutil.IdentityInterface(
                                            fields=self.get_input_fields(),
                                            mandatory_inputs=True),
                                        synchronize=True)
        read_parameters_node.inputs.input_files = input_files

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                'The pipeline will last a few minutes. Images generated by SPM will pop up during the pipeline.'
            )
            print_begin_image(f"group-{self.parameters['group_label']}")

        self.connect([(read_parameters_node, self.input_node,
                       [('input_files', 'input_files')])])
Example #16
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        import sys
        from colorama import Fore
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import t1_volume_dartel_input_tissue
        from clinica.utils.exceptions import ClinicaException
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_groups_in_caps_directory, print_images_to_process, print_begin_image

        representative_output = os.path.join(
            self.caps_directory, 'groups',
            'group-' + self.parameters['group_label'], 't1',
            'group-' + self.parameters['group_label'] + '_template.nii.gz')
        if os.path.exists(representative_output):
            cprint(
                "%sDARTEL template for %s already exists. Currently, Clinica does not propose to overwrite outputs "
                "for this pipeline.%s" %
                (Fore.YELLOW, self.parameters['group_label'], Fore.RESET))
            print_groups_in_caps_directory(self.caps_directory)
            sys.exit(0)

        # Check that there are at least 2 subjects
        if len(self.subjects) <= 1:
            raise ClinicaException(
                '%sThis pipeline needs at least 2 images to create DARTEL template but '
                'Clinica only found %s.%s' %
                (Fore.RED, len(self.subjects), Fore.RESET))

        read_parameters_node = npe.Node(name="LoadingCLIArguments",
                                        interface=nutil.IdentityInterface(
                                            fields=self.get_input_fields(),
                                            mandatory_inputs=True))
        all_errors = []
        d_input = []
        for tissue_number in self.parameters['dartel_tissues']:
            try:
                current_file = clinica_file_reader(
                    self.subjects, self.sessions, self.caps_directory,
                    t1_volume_dartel_input_tissue(tissue_number))
                d_input.append(current_file)
            except ClinicaException as e:
                all_errors.append(e)

        # Raise all errors if some happened
        if len(all_errors) > 0:
            error_message = 'Clinica faced errors while trying to read files in your BIDS or CAPS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise RuntimeError(error_message)

        # d_input is a list of size len(self.parameters['dartel_tissues'])
        #     Each element of this list is a list of size len(self.subjects)
        read_parameters_node.inputs.dartel_inputs = d_input

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                'Computational time for DARTEL creation will depend on the number of images.'
            )
            print_begin_image('group-' + self.parameters['group_label'])

        self.connect([(read_parameters_node, self.input_node,
                       [('dartel_inputs', 'dartel_input_images')])])
Example #17
def init_input_node(caps_dir, participant_id, session_id, long_id, output_dir):
    """Initialize the pipeline."""
    import os
    import platform
    from tempfile import mkdtemp

    from clinica.utils.longitudinal import read_sessions
    from clinica.utils.stream import cprint
    from clinica.utils.ux import print_begin_image

    # Extract <image_id>
    image_id = "{0}_{1}_{2}".format(participant_id, session_id, long_id)

    # Create SUBJECTS_DIR for recon-all (otherwise, the command won't run)
    if platform.system().lower().startswith("darwin"):
        # Special case: On macOS, 'recon-all -long' can fail if the $SUBJECTS_DIR is too long.
        # To circumvent this issue, we create $SUBJECTS_DIR in a temporary folder so that its path stays short.
        subjects_dir = mkdtemp()
        cprint(
            msg=(
                f"Needs to create a $SUBJECTS_DIR folder "
                f"in {subjects_dir} for {image_id.replace('_', ' | ')} (macOS case)."
            ),
            lvl="warning",
        )
    else:
        subjects_dir = os.path.join(output_dir, image_id)

    os.makedirs(subjects_dir, exist_ok=True)

    # Create symbolic link containing cross-sectional segmentation(s) in SUBJECTS_DIR so that recon-all can run
    for s_id in read_sessions(caps_dir, participant_id, long_id):
        cross_sectional_path = os.path.join(
            caps_dir,
            "subjects",
            participant_id,
            s_id,
            "t1",
            "freesurfer_cross_sectional",
            f"{participant_id}_{s_id}",
        )
        os.symlink(
            cross_sectional_path,
            os.path.join(subjects_dir, f"{participant_id}_{s_id}"),
        )

    # Create symbolic links containing unbiased template in SUBJECTS_DIR so that recon-all can run
    template_path = os.path.join(
        caps_dir,
        "subjects",
        participant_id,
        long_id,
        "freesurfer_unbiased_template",
        f"{participant_id}_{long_id}",
    )
    os.symlink(template_path,
               os.path.join(subjects_dir, f"{participant_id}_{long_id}"))

    print_begin_image(image_id)

    return subjects_dir
Example #18
def print_begin_pipeline(in_bids_or_caps_file: str) -> None:
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.ux import print_begin_image

    print_begin_image(get_subject_id(in_bids_or_caps_file))
Example #19
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        from colorama import Fore
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.iotools.utils.data_handling import check_volume_location_in_world_coordinate_system
        from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
        from clinica.utils.filemanip import extract_subjects_sessions_from_filename, save_participants_sessions
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.input_files import T1W_NII
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process, print_begin_image

        gic = '*'
        if self.parameters['group_id_caps'] is not None:
            gic = self.parameters['group_id_caps']

        all_errors = []
        if self.parameters['feature_type'] == 'fdg':
            try:
                input_files = clinica_file_reader(
                    self.subjects, self.sessions, self.caps_directory, {
                        'pattern':
                        '*_pet_space-Ixi549Space_suvr-pons_mask-brain_fwhm-' +
                        str(self.parameters['full_width_at_half_maximum']) +
                        'mm_pet.nii*',
                        'description':
                        'pons normalized FDG PET image in MNI space (brain masked)',
                        'needed_pipeline':
                        'pet-volume'
                    })
            except ClinicaException as e:
                all_errors.append(e)
        elif self.parameters['feature_type'] == 'graymatter':
            try:
                input_files = clinica_file_reader(
                    self.subjects, self.sessions, self.caps_directory, {
                        'pattern':
                        't1/spm/dartel/group-' + gic +
                        '/*_T1w_segm-graymatter_space-Ixi549Space_modulated-on_fwhm-'
                        + str(self.parameters['full_width_at_half_maximum']) +
                        'mm_probability.nii.*',
                        'description':
                        'probability map of gray matter segmentation based on T1w image in MNI space',
                        'needed_pipeline':
                        't1-volume or t1-volume-existing-template'
                    })
            except ClinicaException as e:
                all_errors.append(e)

        else:
            if not self.parameters['custom_files']:
                raise ClinicaException(
                    Fore.RED +
                    '[Error] You did not specify the --custom_files flag in the command line for the feature type '
                    + Fore.BLUE + self.parameters['feature_type'] + Fore.RED +
                    '! Clinica cannot know what file to use in your analysis! Type:\n\t' +
                    Fore.BLUE + 'clinica run statistics-volume\n' + Fore.RED +
                    'to get help on how to use the command line.' +
                    Fore.RESET)
            try:
                # If custom files are grabbed, the FWHM information is irrelevant and should not appear in final filenames
                self.parameters['full_width_at_half_maximum'] = None
                input_files = clinica_file_reader(
                    self.subjects, self.sessions, self.caps_directory, {
                        'pattern': self.parameters['custom_files'],
                        'description': 'custom file provided by user'
                    })
            except ClinicaException as e:
                all_errors.append(e)

        if len(all_errors) > 0:
            error_message = 'Clinica faced errors while trying to read files in your BIDS or CAPS directories.\n'
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaException(error_message)

        read_parameters_node = npe.Node(name="LoadingCLIArguments",
                                        interface=nutil.IdentityInterface(
                                            fields=self.get_input_fields(),
                                            mandatory_inputs=True),
                                        synchronize=True)
        read_parameters_node.inputs.input_files = input_files

        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint(
                'The pipeline will last a few minutes. Images generated by SPM will pop up during the pipeline.'
            )
            print_begin_image('group-' + self.parameters['group_id'])

        self.connect([(read_parameters_node, self.input_node,
                       [('input_files', 'input_files')])])