Example #1
def compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list,
                          conversion_dir):
    """Compute the paths to the PIB PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    pet_pib_col = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]
    pet_pib_df = pd.DataFrame(columns=pet_pib_col)
    pet_pib_dfs_list = []

    # Loading needed .csv files
    pibqc = pd.read_csv(path.join(csv_dir, "PIBQC.csv"),
                        sep=",",
                        low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, "PET_META_LIST.csv"),
                                sep=",",
                                low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]

        if subject_pet_meta.empty:
            continue

        # QC for PIB PET images
        pet_qc_subj = pibqc[(pibqc.PASS == 1) & (pibqc.RID == int(subj[-4:]))]

        sequences_preprocessing_step = ["PIB Co-registered, Averaged"]
        subj_dfs_list = get_images_pet(
            subj,
            pet_qc_subj,
            subject_pet_meta,
            pet_pib_col,
            "PIB-PET",
            sequences_preprocessing_step,
            viscode_field="VISCODE",
        )
        if subj_dfs_list:
            pet_pib_dfs_list += subj_dfs_list

    if pet_pib_dfs_list:
        pet_pib_df = pd.concat(pet_pib_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = []

    # Removing known exceptions from images to convert
    if not pet_pib_df.empty:
        error_ind = pet_pib_df.index[pet_pib_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                      axis=1)]
        pet_pib_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_pib_df, source_dir, "PIB", "I", "Image_ID")
    images.to_csv(path.join(conversion_dir, "pib_pet_paths.tsv"),
                  sep="\t",
                  index=False)

    return images
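A minimal usage sketch for the function above. All paths are hypothetical, the ADNI clinical CSV files (PIBQC.csv, PET_META_LIST.csv) must already be in csv_dir, and conversion_dir must exist before the call since images.to_csv writes into it; the printed column subset assumes find_image_path keeps the metadata columns from pet_pib_col:

subjects = ["031_S_0294"]  # hypothetical subject; ADNI PTIDs look like XXX_S_NNNN
paths_df = compute_pib_pet_paths(
    source_dir="/data/ADNI",                      # hypothetical image directory
    csv_dir="/data/ADNI/clinical",                # hypothetical clinical CSV directory
    dest_dir="/data/BIDS",                        # hypothetical BIDS output directory
    subjs_list=subjects,
    conversion_dir="/data/BIDS/conversion_info",  # must already exist
)
print(paths_df[["Subject_ID", "VISCODE", "Image_ID"]].head())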
Example #2
def compute_fdg_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute the paths to the FDG PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS

    """

    import pandas as pd
    import os
    import operator
    from os import path
    from functools import reduce
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, find_image_path
    from clinica.utils.stream import cprint

    pet_fdg_col = [
        'Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
        'Study_ID', 'Series_ID', 'Image_ID', 'Original'
    ]
    pet_fdg_df = pd.DataFrame(columns=pet_fdg_col)

    # Loading needed .csv files
    petqc = pd.read_csv(path.join(csv_dir, 'PETQC.csv'),
                        sep=',',
                        low_memory=False)
    petqc3 = pd.read_csv(path.join(csv_dir, 'PETC3.csv'),
                         sep=',',
                         low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'),
                                sep=',',
                                low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects without FDG-PET images metadata
            continue

        # QC for FDG PET images for ADNI 1, GO and 2
        pet_qc_1go2_subj = petqc[(petqc.PASS == 1)
                                 & (petqc.RID == int(subj[-4:]))]

        # QC for FDG PET images for ADNI 3
        pet_qc3_subj = petqc3[(petqc3.SCANQLTY == 1)
                              & (petqc3.RID == int(subj[-4:]))]
        pet_qc3_subj.insert(0, 'EXAMDATE', pet_qc3_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        pet_qc_subj = pd.concat([pet_qc_1go2_subj, pet_qc3_subj],
                                axis=0,
                                ignore_index=True,
                                sort=False)

        for visit in list(pet_qc_subj.VISCODE2.unique()):
            pet_qc_visit = pet_qc_subj[pet_qc_subj.VISCODE2 == visit]

            # If there are several scans for a timepoint, we keep the image acquired last (higher LONIUID)
            pet_qc_visit = pet_qc_visit.sort_values("LONIUID", ascending=False)

            qc_visit = pet_qc_visit.iloc[0]

            # Corresponding LONI image ID for original scan in PET Meta List
            int_image_id = int(qc_visit.LONIUID[1:])

            original_pet_meta = subject_pet_meta[
                (subject_pet_meta['Orig/Proc'] == 'Original')
                & (subject_pet_meta['Image ID'] == int_image_id)
                & (subject_pet_meta.Sequence.map(
                    lambda s: s.lower().find('early') < 0))]
            # If no corresponding FDG PET metadata for an original image,
            # take scan at the same date containing FDG in sequence name
            if original_pet_meta.shape[0] < 1:
                original_pet_meta = subject_pet_meta[
                    (subject_pet_meta['Orig/Proc'] == 'Original')
                    & (subject_pet_meta.Sequence.map(
                        lambda x: (x.lower().find('fdg') > -1)))
                    & (subject_pet_meta['Scan Date'] == qc_visit.EXAMDATE)]

                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere QC visits without image metadata
                    cprint('No FDG-PET images metadata for subject - ' + subj +
                           ' for visit ' + qc_visit.VISCODE2)
                    continue

            original_image = original_pet_meta.iloc[0]

            # Co-registered and Averaged image with the same Series ID as the original image
            averaged_pet_meta = subject_pet_meta[
                (subject_pet_meta['Sequence'] == 'Co-registered, Averaged')
                & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            # If an explicit Co-registered, Averaged image does not exist,
            # the original image is already in that preprocessing stage.

            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame(
                [[qc_visit.Phase, subj, qc_visit.VISCODE2,
                  str(visit), sequence, date, str(study_id),
                  str(series_id), str(image_id), original]],
                columns=pet_fdg_col)
            pet_fdg_df = pd.concat([pet_fdg_df, row_to_append],
                                   ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # NONAME.nii
        ('031_S_0294', 'bl'),
        ('037_S_1421', 'm36'),
        ('037_S_1078', 'm36'),

        # Empty folders
        ('941_S_1195', 'm48'),
        ('005_S_0223', 'm12')
    ]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_fdg_df.Subject_ID == conv_error[0])
                             & (pet_fdg_df.VISCODE == conv_error[1]))

    indices_to_remove = pet_fdg_df.index[reduce(operator.or_, error_indices,
                                                False)]
    pet_fdg_df.drop(indices_to_remove, inplace=True)

    images = find_image_path(pet_fdg_df, source_dir, 'FDG', 'I', 'Image_ID')

    fdg_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(fdg_csv_path):
        os.mkdir(fdg_csv_path)
    images.to_csv(path.join(fdg_csv_path, 'fdg_pet_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
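The exception-removal step above folds one boolean Series per known error into a single mask with reduce(operator.or_, ...). A self-contained sketch of the same pattern on a toy frame:

import operator
from functools import reduce

import pandas as pd

df = pd.DataFrame({'Subject_ID': ['031_S_0294', '037_S_1421'],
                   'VISCODE': ['bl', 'm36']})
errors = [('031_S_0294', 'bl')]

# One boolean Series per known error, OR-ed together into a single mask
masks = [(df.Subject_ID == s) & (df.VISCODE == v) for s, v in errors]
df = df.drop(df.index[reduce(operator.or_, masks, False)])
# Only the ('037_S_1421', 'm36') row remains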
Example #3
def compute_fdg_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to the FDG PET images and store them in a tsv file

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS

    """

    import pandas as pd
    import os
    from os import path
    from clinica.iotools.converters.adni_to_bids.adni_utils import get_images_pet, find_image_path

    pet_fdg_col = ['Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID',
                   'Series_ID', 'Image_ID', 'Original']
    pet_fdg_df = pd.DataFrame(columns=pet_fdg_col)
    pet_fdg_dfs_list = []

    # Loading needed .csv files
    petqc = pd.read_csv(path.join(csv_dir, 'PETQC.csv'), sep=',', low_memory=False)
    petqc3 = pd.read_csv(path.join(csv_dir, 'PETC3.csv'), sep=',', low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'), sep=',', low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.empty:
            continue

        # QC for FDG PET images for ADNI 1, GO and 2
        pet_qc_1go2_subj = petqc[(petqc.PASS == 1) & (petqc.RID == int(subj[-4:]))]

        # QC for FDG PET images for ADNI 3
        pet_qc3_subj = petqc3[(petqc3.SCANQLTY == 1) & (petqc3.RID == int(subj[-4:]))]
        pet_qc3_subj.insert(0, 'EXAMDATE', pet_qc3_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        pet_qc_subj = pd.concat([pet_qc_1go2_subj, pet_qc3_subj], axis=0, ignore_index=True, sort=False)

        sequences_preprocessing_step = ['Co-registered, Averaged']
        subj_dfs_list = get_images_pet(subj, pet_qc_subj, subject_pet_meta, pet_fdg_col, 'FDG-PET',
                                       sequences_preprocessing_step)
        if subj_dfs_list:
            pet_fdg_dfs_list += subj_dfs_list

    if pet_fdg_dfs_list:
        # Concatenating dataframes into one
        pet_fdg_df = pd.concat(pet_fdg_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # NONAME.nii
                         ('031_S_0294', 'bl'),
                         ('037_S_1421', 'm36'),
                         ('037_S_1078', 'm36'),

                         # Empty folders
                         ('941_S_1195', 'm48'),
                         ('005_S_0223', 'm12')]

    # Removing known exceptions from images to convert
    if not pet_fdg_df.empty:
        error_ind = pet_fdg_df.index[pet_fdg_df.apply(lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors),
                                                      axis=1)]
        pet_fdg_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_fdg_df, source_dir, 'FDG', 'I', 'Image_ID')

    fdg_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(fdg_csv_path):
        os.mkdir(fdg_csv_path)
    images.to_csv(path.join(fdg_csv_path, 'fdg_pet_paths.tsv'), sep='\t', index=False)

    return images
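This version filters exceptions with a row-wise apply over (Subject_ID, VISCODE) tuples. An equivalent vectorized formulation, a sketch rather than anything from the original code, avoids apply:

import pandas as pd

pet_fdg_df = pd.DataFrame({'Subject_ID': ['031_S_0294', '037_S_1078'],
                           'VISCODE': ['bl', 'bl']})
conversion_errors = [('031_S_0294', 'bl')]

# Build the (Subject_ID, VISCODE) keys once, then test membership in bulk
key = pd.Series(list(zip(pet_fdg_df.Subject_ID, pet_fdg_df.VISCODE)),
                index=pet_fdg_df.index)
pet_fdg_df = pet_fdg_df[~key.isin(conversion_errors)]
# Only the ('037_S_1078', 'bl') row remains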
Example #4
def compute_fmri_path(source_dir, csv_dir, dest_dir, subjs_list,
                      conversion_dir):
    """Compute the paths to fMR images.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        pandas DataFrame containing the path for each fMRI image
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        visits_to_timepoints,
    )

    fmri_col = [
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Field_Strength",
        "Series_ID",
        "Image_ID",
    ]
    fmri_df = pd.DataFrame(columns=fmri_col)
    fmri_dfs_list = []

    # Loading needed .csv files
    adni_merge = pd.read_csv(path.join(csv_dir, "ADNIMERGE.csv"),
                             sep=",",
                             low_memory=False)

    mayo_mri_qc = pd.read_csv(
        path.join(csv_dir, "MAYOADIRL_MRI_IMAGEQC_12_08_15.csv"),
        sep=",",
        low_memory=False,
    )
    mayo_mri_qc = mayo_mri_qc[mayo_mri_qc.series_type == "fMRI"]
    mayo_mri_qc.columns = [x.upper() for x in mayo_mri_qc.columns]

    mayo_mri_qc3 = pd.read_csv(path.join(csv_dir,
                                         "MAYOADIRL_MRI_QUALITY_ADNI3.csv"),
                               sep=",",
                               low_memory=False)
    mayo_mri_qc3 = mayo_mri_qc3[mayo_mri_qc3.SERIES_TYPE == "EPB"]

    # Concatenating visits in both QC files
    mayo_mri_qc = pd.concat([mayo_mri_qc, mayo_mri_qc3],
                            axis=0,
                            ignore_index=True,
                            sort=False)

    mri_list = pd.read_csv(path.join(csv_dir, "MRILIST.csv"),
                           sep=",",
                           low_memory=False)

    # Selecting only fMRI images that are not Multiband
    # Sequences containing 'MRI' match all fMRI scans, but not other modalities
    mri_list = mri_list[mri_list.SEQUENCE.str.contains("MRI")]
    unwanted_sequences = ["MB"]
    mri_list = mri_list[mri_list.SEQUENCE.map(
        lambda x: not any(subs in x for subs in unwanted_sequences))]

    # We will convert the images for each subject in the subject list
    for subj in subjs_list:

        # Filter ADNIMERGE, MRI_LIST and QC for only one subject and sort the rows/visits by examination date
        adnimerge_subj = adni_merge[adni_merge.PTID == subj]
        adnimerge_subj = adnimerge_subj.sort_values("EXAMDATE")

        mri_list_subj = mri_list[mri_list.SUBJECT == subj]
        mri_list_subj = mri_list_subj.sort_values("SCANDATE")

        mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(subj[-4:])]

        # Obtain corresponding timepoints for the subject visits
        visits = visits_to_timepoints(subj, mri_list_subj, adnimerge_subj,
                                      "fMRI")

        for visit_info in visits.keys():
            timepoint = visit_info[0]
            visit_str = visits[visit_info]

            visit_mri_list = mri_list_subj[mri_list_subj.VISIT == visit_str]
            image = fmri_image(subj, timepoint, visits[visit_info],
                               visit_mri_list, mayo_mri_qc_subj)

            if image is not None:
                row_to_append = pd.DataFrame(image, index=["i"])
                fmri_dfs_list.append(row_to_append)

    if fmri_dfs_list:
        fmri_df = pd.concat(fmri_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [
        ("006_S_4485", "m84"),
        ("123_S_4127", "m96"),
        # Eq_1
        ("094_S_4503", "m24"),
        ("009_S_4388", "m72"),
        ("036_S_6088", "bl"),
        ("036_S_6134", "bl"),
        ("016_S_6802", "bl"),
        ("016_S_6816", "bl"),
        ("126_S_4891", "m84"),
        # Multiple images
        ("029_S_2395", "m72"),
    ]

    # Removing known exceptions from images to convert
    if not fmri_df.empty:
        error_ind = fmri_df.index[fmri_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                axis=1)]
        fmri_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(fmri_df, source_dir, "fMRI", "S", "Series_ID")
    images.to_csv(path.join(conversion_dir, "fmri_paths.tsv"),
                  sep="\t",
                  index=False)

    return images
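A note on the int(subj[-4:]) slice used when filtering the QC tables: ADNI subject identifiers (PTIDs) end in a four-digit roster ID (RID), which is the key the QC tables use, so the last four characters recover it:

subj = "006_S_4485"   # PTID taken from the exceptions list above
rid = int(subj[-4:])  # -> 4485, the RID used in the Mayo QC tables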
Example #5
def compute_t1_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to T1 MR images and store them in a tsv file

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the T1 MR images that will be converted into BIDS

    """

    from os import path, makedirs

    import pandas as pd

    from clinica.utils.stream import cprint
    from clinica.iotools.converters.adni_to_bids.adni_utils import visits_to_timepoints, find_image_path

    t1_col_df = [
        'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID',
        'Field_Strength', 'Series_ID', 'Image_ID', 'Original'
    ]
    t1_df = pd.DataFrame(columns=t1_col_df)
    t1_dfs_list = []

    # Loading needed .csv files
    adni_merge = pd.read_csv(path.join(csv_dir, 'ADNIMERGE.csv'),
                             sep=',',
                             low_memory=False)
    mprage_meta = pd.read_csv(path.join(csv_dir, 'MPRAGEMETA.csv'),
                              sep=',',
                              low_memory=False)
    mri_quality = pd.read_csv(path.join(csv_dir, 'MRIQUALITY.csv'),
                              sep=',',
                              low_memory=False)
    mayo_mri_qc = pd.read_csv(path.join(csv_dir,
                                        'MAYOADIRL_MRI_IMAGEQC_12_08_15.csv'),
                              sep=',',
                              low_memory=False)
    # Keep only T1 scans
    mayo_mri_qc = mayo_mri_qc[mayo_mri_qc.series_type == 'T1']

    # We will convert the images for each subject in the subject list
    for subj in subjs_list:

        # Filter ADNIMERGE, MPRAGE METADATA and QC for only one subject and sort the rows/visits by examination date
        adnimerge_subj = adni_merge[adni_merge.PTID == subj]
        adnimerge_subj = adnimerge_subj.sort_values('EXAMDATE')

        mprage_meta_subj = mprage_meta[mprage_meta.SubjectID == subj]
        mprage_meta_subj = mprage_meta_subj.sort_values('ScanDate')

        mri_quality_subj = mri_quality[mri_quality.RID == int(subj[-4:])]
        mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(subj[-4:])]

        # Obtain corresponding timepoints for the subject visits
        visits = visits_to_timepoints(subj, mprage_meta_subj, adnimerge_subj,
                                      "T1", "Visit", "ScanDate")

        for visit_info in visits.keys():
            cohort = visit_info[1]
            timepoint = visit_info[0]
            visit_str = visits[visit_info]

            image_dict = None

            if cohort in ('ADNI1', 'ADNIGO', 'ADNI2'):
                image_dict = adni1go2_image(
                    subj,
                    timepoint,
                    visit_str,
                    mprage_meta_subj,
                    mri_quality_subj,
                    mayo_mri_qc_subj,
                    preferred_field_strength=1.5 if cohort == 'ADNI1' else 3.0)
            elif cohort == 'ADNI3':
                image_dict = adni3_image(subj, timepoint, visit_str,
                                         mprage_meta_subj, mayo_mri_qc_subj)
            else:
                cprint("Subject %s visit %s belongs to an unknown cohort: %s" %
                       (subj, visit_str, cohort))

            if image_dict is not None:
                row_to_append = pd.DataFrame(image_dict, index=['i'])
                t1_dfs_list.append(row_to_append)

    if t1_dfs_list:
        t1_df = pd.concat(t1_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # Eq_1
        ('031_S_0830', 'm48'),
        ('100_S_0995', 'm18'),
        ('031_S_0867', 'm48'),
        ('100_S_0892', 'm18'),
        # Empty folders
        # ('029_S_0845', 'm24'),
        # ('094_S_1267', 'm24'),
        # ('029_S_0843', 'm24'),
        # ('027_S_0307', 'm48'),
        # ('057_S_1269', 'm24'),
        # ('036_S_4899', 'm03'),
        # ('033_S_1016', 'm120'),
        # ('130_S_4984', 'm12'),
        # ('027_S_4802', 'm06'),
        # ('131_S_0409', 'bl'),
        # ('082_S_4224', 'm24'),
        # ('006_S_4960', 'bl'),
        # ('006_S_4960', 'm03'),
        # ('006_S_4960', 'm06'),
        # ('006_S_4960', 'm12'),
        # ('006_S_4960', 'm24'),
        # ('006_S_4960', 'm36'),
        # ('006_S_4960', 'm72'),
        # ('022_S_5004', 'bl'),
        # ('022_S_5004', 'm03'),
        # Several images: T1wa ...
        ('006_S_4485', 'm84'),
        ('029_S_2395', 'm72'),
        ('114_S_6039', 'bl'),
        ('016_S_4952', 'm48')
    ]

    # Removing known exceptions from images to convert
    if not t1_df.empty:
        error_indices = t1_df.index[t1_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                axis=1)]
        t1_df.drop(error_indices, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(t1_df, source_dir, 'T1', 'S', 'Series_ID')

    # Store the paths inside a file called conversion_info inside the input directory
    t1_tsv_path = path.join(dest_dir, 'conversion_info')
    if not path.exists(t1_tsv_path):
        makedirs(t1_tsv_path)
    images.to_csv(path.join(t1_tsv_path, 't1_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
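The visit loop above relies on visits_to_timepoints returning a dictionary keyed by (timepoint, cohort) tuples, with the visit name as the value. A hypothetical illustration of the shape this code consumes (the entries are invented for the sketch):

visits = {('bl', 'ADNI1'): 'ADNI Screening',       # hypothetical entry
          ('m48', 'ADNI2'): 'ADNI2 Year 4 Visit'}  # hypothetical entry
for (timepoint, cohort), visit_str in visits.items():
    print(timepoint, cohort, visit_str)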
Example #6
                         ('153_S_6274', 'bl'),
                         ('006_S_4485', 'm84'),
                         ('153_S_6237', 'bl'),
                         ('153_S_6336', 'bl'),
                         ('153_S_6450', 'bl'),
                         ('003_S_4441', 'm12'),
                         # Several output images
                         ('029_S_2395', 'm72')]

    # Removing known exceptions from images to convert
    if not dwi_df.empty:
        error_ind = dwi_df.index[dwi_df.apply(lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1)]
        dwi_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(dwi_df, source_dir, 'DWI', 'S', 'Series_ID')

    dwi_tsv_path = path.join(dest_dir, 'conversion_info')
    if not path.exists(dwi_tsv_path):
        mkdir(dwi_tsv_path)
    images.to_csv(path.join(dwi_tsv_path, 'dwi_paths.tsv'), sep='\t', index=False)

    return images


def dwi_image(subject_id, timepoint, visit_str, visit_mri_list, mri_qc_subj):
    """
    One image among those in the input list is chosen according to QC,
    and the corresponding metadata is extracted into a dictionary.

    Args:
Example #7
def compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to the PIB PET images and store them in a tsv file

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS

    """

    import pandas as pd
    import os
    from os import path
    from clinica.iotools.converters.adni_to_bids.adni_utils import get_images_pet, find_image_path

    pet_pib_col = [
        'Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
        'Study_ID', 'Series_ID', 'Image_ID', 'Original'
    ]
    pet_pib_df = pd.DataFrame(columns=pet_pib_col)
    pet_pib_dfs_list = []

    # Loading needed .csv files
    pibqc = pd.read_csv(path.join(csv_dir, 'PIBQC.csv'),
                        sep=',',
                        low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'),
                                sep=',',
                                low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.empty:
            continue

        # QC for PIB PET images
        pet_qc_subj = pibqc[(pibqc.PASS == 1) & (pibqc.RID == int(subj[-4:]))]

        sequences_preprocessing_step = ['PIB Co-registered, Averaged']
        subj_dfs_list = get_images_pet(subj,
                                       pet_qc_subj,
                                       subject_pet_meta,
                                       pet_pib_col,
                                       'PIB-PET',
                                       sequences_preprocessing_step,
                                       viscode_field="VISCODE")
        if subj_dfs_list:
            pet_pib_dfs_list += subj_dfs_list

    if pet_pib_dfs_list:
        pet_pib_df = pd.concat(pet_pib_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = []

    # Removing known exceptions from images to convert
    if not pet_pib_df.empty:
        error_ind = pet_pib_df.index[pet_pib_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                      axis=1)]
        pet_pib_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_pib_df, source_dir, 'PIB', 'I', 'Image_ID')

    pib_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(pib_csv_path):
        os.mkdir(pib_csv_path)
    images.to_csv(path.join(pib_csv_path, 'pib_pet_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
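The older snippets create the conversion_info folder with os.mkdir after an existence check, which still fails if dest_dir itself is missing. A slightly more defensive sketch, substituting os.makedirs for the original os.mkdir:

import os
from os import path

pib_csv_path = path.join('/data/BIDS', 'conversion_info')  # hypothetical dest_dir
os.makedirs(pib_csv_path, exist_ok=True)  # creates parents; no error if it exists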
Example #8
def compute_dwi_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute paths to DW images to convert to BIDS

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: pandas DataFrame that contains the paths to all the DW images to convert

    """

    import operator
    from os import path, mkdir
    from functools import reduce
    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import find_image_path, visits_to_timepoints_mrilist

    dwi_col_df = [
        'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID',
        'Series_ID', 'Image_ID', 'Field_Strength'
    ]
    dwi_df = pd.DataFrame(columns=dwi_col_df)

    # Loading needed .csv files
    adni_merge = pd.read_csv(path.join(csv_dir, 'ADNIMERGE.csv'),
                             sep=',',
                             low_memory=False)

    mayo_mri_qc = pd.read_csv(path.join(csv_dir,
                                        'MAYOADIRL_MRI_IMAGEQC_12_08_15.csv'),
                              sep=',',
                              low_memory=False)
    mayo_mri_qc = mayo_mri_qc[mayo_mri_qc.series_type == 'DTI']

    mri_list = pd.read_csv(path.join(csv_dir, 'MRILIST.csv'),
                           sep=',',
                           low_memory=False)

    # Selecting only DTI images that are not Multiband, processed, or enhanced images
    mri_list = mri_list[mri_list.SEQUENCE.map(
        lambda x: x.lower().find('dti') > -1)]
    unwanted_sequences = ['MB', 'ADC', 'FA', 'TRACEW', 'Enhanced', 'Reg']
    mri_list = mri_list[mri_list.SEQUENCE.map(
        lambda x: not any(subs in x for subs in unwanted_sequences))]

    for subj in subjs_list:

        # Filter ADNIMERGE, MRI_LIST and QC for only one subject and sort the rows/visits by examination date
        adnimerge_subj = adni_merge[adni_merge.PTID == subj]
        adnimerge_subj = adnimerge_subj.sort_values('EXAMDATE')

        mri_list_subj = mri_list[mri_list.SUBJECT == subj]
        mri_list_subj = mri_list_subj.sort_values('SCANDATE')

        mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(subj[-4:])]

        # Obtain corresponding timepoints for the subject visits
        visits = visits_to_timepoints_mrilist(subj, mri_list_subj,
                                              adnimerge_subj, "DWI")

        for visit_info in visits.keys():
            timepoint = visit_info[0]
            visit_str = visits[visit_info]

            visit_mri_list = mri_list_subj[mri_list_subj.VISIT == visit_str]
            axial = dwi_image(subj, timepoint, visits[visit_info],
                              visit_mri_list, mayo_mri_qc_subj)

            if axial is not None:
                row_to_append = pd.DataFrame(axial, index=['i'])
                dwi_df = pd.concat([dwi_df, row_to_append],
                                   ignore_index=True,
                                   sort=False)

    # Exceptions
    # ==========
    conversion_errors = [('029_S_2395', 'm60'), ('029_S_0824', 'm108'),
                         ('029_S_0914', 'm108'), ('027_S_2219', 'm36'),
                         ('129_S_2332', 'm12'), ('029_S_4384', 'm48'),
                         ('029_S_4385', 'm48'), ('029_S_4585', 'm48'),
                         ('016_S_4591', 'm24'), ('094_S_4630', 'm06'),
                         ('094_S_4649', 'm06'), ('029_S_5219', 'm24'),
                         ('094_S_2238', 'm48'), ('129_S_4287', 'bl'),
                         ('007_S_4611', 'm03'), ('016_S_4638', 'bl'),
                         ('027_S_5118', 'bl'), ('098_S_4018', 'bl'),
                         ('098_S_4003', 'm12'), ('016_S_4584', 'm24'),
                         ('016_S_5007', 'm12'), ('129_S_2347', 'm06'),
                         ('129_S_4220', 'bl'), ('007_S_2058', 'm12'),
                         ('016_S_2007', 'm06'), ('020_S_6358', 'bl'),
                         ('114_S_6039', 'm12'), ('114_S_6057', 'bl'),
                         ('153_S_6274', 'bl'), ('006_S_4485', 'm84'),
                         ('153_S_6237', 'bl'), ('153_S_6336', 'bl'),
                         ('153_S_6450', 'bl')]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((dwi_df.Subject_ID == conv_error[0])
                             & (dwi_df.VISCODE == conv_error[1]))

    if error_indices:
        indices_to_remove = dwi_df.index[reduce(operator.or_, error_indices,
                                                False)]
        dwi_df.drop(indices_to_remove, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(dwi_df, source_dir, 'DWI', 'S', 'Series_ID')

    dwi_tsv_path = path.join(dest_dir, 'conversion_info')
    if not path.exists(dwi_tsv_path):
        mkdir(dwi_tsv_path)
    images.to_csv(path.join(dwi_tsv_path, 'dwi_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
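The two-step sequence filter above first keeps sequences mentioning DTI (case-insensitively), then drops derived and multiband variants by case-sensitive substring match. A self-contained sketch on invented sequence names:

import pandas as pd

seqs = pd.Series(['Axial DTI', 'Axial MB DTI', 'DTI FA map'])  # hypothetical names
seqs = seqs[seqs.map(lambda x: 'dti' in x.lower())]
unwanted = ['MB', 'ADC', 'FA', 'TRACEW', 'Enhanced', 'Reg']
seqs = seqs[seqs.map(lambda x: not any(s in x for s in unwanted))]
# Only 'Axial DTI' survives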
Example #9
def compute_fdg_pet_paths(source_dir, csv_dir, dest_dir, subjs_list, conversion_dir):
    """Compute the paths to the FDG PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    pet_fdg_col = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]
    pet_fdg_df = pd.DataFrame(columns=pet_fdg_col)
    pet_fdg_dfs_list = []

    # Loading needed .csv files
    petqc = pd.read_csv(path.join(csv_dir, "PETQC.csv"), sep=",", low_memory=False)
    petqc3 = pd.read_csv(path.join(csv_dir, "PETC3.csv"), sep=",", low_memory=False)
    pet_meta_list = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]

        if subject_pet_meta.empty:
            continue

        # QC for FDG PET images for ADNI 1, GO and 2
        pet_qc_1go2_subj = petqc[(petqc.PASS == 1) & (petqc.RID == int(subj[-4:]))]

        # QC for FDG PET images for ADNI 3
        pet_qc3_subj = petqc3[(petqc3.SCANQLTY == 1) & (petqc3.RID == int(subj[-4:]))]
        pet_qc3_subj.insert(0, "EXAMDATE", pet_qc3_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        pet_qc_subj = pd.concat(
            [pet_qc_1go2_subj, pet_qc3_subj], axis=0, ignore_index=True, sort=False
        )

        sequences_preprocessing_step = ["Co-registered, Averaged"]
        subj_dfs_list = get_images_pet(
            subj,
            pet_qc_subj,
            subject_pet_meta,
            pet_fdg_col,
            "FDG-PET",
            sequences_preprocessing_step,
        )
        if subj_dfs_list:
            pet_fdg_dfs_list += subj_dfs_list

    if pet_fdg_dfs_list:
        # Concatenating dataframes into one
        pet_fdg_df = pd.concat(pet_fdg_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # NONAME.nii
        ("031_S_0294", "bl"),
        ("037_S_1421", "m36"),
        ("037_S_1078", "m36"),
        # Empty folders
        ("941_S_1195", "m48"),
        ("005_S_0223", "m12"),
    ]

    # Removing known exceptions from images to convert
    if not pet_fdg_df.empty:
        error_ind = pet_fdg_df.index[
            pet_fdg_df.apply(
                lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1
            )
        ]
        pet_fdg_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_fdg_df, source_dir, "FDG", "I", "Image_ID")
    images.to_csv(path.join(conversion_dir, "fdg_pet_paths.tsv"), sep="\t", index=False)

    return images
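Concatenating the ADNI 1/GO/2 and ADNI 3 QC tables with sort=False simply unions their columns, leaving NaN where a column exists in only one table. A minimal illustration with an invented column subset:

import pandas as pd

qc_1go2 = pd.DataFrame({"RID": [294], "PASS": [1]})    # hypothetical row
qc_3 = pd.DataFrame({"RID": [6088], "SCANQLTY": [1]})  # hypothetical row
merged = pd.concat([qc_1go2, qc_3], axis=0, ignore_index=True, sort=False)
# merged has columns RID, PASS and SCANQLTY; unmatched cells are NaN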
Example #10
def compute_t1_paths(source_dir, csv_dir, dest_dir, subjs_list,
                     conversion_dir):
    """Compute the paths to T1 MR images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        images: a dataframe with all the paths to the T1 MR images that will be converted into BIDS
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        visits_to_timepoints,
    )
    from clinica.utils.stream import cprint

    t1_col_df = [
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Field_Strength",
        "Series_ID",
        "Image_ID",
        "Original",
    ]
    t1_df = pd.DataFrame(columns=t1_col_df)
    t1_dfs_list = []

    # Loading needed .csv files
    adni_merge = pd.read_csv(path.join(csv_dir, "ADNIMERGE.csv"),
                             sep=",",
                             low_memory=False)
    mprage_meta = pd.read_csv(path.join(csv_dir, "MPRAGEMETA.csv"),
                              sep=",",
                              low_memory=False)
    mri_quality = pd.read_csv(path.join(csv_dir, "MRIQUALITY.csv"),
                              sep=",",
                              low_memory=False)
    mayo_mri_qc = pd.read_csv(
        path.join(csv_dir, "MAYOADIRL_MRI_IMAGEQC_12_08_15.csv"),
        sep=",",
        low_memory=False,
    )
    # Keep only T1 scans
    mayo_mri_qc = mayo_mri_qc[mayo_mri_qc.series_type == "T1"]

    # We will convert the images for each subject in the subject list
    for subj in subjs_list:

        # Filter ADNIMERGE, MPRAGE METADATA and QC for only one subject and sort the rows/visits by examination date
        adnimerge_subj = adni_merge[adni_merge.PTID == subj]
        adnimerge_subj = adnimerge_subj.sort_values("EXAMDATE")

        mprage_meta_subj = mprage_meta[mprage_meta.SubjectID == subj]
        mprage_meta_subj = mprage_meta_subj.sort_values("ScanDate")

        mri_quality_subj = mri_quality[mri_quality.RID == int(subj[-4:])]
        mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(subj[-4:])]

        # Obtain corresponding timepoints for the subject visits
        visits = visits_to_timepoints(subj, mprage_meta_subj, adnimerge_subj,
                                      "T1", "Visit", "ScanDate")

        for visit_info in visits.keys():
            cohort = visit_info[1]
            timepoint = visit_info[0]
            visit_str = visits[visit_info]

            image_dict = None

            if cohort in ("ADNI1", "ADNIGO", "ADNI2"):
                image_dict = adni1go2_image(
                    subj,
                    timepoint,
                    visit_str,
                    mprage_meta_subj,
                    mri_quality_subj,
                    mayo_mri_qc_subj,
                    preferred_field_strength=1.5 if cohort == "ADNI1" else 3.0,
                )
            elif cohort == "ADNI3":
                image_dict = adni3_image(subj, timepoint, visit_str,
                                         mprage_meta_subj, mayo_mri_qc_subj)
            else:
                cprint(
                    f"Subject {subj} visit {visit_str} belongs to an unknown cohort: {cohort}"
                )

            if image_dict is not None:
                row_to_append = pd.DataFrame(image_dict, index=["i"])
                t1_dfs_list.append(row_to_append)

    if t1_dfs_list:
        t1_df = pd.concat(t1_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # Eq_1
        ("031_S_0830", "m48"),
        ("100_S_0995", "m18"),
        ("031_S_0867", "m48"),
        ("100_S_0892", "m18"),
        ("003_S_6264", "m12"),
        # Empty folders
        # ('029_S_0845', 'm24'),
        # ('094_S_1267', 'm24'),
        # ('029_S_0843', 'm24'),
        # ('027_S_0307', 'm48'),
        # ('057_S_1269', 'm24'),
        # ('036_S_4899', 'm03'),
        # ('033_S_1016', 'm120'),
        # ('130_S_4984', 'm12'),
        # ('027_S_4802', 'm06'),
        # ('131_S_0409', 'bl'),
        # ('082_S_4224', 'm24'),
        # ('006_S_4960', 'bl'),
        # ('006_S_4960', 'm03'),
        # ('006_S_4960', 'm06'),
        # ('006_S_4960', 'm12'),
        # ('006_S_4960', 'm24'),
        # ('006_S_4960', 'm36'),
        # ('006_S_4960', 'm72'),
        # ('022_S_5004', 'bl'),
        # ('022_S_5004', 'm03'),
        # Several images: T1wa ...
        ("006_S_4485", "m84"),
        ("029_S_2395", "m72"),
        ("114_S_6039", "bl"),
        ("016_S_4952", "m48"),
    ]

    # Removing known exceptions from images to convert
    if not t1_df.empty:
        error_indices = t1_df.index[t1_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                axis=1)]
        t1_df.drop(error_indices, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(t1_df, source_dir, "T1", "S", "Series_ID")
    images.to_csv(path.join(conversion_dir, "t1_paths.tsv"),
                  sep="\t",
                  index=False)

    return images
Example #11
def compute_av45_fbb_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute the paths to the AV45 and Florbetaben PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS

    """

    import pandas as pd
    import os
    from os import path
    from clinica.iotools.converters.adni_to_bids.adni_utils import get_images_pet, find_image_path

    pet_amyloid_col = [
        'Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
        'Study_ID', 'Series_ID', 'Image_ID', 'Original', 'Tracer'
    ]
    pet_amyloid_df = pd.DataFrame(columns=pet_amyloid_col)
    pet_amyloid_dfs_list = []

    # Loading needed .csv files
    av45qc = pd.read_csv(path.join(csv_dir, 'AV45QC.csv'),
                         sep=',',
                         low_memory=False)
    amyqc = pd.read_csv(path.join(csv_dir, 'AMYQC.csv'),
                        sep=',',
                        low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'),
                                sep=',',
                                low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.empty:
            continue

        # QC for AV45 PET images for ADNI 1, GO and 2
        av45_qc_subj = av45qc[(av45qc.PASS == 1)
                              & (av45qc.RID == int(subj[-4:]))]

        # QC for Amyloid PET images for ADNI 3
        amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1)
                            & (amyqc.RID == int(subj[-4:]))]
        amy_qc_subj.insert(0, 'EXAMDATE', amy_qc_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        amyloid_qc_subj = pd.concat([av45_qc_subj, amy_qc_subj],
                                    axis=0,
                                    ignore_index=True,
                                    sort=False)

        sequences_preprocessing_step = [
            'AV45 Co-registered, Averaged', 'FBB Co-registered, Averaged'
        ]
        subj_dfs_list = get_images_pet(subj, amyloid_qc_subj, subject_pet_meta,
                                       pet_amyloid_col, 'Amyloid-PET',
                                       sequences_preprocessing_step)
        if subj_dfs_list:
            pet_amyloid_dfs_list += subj_dfs_list

    if pet_amyloid_dfs_list:
        pet_amyloid_df = pd.concat(pet_amyloid_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # Eq_1
        ('128_S_2220', 'm48'),
        # Several output images
        ('098_S_4275', 'm84')
    ]

    # Removing known exceptions from images to convert
    if not pet_amyloid_df.empty:
        error_ind = pet_amyloid_df.index[pet_amyloid_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                              axis=1)]
        pet_amyloid_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_amyloid_df, source_dir, 'Amyloid', 'I',
                             'Image_ID')

    amyloid_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(amyloid_csv_path):
        os.mkdir(amyloid_csv_path)
    images.to_csv(path.join(amyloid_csv_path, 'amyloid_pet_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
Example #12
def compute_fmri_path(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute the paths to fMRI images

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns: pandas DataFrame containing the path for each fMRI image

    """

    from os import path, mkdir
    import pandas as pd
    from clinica.iotools.converters.adni_to_bids.adni_utils import find_image_path, visits_to_timepoints

    fmri_col = [
        'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID',
        'Field_Strength', 'Series_ID', 'Image_ID'
    ]
    fmri_df = pd.DataFrame(columns=fmri_col)
    fmri_dfs_list = []

    # Loading needed .csv files
    adni_merge = pd.read_csv(path.join(csv_dir, 'ADNIMERGE.csv'),
                             sep=',',
                             low_memory=False)

    mayo_mri_qc = pd.read_csv(path.join(csv_dir,
                                        'MAYOADIRL_MRI_IMAGEQC_12_08_15.csv'),
                              sep=',',
                              low_memory=False)
    mayo_mri_qc = mayo_mri_qc[mayo_mri_qc.series_type == 'fMRI']
    mayo_mri_qc.columns = [x.upper() for x in mayo_mri_qc.columns]

    mayo_mri_qc3 = pd.read_csv(path.join(csv_dir,
                                         'MAYOADIRL_MRI_QUALITY_ADNI3.csv'),
                               sep=',',
                               low_memory=False)
    mayo_mri_qc3 = mayo_mri_qc3[mayo_mri_qc3.SERIES_TYPE == 'EPB']

    # Concatenating visits in both QC files
    mayo_mri_qc = pd.concat([mayo_mri_qc, mayo_mri_qc3],
                            axis=0,
                            ignore_index=True,
                            sort=False)

    mri_list = pd.read_csv(path.join(csv_dir, 'MRILIST.csv'),
                           sep=',',
                           low_memory=False)

    # Selecting only fMRI images that are not Multiband
    # Sequences containing 'MRI' match all fMRI scans, but not other modalities
    mri_list = mri_list[mri_list.SEQUENCE.str.contains('MRI')]
    unwanted_sequences = ['MB']
    mri_list = mri_list[mri_list.SEQUENCE.map(
        lambda x: not any(subs in x for subs in unwanted_sequences))]

    # We will convert the images for each subject in the subject list
    for subj in subjs_list:

        # Filter ADNIMERGE, MRI_LIST and QC for only one subject and sort the rows/visits by examination date
        adnimerge_subj = adni_merge[adni_merge.PTID == subj]
        adnimerge_subj = adnimerge_subj.sort_values('EXAMDATE')

        mri_list_subj = mri_list[mri_list.SUBJECT == subj]
        mri_list_subj = mri_list_subj.sort_values('SCANDATE')

        mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(subj[-4:])]

        # Obtain corresponding timepoints for the subject visits
        visits = visits_to_timepoints(subj, mri_list_subj, adnimerge_subj,
                                      "fMRI")

        for visit_info in visits.keys():
            timepoint = visit_info[0]
            visit_str = visits[visit_info]

            visit_mri_list = mri_list_subj[mri_list_subj.VISIT == visit_str]
            image = fmri_image(subj, timepoint, visits[visit_info],
                               visit_mri_list, mayo_mri_qc_subj)

            if image is not None:
                row_to_append = pd.DataFrame(image, index=['i'])
                fmri_dfs_list.append(row_to_append)

    if fmri_dfs_list:
        fmri_df = pd.concat(fmri_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [
        ('006_S_4485', 'm84'),
        # Eq_1
        ('094_S_4503', 'm24'),
        ('009_S_4388', 'm72'),
        ('036_S_6088', 'bl'),
        ('036_S_6134', 'bl'),
        # Multiple images
        ('029_S_2395', 'm72')
    ]

    # Removing known exceptions from images to convert
    if not fmri_df.empty:
        error_ind = fmri_df.index[fmri_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                axis=1)]
        fmri_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(fmri_df, source_dir, 'fMRI', 'S', 'Series_ID')

    fmri_tsv_path = path.join(dest_dir, 'conversion_info')
    if not path.exists(fmri_tsv_path):
        mkdir(fmri_tsv_path)
    images.to_csv(path.join(fmri_tsv_path, 'fmri_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
Example #13
def compute_tau_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute the paths to Tau PET images

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns: pandas DataFrame containing the path for each Tau PET image

    """

    import pandas as pd
    import os
    import operator
    from os import path
    from functools import reduce
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, find_image_path
    from clinica.utils.stream import cprint

    pet_tau_col = [
        'Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
        'Study_ID', 'Series_ID', 'Image_ID', 'Original'
    ]
    pet_tau_df = pd.DataFrame(columns=pet_tau_col)

    # Loading needed .csv files
    tauqc = pd.read_csv(path.join(csv_dir, 'TAUQC.csv'),
                        sep=',',
                        low_memory=False)
    tauqc3 = pd.read_csv(path.join(csv_dir, 'TAUQC3.csv'),
                         sep=',',
                         low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'),
                                sep=',',
                                low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects without TAU PET images metadata
            continue

        # QC for TAU PET images for ADNI 2
        tau_qc2_subj = tauqc[(tauqc.SCANQLTY == 1)
                             & (tauqc.RID == int(subj[-4:]))]

        # QC for TAU PET images for ADNI 3
        tau_qc3_subj = tauqc3[(tauqc3.SCANQLTY == 1)
                              & (tauqc3.RID == int(subj[-4:]))]

        # Concatenating visits in both QC files
        tau_qc_subj = pd.concat([tau_qc2_subj, tau_qc3_subj],
                                axis=0,
                                ignore_index=True,
                                sort=False)

        for visit in list(tau_qc_subj.VISCODE2.unique()):
            # TODO Infer visit from ADNIMERGE visits
            if str(visit) == 'nan':
                continue

            pet_qc_visit = tau_qc_subj[tau_qc_subj.VISCODE2 == visit]

            # If there are several scans for a timepoint, we keep the image acquired last (higher LONIUID)
            pet_qc_visit = pet_qc_visit.sort_values("LONIUID", ascending=False)

            qc_visit = pet_qc_visit.iloc[0]

            # Corresponding LONI image ID for original scan in PET Meta List
            int_image_id = int(qc_visit.LONIUID[1:])

            original_pet_meta = subject_pet_meta[
                (subject_pet_meta['Orig/Proc'] == 'Original')
                & (subject_pet_meta['Image ID'] == int_image_id)]

            # If no corresponding TAU PET metadata for an original image,
            # take scan at the same date containing TAU or AV1451 in sequence name
            if original_pet_meta.shape[0] < 1:
                original_pet_meta = subject_pet_meta[
                    (subject_pet_meta['Orig/Proc'] == 'Original')
                    & subject_pet_meta.Sequence.map(lambda x: (
                        (x.lower().find('tau') > -1) |
                        (x.lower().find('av-1451') > -1) |
                        (x.lower().find('av1451') > -1)))
                    & (subject_pet_meta['Scan Date'] == qc_visit.SCANDATE)]

                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere QC visits without image metadata
                    cprint('No TAU-PET images metadata for subject - ' + subj +
                           ' for visit ' + qc_visit.VISCODE2)
                    continue

            original_image = original_pet_meta.iloc[0]

            # Co-registered and Averaged image with the same Series ID as the original image
            averaged_pet_meta = subject_pet_meta[
                (subject_pet_meta['Sequence'] == 'AV1451 Co-registered, Averaged')
                & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            # If an explicit AV1451 Co-registered, Averaged image does not exist,
            # the original image is already in that preprocessing stage.

            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame(
                [[qc_visit.Phase, subj, qc_visit.VISCODE2,
                  str(visit), sequence, date, str(study_id),
                  str(series_id), str(image_id), original]],
                columns=pet_tau_col)
            pet_tau_df = pd.concat([pet_tau_df, row_to_append],
                                   ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # Multiple output images
        ('098_S_4275', 'm84')
    ]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_tau_df.Subject_ID == conv_error[0])
                             & (pet_tau_df.VISCODE == conv_error[1]))

    if error_indices:
        indices_to_remove = pet_tau_df.index[reduce(operator.or_,
                                                    error_indices, False)]
        pet_tau_df.drop(indices_to_remove, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(pet_tau_df, source_dir, 'TAU', 'I', 'Image_ID')

    tau_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(tau_csv_path):
        os.mkdir(tau_csv_path)
    images.to_csv(path.join(tau_csv_path, 'tau_pet_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
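On the int(qc_visit.LONIUID[1:]) step above: LONI UIDs carry a one-letter type prefix ('I' for image IDs, 'S' for series IDs, matching the prefix later passed to find_image_path), so stripping the first character recovers the numeric ID stored in PET_META_LIST:

loniuid = 'I123456'              # hypothetical LONI image UID
int_image_id = int(loniuid[1:])  # -> 123456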
Example #14
        ("135_S_6284", "m12"),
        ("068_S_0127", "m180"),
        ("068_S_2187", "m120"),
        # Several output images
        ("114_S_6039", "bl"),
    ]

    # Removing known exceptions from images to convert
    if not flair_df.empty:
        error_ind = flair_df.index[flair_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                  axis=1)]
        flair_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(flair_df, source_dir, "FLAIR", "S", "Series_ID")
    images.to_csv(path.join(conversion_dir, "flair_paths.tsv"),
                  sep="\t",
                  index=False)

    return images


def flair_image(subject_id, timepoint, visit_str, visit_mri_list, mri_qc_subj):
    """
    One image among those in the input list is chosen according to QC,
    and the corresponding metadata is extracted into a dictionary.

    Args:
        subject_id: Subject identifier
        timepoint: Visit code
Example #15
def compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute the paths to the PIB PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS

    """

    import pandas as pd
    import os
    import operator
    from os import path
    from functools import reduce
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, find_image_path
    from clinica.utils.stream import cprint

    pet_pib_col = ['Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID',
                   'Series_ID', 'Image_ID', 'Original']
    pet_pib_df = pd.DataFrame(columns=pet_pib_col)

    # Loading needed .csv files
    pibqc = pd.read_csv(path.join(csv_dir, 'PIBQC.csv'), sep=',', low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'), sep=',', low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects without PIB PET metadata
            continue

        # QC for PIB PET images
        pet_qc_subj = pibqc[(pibqc.PASS == 1) & (pibqc.RID == int(subj[-4:]))]

        for visit in list(pet_qc_subj.VISCODE.unique()):
            pet_qc_visit = pet_qc_subj[pet_qc_subj.VISCODE == visit]

            qc_visit = pet_qc_visit.iloc[0]

            # Corresponding LONI image ID for original scan in PET Meta List
            int_image_id = int(qc_visit.LONIUID[1:])

            original_pet_meta = subject_pet_meta[
                (subject_pet_meta['Orig/Proc'] == 'Original')
                & (subject_pet_meta['Image ID'] == int_image_id)
                & (subject_pet_meta.Sequence.map(lambda s: s.lower().find('early') < 0))]
            # If no corresponding PIB PET metadata for an original image,
            # take scan at the same date containing PIB in sequence name
            if original_pet_meta.shape[0] < 1:
                original_pet_meta = subject_pet_meta[
                    (subject_pet_meta['Orig/Proc'] == 'Original')
                    & (subject_pet_meta.Sequence.map(lambda x: x.lower().find('pib') > -1))
                    & (subject_pet_meta['Scan Date'] == qc_visit.EXAMDATE)]

                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere QC visits without image metadata
                    cprint('No PIB-PET images metadata for subject - ' + subj + ' for visit ' + qc_visit.VISCODE)
                    continue

            original_image = original_pet_meta.iloc[0]

            # Co-registered and Averaged image with the same Series ID of the original image
            averaged_pet_meta = subject_pet_meta[(subject_pet_meta['Sequence'] == 'PIB Co-registered, Averaged')
                                                 & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            # If an explicit Co-registered, Averaged image does not exist,
            # the original image is already in that preprocessing stage.

            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame(
                [['ADNI1', subj, qc_visit.VISCODE, str(visit), sequence, date,
                  str(study_id), str(series_id), str(image_id), original]],
                columns=pet_pib_col)
            # DataFrame.append was removed in pandas 2.0; concatenate instead
            pet_pib_df = pd.concat([pet_pib_df, row_to_append], ignore_index=True)

    # TODO check for exceptions
    # Exceptions
    # ==========
    conversion_errors = []

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_pib_df.Subject_ID == conv_error[0])
                             & (pet_pib_df.VISCODE == conv_error[1]))

    if error_indices:
        indices_to_remove = pet_pib_df.index[reduce(operator.or_, error_indices, False)]
        pet_pib_df.drop(indices_to_remove, inplace=True)

    # DONE - Make a function reusing this code for different modalities
    # TODO check if it works properly

    images = find_image_path(pet_pib_df, source_dir, 'PIB', 'I', 'Image_ID')

    pib_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(pib_csv_path):
        os.mkdir(pib_csv_path)
    images.to_csv(path.join(pib_csv_path, 'pib_pet_paths.tsv'), sep='\t', index=False)

    return images
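A hypothetical driver for compute_pib_pet_paths above; every path and the subject identifier are placeholders for a local ADNI download, not values taken from the source:

source_dir = '/data/ADNI'             # raw ADNI image tree (placeholder)
csv_dir = '/data/ADNI/clinical_data'  # must contain PIBQC.csv and PET_META_LIST.csv
dest_dir = '/data/ADNI_BIDS'          # conversion_info/ is created here if missing
subjs_list = ['123_S_0456']           # hypothetical ADNI subject identifier

images = compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list)
print(images[['Subject_ID', 'VISCODE', 'Image_ID']])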
Example #16
        ("130_S_6329", "bl"),
        ("027_S_6183", "m24"),
        ("123_S_6891", "bl"),
        # Several output images
        ("029_S_2395", "m72"),
    ]

    # Removing known exceptions from images to convert
    if not dwi_df.empty:
        error_ind = dwi_df.index[dwi_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                              axis=1)]
        dwi_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(dwi_df, source_dir, "DWI", "S", "Series_ID")
    images.to_csv(path.join(conversion_dir, "dwi_paths.tsv"),
                  sep="\t",
                  index=False)

    return images


def dwi_image(subject_id, timepoint, visit_str, visit_mri_list, mri_qc_subj):
    """
    One image among those in the input list is chosen according to QC,
    and then the corresponding metadata is extracted to a dictionary.

    Args:
        subject_id: Subject identifier
        timepoint: Visit code
Example #17
def compute_av45_fbb_pet_paths(
    source_dir, csv_dir, dest_dir, subjs_list, conversion_dir
):
    """Compute the paths to the AV45 and Florbetaben PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS

    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    pet_amyloid_col = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
        "Tracer",
    ]
    pet_amyloid_df = pd.DataFrame(columns=pet_amyloid_col)
    pet_amyloid_dfs_list = []

    # Loading needed .csv files
    av45qc = pd.read_csv(path.join(csv_dir, "AV45QC.csv"), sep=",", low_memory=False)
    amyqc = pd.read_csv(path.join(csv_dir, "AMYQC.csv"), sep=",", low_memory=False)
    pet_meta_list = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]

        if subject_pet_meta.empty:
            continue

        # QC for AV45 PET images for ADNI 1, GO and 2
        av45_qc_subj = av45qc[(av45qc.PASS == 1) & (av45qc.RID == int(subj[-4:]))]

        # QC for Amyloid PET images for ADNI 3
        amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1) & (amyqc.RID == int(subj[-4:]))]
        amy_qc_subj.insert(0, "EXAMDATE", amy_qc_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        amyloid_qc_subj = pd.concat(
            [av45_qc_subj, amy_qc_subj], axis=0, ignore_index=True, sort=False
        )

        sequences_preprocessing_step = [
            "AV45 Co-registered, Averaged",
            "FBB Co-registered, Averaged",
        ]
        subj_dfs_list = get_images_pet(
            subj,
            amyloid_qc_subj,
            subject_pet_meta,
            pet_amyloid_col,
            "Amyloid-PET",
            sequences_preprocessing_step,
        )
        if subj_dfs_list:
            pet_amyloid_dfs_list += subj_dfs_list

    if pet_amyloid_dfs_list:
        pet_amyloid_df = pd.concat(pet_amyloid_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # Eq_1
        ("128_S_2220", "m48"),
        # Several output images
        ("098_S_4275", "m84"),
    ]

    # Removing known exceptions from images to convert
    if not pet_amyloid_df.empty:
        error_ind = pet_amyloid_df.index[
            pet_amyloid_df.apply(
                lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1
            )
        ]
        pet_amyloid_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_amyloid_df, source_dir, "Amyloid", "I", "Image_ID")
    images.to_csv(
        path.join(conversion_dir, "amyloid_pet_paths.tsv"), sep="\t", index=False
    )

    return images
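A hypothetical driver for the five-argument signature above; the paths are placeholders, and conversion_dir is assumed to already exist because the function writes amyloid_pet_paths.tsv into it directly:

source_dir = "/data/ADNI"                           # placeholder ADNI image tree
csv_dir = "/data/ADNI/clinical_data"                # AV45QC.csv, AMYQC.csv, PET_META_LIST.csv
dest_dir = "/data/ADNI_BIDS"
conversion_dir = "/data/ADNI_BIDS/conversion_info"  # assumed to exist already
subjs_list = ["123_S_0456"]                         # hypothetical subject

images = compute_av45_fbb_pet_paths(source_dir, csv_dir, dest_dir, subjs_list, conversion_dir)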
Example #18
def compute_tau_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute the paths to Tau PET images

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns: pandas DataFrame containing the path for each Tau PET image

    """

    import pandas as pd
    import os
    from os import path
    from clinica.iotools.converters.adni_to_bids.adni_utils import get_images_pet, find_image_path

    pet_tau_col = [
        'Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
        'Study_ID', 'Series_ID', 'Image_ID', 'Original'
    ]
    pet_tau_df = pd.DataFrame(columns=pet_tau_col)
    pet_tau_dfs_list = []

    # Loading needed .csv files
    tauqc = pd.read_csv(path.join(csv_dir, 'TAUQC.csv'),
                        sep=',',
                        low_memory=False)
    tauqc3 = pd.read_csv(path.join(csv_dir, 'TAUQC3.csv'),
                         sep=',',
                         low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'),
                                sep=',',
                                low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.empty:
            continue

        # QC for TAU PET images for ADNI 2
        tau_qc2_subj = tauqc[(tauqc.SCANQLTY == 1)
                             & (tauqc.RID == int(subj[-4:]))]

        # QC for TAU PET images for ADNI 3
        tau_qc3_subj = tauqc3[(tauqc3.SCANQLTY == 1)
                              & (tauqc3.RID == int(subj[-4:]))]

        # Concatenating visits in both QC files
        tau_qc_subj = pd.concat([tau_qc2_subj, tau_qc3_subj],
                                axis=0,
                                ignore_index=True,
                                sort=False)
        tau_qc_subj.rename(columns={"SCANDATE": "EXAMDATE"}, inplace=True)

        sequences_preprocessing_step = ['AV1451 Co-registered, Averaged']
        subj_dfs_list = get_images_pet(subj, tau_qc_subj, subject_pet_meta,
                                       pet_tau_col, 'TAU-PET',
                                       sequences_preprocessing_step)
        if subj_dfs_list:
            pet_tau_dfs_list += subj_dfs_list

    if pet_tau_dfs_list:
        pet_tau_df = pd.concat(pet_tau_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # Multiple output images
        ('098_S_4275', 'm84')
    ]

    # Removing known exceptions from images to convert
    if not pet_tau_df.empty:
        error_ind = pet_tau_df.index[pet_tau_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                      axis=1)]
        pet_tau_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(pet_tau_df, source_dir, 'TAU', 'I', 'Image_ID')

    tau_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(tau_csv_path):
        os.mkdir(tau_csv_path)
    images.to_csv(path.join(tau_csv_path, 'tau_pet_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
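The apply-based exception filter above replaces the reduce/operator.or_ idiom of the older variants with a single pass over the rows. The same logic in isolation, on made-up data:

import pandas as pd

pet_tau_df = pd.DataFrame({'Subject_ID': ['098_S_4275', '011_S_0002'],
                           'VISCODE': ['m84', 'bl']})
conversion_errors = [('098_S_4275', 'm84')]

error_ind = pet_tau_df.index[pet_tau_df.apply(
    lambda x: (x.Subject_ID, x.VISCODE) in conversion_errors, axis=1)]
pet_tau_df.drop(error_ind, inplace=True)  # only the '011_S_0002' row remains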
Example #19
        ('029_S_2395', 'm72'),
        ('130_S_6043', 'bl'),
        ('031_S_2018', 'bl'),
        # Several output images
        ('114_S_6039', 'bl')
    ]

    # Removing known exceptions from images to convert
    if not flair_df.empty:
        error_ind = flair_df.index[flair_df.apply(lambda x: (
            (x.Subject_ID, x.VISCODE) in conversion_errors),
                                                  axis=1)]
        flair_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(flair_df, source_dir, 'FLAIR', 'S', 'Series_ID')

    flair_tsv_path = path.join(dest_dir, 'conversion_info')
    if not path.exists(flair_tsv_path):
        mkdir(flair_tsv_path)
    images.to_csv(path.join(flair_tsv_path, 'flair_paths.tsv'),
                  sep='\t',
                  index=False)

    return images


def flair_image(subject_id, timepoint, visit_str, visit_mri_list, mri_qc_subj):
    """
    One image among those in the input list is chosen according to QC,
    and then the corresponding metadata is extracted to a dictionary.
Example #20
def compute_tau_pet_paths(source_dir, csv_dir, dest_dir, subjs_list, conversion_dir):
    """Compute the paths to Tau PET images.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns: pandas DataFrame containing the path for each Tau PET image
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    pet_tau_col = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]
    pet_tau_df = pd.DataFrame(columns=pet_tau_col)
    pet_tau_dfs_list = []

    # Loading needed .csv files
    tauqc = pd.read_csv(path.join(csv_dir, "TAUQC.csv"), sep=",", low_memory=False)
    tauqc3 = pd.read_csv(path.join(csv_dir, "TAUQC3.csv"), sep=",", low_memory=False)
    pet_meta_list = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]

        if subject_pet_meta.empty:
            continue

        # QC for TAU PET images for ADNI 2
        tau_qc2_subj = tauqc[(tauqc.SCANQLTY == 1) & (tauqc.RID == int(subj[-4:]))]

        # QC for TAU PET images for ADNI 3
        tau_qc3_subj = tauqc3[(tauqc3.SCANQLTY == 1) & (tauqc3.RID == int(subj[-4:]))]

        # Concatenating visits in both QC files
        tau_qc_subj = pd.concat(
            [tau_qc2_subj, tau_qc3_subj], axis=0, ignore_index=True, sort=False
        )
        tau_qc_subj.rename(columns={"SCANDATE": "EXAMDATE"}, inplace=True)

        sequences_preprocessing_step = ["AV1451 Co-registered, Averaged"]
        subj_dfs_list = get_images_pet(
            subj,
            tau_qc_subj,
            subject_pet_meta,
            pet_tau_col,
            "TAU-PET",
            sequences_preprocessing_step,
        )
        if subj_dfs_list:
            pet_tau_dfs_list += subj_dfs_list

    if pet_tau_dfs_list:
        pet_tau_df = pd.concat(pet_tau_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [("098_S_4275", "m84")]  # Multiple output images

    # Removing known exceptions from images to convert
    if not pet_tau_df.empty:
        error_ind = pet_tau_df.index[
            pet_tau_df.apply(
                lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1
            )
        ]
        pet_tau_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(pet_tau_df, source_dir, "TAU", "I", "Image_ID")
    images.to_csv(path.join(conversion_dir, "tau_pet_paths.tsv"), sep="\t", index=False)

    return images
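Both tau variants collect one small dataframe per subject in pet_tau_dfs_list and concatenate once at the end, instead of appending inside the loop as the oldest versions do; this avoids repeated copying and the DataFrame.append API removed in pandas 2.0. The idiom in isolation, with made-up rows:

import pandas as pd

columns = ["Subject_ID", "Image_ID"]
dfs_list = []
for subject, image in [("011_S_0002", "I100"), ("011_S_0003", "I101")]:
    dfs_list.append(pd.DataFrame([[subject, image]], columns=columns))

# Fall back to an empty frame when nothing was collected
df = pd.concat(dfs_list, ignore_index=True) if dfs_list else pd.DataFrame(columns=columns)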
Example #21
def compute_av45_fbb_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute the paths to the AV45 and Florbetaben PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS

    """

    import pandas as pd
    import os
    import operator
    from os import path
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, find_image_path
    from clinica.utils.stream import cprint
    from functools import reduce

    pet_amyloid_col = [
        'Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
        'Study_ID', 'Series_ID', 'Image_ID', 'Original', 'Tracer'
    ]
    pet_amyloid_df = pd.DataFrame(columns=pet_amyloid_col)

    # Loading needed .csv files
    av45qc = pd.read_csv(path.join(csv_dir, 'AV45QC.csv'),
                         sep=',',
                         low_memory=False)
    amyqc = pd.read_csv(path.join(csv_dir, 'AMYQC.csv'),
                        sep=',',
                        low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'),
                                sep=',',
                                low_memory=False)

    for subj in subjs_list:

        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects without Amyloid PET images
            # cprint('No Amyloid PET images metadata for subject - ' + subj)
            continue

        # QC for AV45 PET images for ADNI 1, GO and 2
        av45_qc_subj = av45qc[(av45qc.PASS == 1)
                              & (av45qc.RID == int(subj[-4:]))]

        # QC for Amyloid PET images for ADNI 3
        amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1)
                            & (amyqc.RID == int(subj[-4:]))]
        amy_qc_subj.insert(0, 'EXAMDATE', amy_qc_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        amyloid_qc_subj = pd.concat([av45_qc_subj, amy_qc_subj],
                                    axis=0,
                                    ignore_index=True,
                                    sort=False)

        for visit in list(amyloid_qc_subj.VISCODE2.unique()):
            amyloid_qc_visit = amyloid_qc_subj[amyloid_qc_subj.VISCODE2 == visit]

            # TODO Check
            # If there are several scans for a timepoint, keep the image acquired last (highest LONIUID)
            amyloid_qc_visit = amyloid_qc_visit.sort_values("LONIUID", ascending=False)

            qc_visit = amyloid_qc_visit.iloc[0]

            # Corresponding LONI image ID for original scan in PET Meta List
            int_image_id = int(qc_visit.LONIUID[1:])

            original_pet_meta = subject_pet_meta[
                (subject_pet_meta['Orig/Proc'] == 'Original')
                & (subject_pet_meta['Image ID'] == int_image_id)
                & (subject_pet_meta.Sequence.map(lambda s: s.lower().find('early') < 0))]

            # If no corresponding Amyloid PET metadata for an original image,
            # take scan at the same date
            if original_pet_meta.shape[0] < 1:
                original_pet_meta = subject_pet_meta[
                    (subject_pet_meta['Orig/Proc'] == 'Original')
                    & (subject_pet_meta['Scan Date'] == qc_visit.EXAMDATE)
                    & (subject_pet_meta.Sequence.map(lambda s: s.lower().find('early') < 0))]

                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere QC visits without image metadata
                    cprint('No Amyloid PET images metadata for subject - ' +
                           subj + ' for visit ' + qc_visit.VISCODE2)
                    continue

            original_image = original_pet_meta.iloc[0]

            # To determine type of amyloid PET tracer we find the
            # Coreg, Avg, Std Img and Vox Siz, Uniform Resolution image
            # with the same Series ID of the original image
            final_pet_meta = subject_pet_meta[
                (subject_pet_meta.Sequence.map(
                    lambda x: x.find('Coreg, Avg, Std Img and Vox Siz, Uniform Resolution') > 0))
                & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            if final_pet_meta.shape[0] < 1:
                final_pet_meta = subject_pet_meta[
                    (subject_pet_meta.Sequence.map(
                        lambda x: x.find('Coreg, Avg, Std Img and Vox Siz, Uniform Resolution') > 0))
                    & (subject_pet_meta['Scan Date'] == original_image['Scan Date'])]
                if final_pet_meta.shape[0] < 1:
                    # TODO Log
                    cprint(
                        'No "Coreg, Avg, Std Img and Vox Siz, Uniform Resolution" '
                        'Amyloid PET image metadata for subject ' + subj +
                        ' for visit ' + qc_visit.VISCODE2)
                    continue

            processed_sequence = final_pet_meta.iloc[0].Sequence

            if processed_sequence.startswith('AV45'):
                tracer = 'AV45'
            elif processed_sequence.startswith('FBB'):
                tracer = 'FBB'
            else:
                # TODO Log
                cprint(
                    'Unknown tracer for Amyloid PET image metadata for subject '
                    + subj + ' for visit ' + qc_visit.VISCODE2)
                continue

            # Co-registered and Averaged image with the same Series ID as the original image
            averaged_pet_meta = subject_pet_meta[
                (subject_pet_meta['Sequence'] == '%s Co-registered, Averaged' % tracer)
                & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            # If an explicit Co-registered, Averaged image does not exist,
            # the original image is already in that preprocessing stage.

            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame(
                [[qc_visit.Phase, subj, qc_visit.VISCODE2, str(visit), sequence,
                  date, str(study_id), str(series_id), str(image_id), original,
                  tracer]],
                columns=pet_amyloid_col)
            # DataFrame.append was removed in pandas 2.0; concatenate instead
            pet_amyloid_df = pd.concat([pet_amyloid_df, row_to_append],
                                       ignore_index=True)

    # TODO check for new exceptions in ADNI3
    # Exceptions
    # ==========
    conversion_errors = [  # Eq_1
        ('128_S_2220', 'm48')
    ]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_amyloid_df.Subject_ID == conv_error[0])
                             & (pet_amyloid_df.VISCODE == conv_error[1]))

    if error_indices:
        indices_to_remove = pet_amyloid_df.index[reduce(
            operator.or_, error_indices, False)]
        pet_amyloid_df.drop(indices_to_remove, inplace=True)

    # DONE - Make a function reusing this code for different modalities
    # TODO check if it works properly

    images = find_image_path(pet_amyloid_df, source_dir, 'Amyloid', 'I',
                             'Image_ID')

    amyloid_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(amyloid_csv_path):
        os.mkdir(amyloid_csv_path)
    images.to_csv(path.join(amyloid_csv_path, 'amyloid_pet_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
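A condensed restatement of the tracer branch above; detect_tracer is a hypothetical helper (not part of Clinica) that mirrors the processed_sequence.startswith checks:

def detect_tracer(sequence_name):
    # The processed sequence name is expected to start with the tracer code
    for tracer in ('AV45', 'FBB'):
        if sequence_name.startswith(tracer):
            return tracer
    return None  # unknown tracer: the loop above logs via cprint and skips the visit

assert detect_tracer('AV45 Co-registered, Averaged') == 'AV45'
assert detect_tracer('FBB Co-registered, Averaged') == 'FBB'
assert detect_tracer('PIB Co-registered, Averaged') is None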
Example #22
def compute_flair_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """
    Compute the paths to the FLAIR images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the FLAIR images that will be converted into BIDS

    """

    import operator
    from os import path, mkdir
    from functools import reduce
    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import find_image_path, visits_to_timepoints_mrilist

    flair_col_df = [
        'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID',
        'Series_ID', 'Image_ID', 'Field_Strength', 'Scanner'
    ]
    flair_df = pd.DataFrame(columns=flair_col_df)

    # Loading needed .csv files
    adni_merge = pd.read_csv(path.join(csv_dir, 'ADNIMERGE.csv'),
                             sep=',',
                             low_memory=False)

    mayo_mri_qc = pd.read_csv(path.join(csv_dir,
                                        'MAYOADIRL_MRI_IMAGEQC_12_08_15.csv'),
                              sep=',',
                              low_memory=False)
    mayo_mri_qc = mayo_mri_qc[mayo_mri_qc.series_type == 'AFL']

    mri_list = pd.read_csv(path.join(csv_dir, 'MRILIST.csv'),
                           sep=',',
                           low_memory=False)

    # Selecting FLAIR images, discarding unwanted sequences (e.g. MPR reconstructions)
    mri_list = mri_list[mri_list.SEQUENCE.map(
        lambda x: x.lower().find('flair') > -1)]
    unwanted_sequences = ['_MPR_']
    mri_list = mri_list[mri_list.SEQUENCE.map(
        lambda x: not any(subs in x for subs in unwanted_sequences))]

    for subj in subjs_list:

        # Filter ADNIMERGE, MRI_LIST and QC for only one subject and sort the rows/visits by examination date
        adnimerge_subj = adni_merge[adni_merge.PTID == subj]
        adnimerge_subj = adnimerge_subj.sort_values('EXAMDATE')

        mri_list_subj = mri_list[mri_list.SUBJECT == subj]
        mri_list_subj = mri_list_subj.sort_values('SCANDATE')

        mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(subj[-4:])]

        # Obtain corresponding timepoints for the subject visits
        visits = visits_to_timepoints_mrilist(subj, mri_list_subj,
                                              adnimerge_subj, 'FLAIR')

        for visit_info in visits.keys():
            timepoint = visit_info[0]
            visit_str = visits[visit_info]

            visit_mri_list = mri_list_subj[mri_list_subj.VISIT == visit_str]
            flair = flair_image(subj, timepoint, visits[visit_info],
                                visit_mri_list, mayo_mri_qc_subj)

            if flair is not None:
                row_to_append = pd.DataFrame(flair, index=['i'])
                # Concatenating instead of DataFrame.append, which was removed in pandas 2.0
                flair_df = pd.concat([flair_df, row_to_append],
                                     ignore_index=True, sort=False)

    # Exceptions
    # ==========
    conversion_errors = [  # Eq_1 images
        ('141_S_0767', 'm84'),
        ('067_S_5205', 'bl'),
        ('127_S_4928', 'm24'),
        ('024_S_4674', 'm06'),
        ('123_S_2363', 'm24'),
        ('053_S_4578', 'm48'),
        ('128_S_4586', 'm48'),
        ('053_S_4813', 'm48'),
        ('053_S_5272', 'm24'),
    ]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((flair_df.Subject_ID == conv_error[0])
                             & (flair_df.VISCODE == conv_error[1]))
    if error_indices:
        indices_to_remove = flair_df.index[reduce(operator.or_, error_indices,
                                                  False)]
        flair_df.drop(indices_to_remove, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(flair_df, source_dir, 'FLAIR', 'S', 'Series_ID')

    flair_tsv_path = path.join(dest_dir, 'conversion_info')
    if not path.exists(flair_tsv_path):
        mkdir(flair_tsv_path)
    images.to_csv(path.join(flair_tsv_path, 'flair_paths.tsv'),
                  sep='\t',
                  index=False)

    return images
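Finally, a hypothetical driver for compute_flair_paths; the paths and subject identifier are placeholders, and csv_dir must hold the three spreadsheets loaded above (ADNIMERGE.csv, MAYOADIRL_MRI_IMAGEQC_12_08_15.csv, MRILIST.csv):

source_dir = '/data/ADNI'             # placeholder ADNI image tree
csv_dir = '/data/ADNI/clinical_data'  # clinical spreadsheets listed above
dest_dir = '/data/ADNI_BIDS'          # conversion_info/ is created here if missing

images = compute_flair_paths(source_dir, csv_dir, dest_dir, ['123_S_0456'])  # hypothetical subject
print(images.head())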