def compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to the PIB PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    import os
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    pet_pib_col = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]
    pet_pib_df = pd.DataFrame(columns=pet_pib_col)
    pet_pib_dfs_list = []

    # Loading needed .csv files
    pibqc = pd.read_csv(path.join(csv_dir, "PIBQC.csv"), sep=",", low_memory=False)
    pet_meta_list = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    for subj in subjs_list:
        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]
        if subject_pet_meta.empty:
            continue

        # QC for PIB PET images (RID is the numeric tail of the subject ID)
        pet_qc_subj = pibqc[(pibqc.PASS == 1) & (pibqc.RID == int(subj[-4:]))]

        sequences_preprocessing_step = ["PIB Co-registered, Averaged"]
        subj_dfs_list = get_images_pet(
            subj,
            pet_qc_subj,
            subject_pet_meta,
            pet_pib_col,
            "PIB-PET",
            sequences_preprocessing_step,
            viscode_field="VISCODE",
        )
        if subj_dfs_list:
            pet_pib_dfs_list += subj_dfs_list

    if pet_pib_dfs_list:
        pet_pib_df = pd.concat(pet_pib_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = []

    # Removing known exceptions from images to convert
    if not pet_pib_df.empty:
        error_ind = pet_pib_df.index[
            pet_pib_df.apply(
                lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1
            )
        ]
        pet_pib_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_pib_df, source_dir, "PIB", "I", "Image_ID")

    pib_csv_path = path.join(dest_dir, "conversion_info")
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir pair and also
    # creates any missing parent directories.
    os.makedirs(pib_csv_path, exist_ok=True)
    images.to_csv(path.join(pib_csv_path, "pib_pet_paths.tsv"), sep="\t", index=False)

    return images
def compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list, conversion_dir):
    """Compute the paths to the PIB PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    columns = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]

    # Load the QC and PET metadata spreadsheets once, up front.
    qc_df = pd.read_csv(path.join(csv_dir, "PIBQC.csv"), sep=",", low_memory=False)
    meta_df = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    per_subject_frames = []
    for subject in subjs_list:
        # Metadata restricted to the current subject; skip if none found.
        subject_meta = meta_df[meta_df["Subject"] == subject]
        if subject_meta.empty:
            continue

        # RID is the numeric tail of the subject identifier.
        rid = int(subject[-4:])
        # Keep only scans that passed QC for this subject.
        subject_qc = qc_df[(qc_df.PASS == 1) & (qc_df.RID == rid)]

        found = get_images_pet(
            subject,
            subject_qc,
            subject_meta,
            columns,
            "PIB-PET",
            ["PIB Co-registered, Averaged"],
            viscode_field="VISCODE",
        )
        if found:
            per_subject_frames.extend(found)

    if per_subject_frames:
        pib_df = pd.concat(per_subject_frames, ignore_index=True)
    else:
        pib_df = pd.DataFrame(columns=columns)

    # Exceptions
    # ==========
    conversion_errors = []

    # Drop the (subject, visit) pairs known to fail conversion.
    if not pib_df.empty:
        bad_rows = pib_df.index[
            pib_df.apply(
                lambda row: (row.Subject_ID, row.VISCODE) in conversion_errors,
                axis=1,
            )
        ]
        pib_df.drop(bad_rows, inplace=True)

    images = find_image_path(pib_df, source_dir, "PIB", "I", "Image_ID")
    images.to_csv(path.join(conversion_dir, "pib_pet_paths.tsv"), sep="\t", index=False)

    return images
def compute_av45_fbb_pet_paths(
    source_dir, csv_dir, dest_dir, subjs_list, conversion_dir
):
    """Compute the paths to the AV45 and Florbetaben PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    columns = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
        "Tracer",
    ]

    # Load the QC and PET metadata spreadsheets once, up front.
    av45_qc = pd.read_csv(path.join(csv_dir, "AV45QC.csv"), sep=",", low_memory=False)
    amy_qc = pd.read_csv(path.join(csv_dir, "AMYQC.csv"), sep=",", low_memory=False)
    meta_df = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    per_subject_frames = []
    for subject in subjs_list:
        # Metadata restricted to the current subject; skip if none found.
        subject_meta = meta_df[meta_df["Subject"] == subject]
        if subject_meta.empty:
            continue

        # RID is the numeric tail of the subject identifier.
        rid = int(subject[-4:])
        # QC for AV45 PET images for ADNI 1, GO and 2.
        qc_1go2 = av45_qc[(av45_qc.PASS == 1) & (av45_qc.RID == rid)]
        # QC for Amyloid PET images for ADNI 3.
        qc_adni3 = amy_qc[(amy_qc.SCANQLTY == 1) & (amy_qc.RID == rid)]
        # Expose the ADNI 3 scan date under the EXAMDATE name used by the
        # ADNI 1/GO/2 table so the concatenation below aligns.
        qc_adni3.insert(0, "EXAMDATE", qc_adni3.SCANDATE.to_list())

        # Concatenating visits in both QC files.
        merged_qc = pd.concat(
            [qc_1go2, qc_adni3], axis=0, ignore_index=True, sort=False
        )

        found = get_images_pet(
            subject,
            merged_qc,
            subject_meta,
            columns,
            "Amyloid-PET",
            ["AV45 Co-registered, Averaged", "FBB Co-registered, Averaged"],
        )
        if found:
            per_subject_frames.extend(found)

    if per_subject_frames:
        amyloid_df = pd.concat(per_subject_frames, ignore_index=True)
    else:
        amyloid_df = pd.DataFrame(columns=columns)

    # Exceptions
    # ==========
    conversion_errors = [
        # Eq_1
        ("128_S_2220", "m48"),
        # Several output images
        ("098_S_4275", "m84"),
    ]

    # Drop the (subject, visit) pairs known to fail conversion.
    if not amyloid_df.empty:
        bad_rows = amyloid_df.index[
            amyloid_df.apply(
                lambda row: (row.Subject_ID, row.VISCODE) in conversion_errors,
                axis=1,
            )
        ]
        amyloid_df.drop(bad_rows, inplace=True)

    images = find_image_path(amyloid_df, source_dir, "Amyloid", "I", "Image_ID")
    images.to_csv(
        path.join(conversion_dir, "amyloid_pet_paths.tsv"), sep="\t", index=False
    )

    return images
def compute_fdg_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to the FDG PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    import os
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    pet_fdg_col = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]
    pet_fdg_df = pd.DataFrame(columns=pet_fdg_col)
    pet_fdg_dfs_list = []

    # Loading needed .csv files
    petqc = pd.read_csv(path.join(csv_dir, "PETQC.csv"), sep=",", low_memory=False)
    petqc3 = pd.read_csv(path.join(csv_dir, "PETC3.csv"), sep=",", low_memory=False)
    pet_meta_list = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    for subj in subjs_list:
        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]
        if subject_pet_meta.empty:
            continue

        # QC for FDG PET images for ADNI 1, GO and 2
        pet_qc_1go2_subj = petqc[(petqc.PASS == 1) & (petqc.RID == int(subj[-4:]))]

        # QC for FDG PET images for ADNI 3
        pet_qc3_subj = petqc3[(petqc3.SCANQLTY == 1) & (petqc3.RID == int(subj[-4:]))]
        # Expose the ADNI 3 scan date under the EXAMDATE name so both QC
        # tables align when concatenated.
        pet_qc3_subj.insert(0, "EXAMDATE", pet_qc3_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        pet_qc_subj = pd.concat(
            [pet_qc_1go2_subj, pet_qc3_subj], axis=0, ignore_index=True, sort=False
        )

        sequences_preprocessing_step = ["Co-registered, Averaged"]
        subj_dfs_list = get_images_pet(
            subj,
            pet_qc_subj,
            subject_pet_meta,
            pet_fdg_col,
            "FDG-PET",
            sequences_preprocessing_step,
        )
        if subj_dfs_list:
            pet_fdg_dfs_list += subj_dfs_list

    if pet_fdg_dfs_list:
        # Concatenating dataframes into one
        pet_fdg_df = pd.concat(pet_fdg_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [
        # NONAME.nii
        ("031_S_0294", "bl"),
        ("037_S_1421", "m36"),
        ("037_S_1078", "m36"),
        # Empty folders
        ("941_S_1195", "m48"),
        ("005_S_0223", "m12"),
    ]

    # Removing known exceptions from images to convert
    if not pet_fdg_df.empty:
        error_ind = pet_fdg_df.index[
            pet_fdg_df.apply(
                lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1
            )
        ]
        pet_fdg_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_fdg_df, source_dir, "FDG", "I", "Image_ID")

    fdg_csv_path = path.join(dest_dir, "conversion_info")
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir pair and also
    # creates any missing parent directories.
    os.makedirs(fdg_csv_path, exist_ok=True)
    images.to_csv(path.join(fdg_csv_path, "fdg_pet_paths.tsv"), sep="\t", index=False)

    return images
def compute_tau_pet_paths(source_dir, csv_dir, dest_dir, subjs_list, conversion_dir):
    """Compute the paths to Tau PET images.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        pandas Dataframe containing the path for each Tau PET image
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    columns = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]

    # Load the QC and PET metadata spreadsheets once, up front.
    qc_adni2 = pd.read_csv(path.join(csv_dir, "TAUQC.csv"), sep=",", low_memory=False)
    qc_adni3 = pd.read_csv(path.join(csv_dir, "TAUQC3.csv"), sep=",", low_memory=False)
    meta_df = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    per_subject_frames = []
    for subject in subjs_list:
        # Metadata restricted to the current subject; skip if none found.
        subject_meta = meta_df[meta_df["Subject"] == subject]
        if subject_meta.empty:
            continue

        # RID is the numeric tail of the subject identifier.
        rid = int(subject[-4:])
        # QC for TAU PET images for ADNI 2.
        subj_qc2 = qc_adni2[(qc_adni2.SCANQLTY == 1) & (qc_adni2.RID == rid)]
        # QC for TAU PET images for ADNI 3.
        subj_qc3 = qc_adni3[(qc_adni3.SCANQLTY == 1) & (qc_adni3.RID == rid)]

        # Concatenating visits in both QC files.
        merged_qc = pd.concat(
            [subj_qc2, subj_qc3], axis=0, ignore_index=True, sort=False
        )
        # Harmonize the date column name with the rest of the pipeline.
        merged_qc.rename(columns={"SCANDATE": "EXAMDATE"}, inplace=True)

        found = get_images_pet(
            subject,
            merged_qc,
            subject_meta,
            columns,
            "TAU-PET",
            ["AV1451 Co-registered, Averaged"],
        )
        if found:
            per_subject_frames.extend(found)

    if per_subject_frames:
        tau_df = pd.concat(per_subject_frames, ignore_index=True)
    else:
        tau_df = pd.DataFrame(columns=columns)

    # Exceptions
    # ==========
    conversion_errors = [("098_S_4275", "m84")]  # Multiple output images

    # Drop the (subject, visit) pairs known to fail conversion.
    if not tau_df.empty:
        bad_rows = tau_df.index[
            tau_df.apply(
                lambda row: (row.Subject_ID, row.VISCODE) in conversion_errors,
                axis=1,
            )
        ]
        tau_df.drop(bad_rows, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(tau_df, source_dir, "TAU", "I", "Image_ID")
    images.to_csv(path.join(conversion_dir, "tau_pet_paths.tsv"), sep="\t", index=False)

    return images
def compute_tau_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to Tau PET images.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        pandas Dataframe containing the path for each Tau PET image
    """
    import os
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    pet_tau_col = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]
    pet_tau_df = pd.DataFrame(columns=pet_tau_col)
    pet_tau_dfs_list = []

    # Loading needed .csv files
    tauqc = pd.read_csv(path.join(csv_dir, "TAUQC.csv"), sep=",", low_memory=False)
    tauqc3 = pd.read_csv(path.join(csv_dir, "TAUQC3.csv"), sep=",", low_memory=False)
    pet_meta_list = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    for subj in subjs_list:
        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]
        if subject_pet_meta.empty:
            continue

        # QC for TAU PET images for ADNI 2
        tau_qc2_subj = tauqc[(tauqc.SCANQLTY == 1) & (tauqc.RID == int(subj[-4:]))]

        # QC for TAU PET images for ADNI 3
        tau_qc3_subj = tauqc3[(tauqc3.SCANQLTY == 1) & (tauqc3.RID == int(subj[-4:]))]

        # Concatenating visits in both QC files
        tau_qc_subj = pd.concat(
            [tau_qc2_subj, tau_qc3_subj], axis=0, ignore_index=True, sort=False
        )
        # Harmonize the date column name with the rest of the pipeline.
        tau_qc_subj.rename(columns={"SCANDATE": "EXAMDATE"}, inplace=True)

        sequences_preprocessing_step = ["AV1451 Co-registered, Averaged"]
        subj_dfs_list = get_images_pet(
            subj,
            tau_qc_subj,
            subject_pet_meta,
            pet_tau_col,
            "TAU-PET",
            sequences_preprocessing_step,
        )
        if subj_dfs_list:
            pet_tau_dfs_list += subj_dfs_list

    if pet_tau_dfs_list:
        pet_tau_df = pd.concat(pet_tau_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [
        # Multiple output images
        ("098_S_4275", "m84")
    ]

    # Removing known exceptions from images to convert
    if not pet_tau_df.empty:
        error_ind = pet_tau_df.index[
            pet_tau_df.apply(
                lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1
            )
        ]
        pet_tau_df.drop(error_ind, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(pet_tau_df, source_dir, "TAU", "I", "Image_ID")

    tau_csv_path = path.join(dest_dir, "conversion_info")
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir pair and also
    # creates any missing parent directories.
    os.makedirs(tau_csv_path, exist_ok=True)
    images.to_csv(path.join(tau_csv_path, "tau_pet_paths.tsv"), sep="\t", index=False)

    return images
def compute_fdg_pet_paths(source_dir, csv_dir, dest_dir, subjs_list, conversion_dir):
    """Compute the paths to the FDG PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    columns = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]

    # Load the QC and PET metadata spreadsheets once, up front.
    qc_1go2_df = pd.read_csv(path.join(csv_dir, "PETQC.csv"), sep=",", low_memory=False)
    qc_adni3_df = pd.read_csv(path.join(csv_dir, "PETC3.csv"), sep=",", low_memory=False)
    meta_df = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    per_subject_frames = []
    for subject in subjs_list:
        # Metadata restricted to the current subject; skip if none found.
        subject_meta = meta_df[meta_df["Subject"] == subject]
        if subject_meta.empty:
            continue

        # RID is the numeric tail of the subject identifier.
        rid = int(subject[-4:])
        # QC for FDG PET images for ADNI 1, GO and 2.
        subj_qc_1go2 = qc_1go2_df[(qc_1go2_df.PASS == 1) & (qc_1go2_df.RID == rid)]
        # QC for FDG PET images for ADNI 3.
        subj_qc3 = qc_adni3_df[(qc_adni3_df.SCANQLTY == 1) & (qc_adni3_df.RID == rid)]
        # Expose the ADNI 3 scan date under the EXAMDATE name used by the
        # ADNI 1/GO/2 table so the concatenation below aligns.
        subj_qc3.insert(0, "EXAMDATE", subj_qc3.SCANDATE.to_list())

        # Concatenating visits in both QC files.
        merged_qc = pd.concat(
            [subj_qc_1go2, subj_qc3], axis=0, ignore_index=True, sort=False
        )

        found = get_images_pet(
            subject,
            merged_qc,
            subject_meta,
            columns,
            "FDG-PET",
            ["Co-registered, Averaged"],
        )
        if found:
            per_subject_frames.extend(found)

    if per_subject_frames:
        fdg_df = pd.concat(per_subject_frames, ignore_index=True)
    else:
        fdg_df = pd.DataFrame(columns=columns)

    # Exceptions
    # ==========
    conversion_errors = [
        # NONAME.nii
        ("031_S_0294", "bl"),
        ("037_S_1421", "m36"),
        ("037_S_1078", "m36"),
        # Empty folders
        ("941_S_1195", "m48"),
        ("005_S_0223", "m12"),
    ]

    # Drop the (subject, visit) pairs known to fail conversion.
    if not fdg_df.empty:
        bad_rows = fdg_df.index[
            fdg_df.apply(
                lambda row: (row.Subject_ID, row.VISCODE) in conversion_errors,
                axis=1,
            )
        ]
        fdg_df.drop(bad_rows, inplace=True)

    images = find_image_path(fdg_df, source_dir, "FDG", "I", "Image_ID")
    images.to_csv(path.join(conversion_dir, "fdg_pet_paths.tsv"), sep="\t", index=False)

    return images
def compute_av45_fbb_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to the AV45 and Florbetaben PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    import os
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    pet_amyloid_col = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
        "Tracer",
    ]
    pet_amyloid_df = pd.DataFrame(columns=pet_amyloid_col)
    pet_amyloid_dfs_list = []

    # Loading needed .csv files
    av45qc = pd.read_csv(path.join(csv_dir, "AV45QC.csv"), sep=",", low_memory=False)
    amyqc = pd.read_csv(path.join(csv_dir, "AMYQC.csv"), sep=",", low_memory=False)
    pet_meta_list = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    for subj in subjs_list:
        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]
        if subject_pet_meta.empty:
            continue

        # QC for AV45 PET images for ADNI 1, GO and 2
        av45_qc_subj = av45qc[(av45qc.PASS == 1) & (av45qc.RID == int(subj[-4:]))]

        # QC for Amyloid PET images for ADNI 3
        amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1) & (amyqc.RID == int(subj[-4:]))]
        # Expose the ADNI 3 scan date under the EXAMDATE name so both QC
        # tables align when concatenated.
        amy_qc_subj.insert(0, "EXAMDATE", amy_qc_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        amyloid_qc_subj = pd.concat(
            [av45_qc_subj, amy_qc_subj], axis=0, ignore_index=True, sort=False
        )

        sequences_preprocessing_step = [
            "AV45 Co-registered, Averaged",
            "FBB Co-registered, Averaged",
        ]
        subj_dfs_list = get_images_pet(
            subj,
            amyloid_qc_subj,
            subject_pet_meta,
            pet_amyloid_col,
            "Amyloid-PET",
            sequences_preprocessing_step,
        )
        if subj_dfs_list:
            pet_amyloid_dfs_list += subj_dfs_list

    if pet_amyloid_dfs_list:
        pet_amyloid_df = pd.concat(pet_amyloid_dfs_list, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [
        # Eq_1
        ("128_S_2220", "m48"),
        # Several output images
        ("098_S_4275", "m84"),
    ]

    # Removing known exceptions from images to convert
    if not pet_amyloid_df.empty:
        error_ind = pet_amyloid_df.index[
            pet_amyloid_df.apply(
                lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1
            )
        ]
        pet_amyloid_df.drop(error_ind, inplace=True)

    images = find_image_path(pet_amyloid_df, source_dir, "Amyloid", "I", "Image_ID")

    amyloid_csv_path = path.join(dest_dir, "conversion_info")
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir pair and also
    # creates any missing parent directories.
    os.makedirs(amyloid_csv_path, exist_ok=True)
    images.to_csv(
        path.join(amyloid_csv_path, "amyloid_pet_paths.tsv"), sep="\t", index=False
    )

    return images