def dwi_image(subject_id, timepoint, visit_str, visit_mri_list, mri_qc_subj):
    """Pick one DWI scan for a visit through QC and extract its metadata.

    Args:
        subject_id: Subject identifier
        timepoint: Visit code
        visit_str: Visit name
        visit_mri_list: DataFrame of images metadata for the visit
        mri_qc_subj: DataFrame of QC entries for the subject's scans

    Returns:
        dict with the selected image metadata, or None when no image
        passes QC.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        replace_sequence_chars,
        select_image_qc,
    )

    # QC decides which of the candidate images to keep.
    chosen_uid = select_image_qc(list(visit_mri_list.IMAGEUID), mri_qc_subj)
    if chosen_uid is None:
        return None

    chosen_scan = visit_mri_list[visit_mri_list.IMAGEUID == chosen_uid].iloc[0]

    return {
        'Subject_ID': subject_id,
        'VISCODE': timepoint,
        'Visit': visit_str,
        'Sequence': replace_sequence_chars(chosen_scan.SEQUENCE),
        'Scan_Date': chosen_scan['SCANDATE'],
        'Study_ID': str(int(chosen_scan.STUDYID)),
        'Series_ID': str(int(chosen_scan.SERIESID)),
        'Image_ID': str(int(chosen_scan.IMAGEUID)),
        'Field_Strength': chosen_scan.MAGSTRENGTH,
    }
def adni3_image(subject_id, timepoint, visit_str, mprage_meta_subj, mayo_mri_qc_subj):
    """Select the preferred T1 scan for an ADNI 3 subject at a given visit.

    Args:
        subject_id: string containing subject ID
        timepoint: string of visit code in months
        visit_str: string of visit name
        mprage_meta_subj: DataFrame of MPRAGE metadata of images for the subject
        mayo_mri_qc_subj: DataFrame of MAYO Clinic MR image quality for the subject

    Returns:
        Dictionary containing selected scan information, or None when no
        matching image exists for this visit.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars
    from clinica.utils.stream import cprint

    # Keep original accelerated sequences for the visit, excluding the
    # '_ND' variants.
    # BUG FIX: the sequence is lower-cased before the endswith test, so the
    # suffix must be lower-case too; comparing against '_ND' made the
    # exclusion a no-op (see the sibling adni3_image that tests '_nd').
    filtered_scan = mprage_meta_subj[
        (mprage_meta_subj['Orig/Proc'] == 'Original')
        & (mprage_meta_subj.Visit == visit_str)
        & mprage_meta_subj.Sequence.map(
            lambda x: ('accel' in x.lower()) and not x.lower().endswith('_nd'))]

    if filtered_scan.shape[0] < 1:
        # TODO - LOG THIS
        cprint('NO MPRAGE Meta for ADNI3: ' + subject_id + ' for visit ' + timepoint + ' - ' + visit_str)
        return None

    # QC-based choice among candidates; ADNI3 prefers 3T acquisitions.
    scan = select_scan_qc_adni2(filtered_scan, mayo_mri_qc_subj, preferred_field_strength=3.0)
    sequence = replace_sequence_chars(scan.Sequence)

    return {'Subject_ID': subject_id,
            'VISCODE': timepoint,
            'Visit': visit_str,
            'Sequence': sequence,
            'Scan_Date': scan.ScanDate,
            'Study_ID': str(scan.StudyID),
            'Series_ID': str(scan.SeriesID),
            'Image_ID': str(scan.ImageUID),
            'Field_Strength': scan.MagStrength,
            'Original': True}
def dwi_image(subject_id, timepoint, visit_str, visit_mri_list, mri_qc_subj):
    """Choose one DWI image for the visit according to QC and extract its metadata.

    Args:
        subject_id: Subject identifier
        timepoint: Visit code
        visit_str: Visit name
        visit_mri_list: DataFrame of images metadata for the visit
        mri_qc_subj: DataFrame of QC entries for the subject's scans

    Returns:
        dict with the selected image metadata, or None when no image
        passes QC.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, select_image_qc

    # QC decides which of the candidate images to keep.
    sel_image = select_image_qc(list(visit_mri_list.IMAGEUID), mri_qc_subj)
    if sel_image is None:
        return None

    sel_scan = visit_mri_list[visit_mri_list.IMAGEUID == sel_image].iloc[0]

    image_dict = {
        'Subject_ID': subject_id,
        'VISCODE': timepoint,
        'Visit': visit_str,
        'Sequence': replace_sequence_chars(sel_scan.SEQUENCE),
        'Scan_Date': sel_scan['SCANDATE'],
        'Study_ID': str(int(sel_scan.STUDYID)),
        'Series_ID': str(int(sel_scan.SERIESID)),
        'Image_ID': str(int(sel_scan.IMAGEUID)),
        'Field_Strength': sel_scan.MAGSTRENGTH
    }
    return image_dict
def adni1GO2_image(subject_id, timepoint, visit_str, mprage_meta_subj, mri_quality_subj, mayo_mri_qc_subj,
                   preferred_field_strength=3.0):
    """Select the preferred processed T1 scan for an ADNI 1/GO/2 subject at a visit.

    Preference order: Scaled processed image, then N3m processed image,
    then the best original image (delegated to original_image).

    Args:
        subject_id: string containing subject ID
        timepoint: string of visit code in months
        visit_str: string of visit name
        mprage_meta_subj: DataFrame of MPRAGE metadata of images for the subject
        mri_quality_subj: DataFrame of MR image quality for the subject
        mayo_mri_qc_subj: DataFrame of MAYO Clinic MR image quality for the subject
        preferred_field_strength: preferred magnet strength when several exist
            (1.5 for ADNI1, 3.0 otherwise)

    Returns:
        Dictionary containing selected scan information, or None when the
        selected scan fails QC.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars

    # Get the preferred scan (image series that has been Scaled)
    filtered_mprage = mprage_meta_subj[(mprage_meta_subj['Orig/Proc'] == 'Processed')
                                       & (mprage_meta_subj.Visit == visit_str)
                                       & (mprage_meta_subj.Sequence.map(lambda x: x.endswith('Scaled')))]

    # If no preferred image found, get N3 processed image (N3m)
    if filtered_mprage.shape[0] < 1:
        filtered_mprage = mprage_meta_subj[(mprage_meta_subj['Orig/Proc'] == 'Processed')
                                           & (mprage_meta_subj.Visit == visit_str)
                                           & (mprage_meta_subj.Sequence.map(lambda x: x.endswith('N3m')))]

    # If no N3 processed image found (it means there are no processed images at all), get best original image
    if filtered_mprage.shape[0] < 1:
        return original_image(subject_id, timepoint, visit_str, mprage_meta_subj, mayo_mri_qc_subj,
                              preferred_field_strength)

    # If there are images with different magnetic field strength, prefer 1.5T images for ADNI1, 3.0T otherwise
    if len(filtered_mprage.MagStrength.unique()) > 1:
        filtered_mprage = filtered_mprage[filtered_mprage.MagStrength == preferred_field_strength]

    # Sort by Series ID in case there are several images, so we keep the one acquired first
    filtered_mprage = filtered_mprage.sort_values('SeriesID')
    scan = filtered_mprage.iloc[0]

    # Check if selected scan passes QC (if QC exists)
    if not check_qc(scan, subject_id, visit_str, mprage_meta_subj, mri_quality_subj):
        return None

    # Truncate the sequence name right after 'N3' or 'N3m'.
    # BUG FIX: the original indexed scan.Sequence[n3 + 2] unconditionally,
    # which raises IndexError when the sequence ends exactly in 'N3';
    # guard the index before peeking at the optional trailing 'm'.
    n3 = scan.Sequence.find('N3')
    end = n3 + 2
    if end < len(scan.Sequence) and scan.Sequence[end] == 'm':
        end += 1
    sequence = replace_sequence_chars(scan.Sequence[:end])

    return {'Subject_ID': subject_id,
            'VISCODE': timepoint,
            'Visit': visit_str,
            'Sequence': sequence,
            'Scan_Date': scan.ScanDate,
            'Study_ID': str(scan.StudyID),
            'Series_ID': str(scan.SeriesID),
            'Image_ID': str(scan.ImageUID),
            'Field_Strength': scan.MagStrength,
            'Original': False}
def adni3_image(subject_id, timepoint, visit_str, mprage_meta_subj, mayo_mri_qc_subj):
    """Select the preferred scan for a subject in a visit, for the ADNI 3 cohort.

    Args:
        subject_id: string containing subject ID
        timepoint: string of visit code in months
        visit_str: string of visit name
        mprage_meta_subj: DataFrame of MPRAGE metadata of images for the subject
        mayo_mri_qc_subj: DataFrame of MAYO Clinic MR image quality for the subject

    Returns:
        Dictionary containing selected scan information, or None when no
        matching image exists for this visit.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars
    from clinica.utils.stream import cprint

    # Build the candidate mask piece by piece: original accelerated
    # sequences of this visit, without their '_nd' counterparts.
    is_original = mprage_meta_subj['Orig/Proc'] == 'Original'
    is_this_visit = mprage_meta_subj.Visit == visit_str
    is_accelerated = mprage_meta_subj.Sequence.str.contains('accel', case=False, na=False)
    is_nd_variant = mprage_meta_subj.Sequence.str.lower().str.endswith('_nd', na=False)

    candidates = mprage_meta_subj[is_original & is_this_visit & is_accelerated & ~is_nd_variant]

    if candidates.empty:
        cprint('NO MPRAGE Meta for ADNI3: ' + subject_id + ' for visit ' + timepoint + ' - ' + visit_str)
        return None

    chosen = select_scan_from_qc(candidates, mayo_mri_qc_subj, preferred_field_strength=3.0)

    return {
        'Subject_ID': subject_id,
        'VISCODE': timepoint,
        'Visit': visit_str,
        'Sequence': replace_sequence_chars(chosen.Sequence),
        'Scan_Date': chosen.ScanDate,
        'Study_ID': str(chosen.StudyID),
        'Series_ID': str(chosen.SeriesID),
        'Image_ID': str(chosen.ImageUID),
        'Field_Strength': chosen.MagStrength,
        'Original': True
    }
def adni3_image(subject_id, timepoint, visit_str, mprage_meta_subj, mayo_mri_qc_subj):
    """Select the preferred scan for a subject in a visit, given the subject belongs to ADNI 3 cohort.

    Args:
        subject_id: string containing subject ID
        timepoint: string of visit code in months
        visit_str: string of visit name
        mprage_meta_subj: DataFrame of MPRAGE metadata of images corresponding to the subject
        mayo_mri_qc_subj: DataFrame of MAYO Clinic MR image quality of images corresponding to the subject

    Returns:
        Dictionary containing selected scan information, or None when no
        matching image exists for this visit.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        replace_sequence_chars,
    )
    from clinica.utils.stream import cprint

    # Keep original (unprocessed) accelerated sequences of this visit and
    # drop the '_ND' variants of each acquisition (presumably the
    # non-distortion-corrected duplicates — TODO confirm).
    filtered_scan = mprage_meta_subj[
        (mprage_meta_subj["Orig/Proc"] == "Original")
        & (mprage_meta_subj.Visit == visit_str)
        & mprage_meta_subj.Sequence.str.contains("accel", case=False, na=False)
        & ~mprage_meta_subj.Sequence.str.lower().str.endswith("_nd", na=False)]

    if filtered_scan.empty:
        cprint("NO MPRAGE Meta for ADNI3: " + subject_id + " for visit " + timepoint + " - " + visit_str)
        return None

    # QC-based choice among the candidates; ADNI3 prefers 3T acquisitions.
    scan = select_scan_from_qc(filtered_scan, mayo_mri_qc_subj, preferred_field_strength=3.0)
    sequence = replace_sequence_chars(scan.Sequence)

    return {
        "Subject_ID": subject_id,
        "VISCODE": timepoint,
        "Visit": visit_str,
        "Sequence": sequence,
        "Scan_Date": scan.ScanDate,
        "Study_ID": str(scan.StudyID),
        "Series_ID": str(scan.SeriesID),
        "Image_ID": str(scan.ImageUID),
        "Field_Strength": scan.MagStrength,
        "Original": True,
    }
def adni2_image(subject_id, timepoint, visit_str, mprage_meta_subj_orig, mayo_mri_qc_subj,
                preferred_field_strength=3.0):
    """Select the preferred T1 scan for an ADNI 2 subject at a given visit.

    Accepts MPRAGE sequences (several spellings, excluding names containing
    a '2') or non-accelerated SPGR sequences, then lets QC pick among them.

    Args:
        subject_id: string containing subject ID
        timepoint: string of visit code in months
        visit_str: string of visit name
        mprage_meta_subj_orig: DataFrame of original-image MPRAGE metadata for the subject
        mayo_mri_qc_subj: DataFrame of MAYO Clinic MR image quality for the subject
        preferred_field_strength: preferred magnet strength when several exist

    Returns:
        Dictionary containing selected scan information, or None when no
        matching image exists for this visit.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars
    from clinica.utils.stream import cprint

    def _accepts_mprage(seq):
        # Any MPRAGE spelling, but not repeat series whose name contains '2'
        # (the '2' test is case-sensitive and runs on the raw name).
        lowered = seq.lower()
        named_mprage = ('mprage' in lowered) or ('mp-rage' in lowered) or ('mp rage' in lowered)
        return named_mprage and ('2' not in seq)

    def _accepts_spgr(seq):
        # SPGR sequences, excluding accelerated ('acc') ones.
        lowered = seq.lower()
        return ('spgr' in lowered) and ('acc' not in lowered)

    same_visit = mprage_meta_subj_orig.Visit == visit_str
    cond_mprage = same_visit & mprage_meta_subj_orig.Sequence.map(_accepts_mprage)
    cond_spgr = same_visit & mprage_meta_subj_orig.Sequence.map(_accepts_spgr)

    filtered_scan = mprage_meta_subj_orig[cond_mprage | cond_spgr]

    if filtered_scan.shape[0] < 1:
        # TODO - LOG THIS
        cprint('NO MPRAGE Meta2: ' + subject_id + ' for visit ' + timepoint + ' - ' + visit_str)
        return None

    chosen = select_scan_qc_adni2(filtered_scan, mayo_mri_qc_subj, preferred_field_strength)

    return {
        'Subject_ID': subject_id,
        'VISCODE': timepoint,
        'Visit': visit_str,
        'Sequence': replace_sequence_chars(chosen.Sequence),
        'Scan_Date': chosen.ScanDate,
        'Study_ID': str(chosen.StudyID),
        'Series_ID': str(chosen.SeriesID),
        'Image_ID': str(chosen.ImageUID),
        'Field_Strength': chosen.MagStrength,
        'Original': True
    }
def dwi_image(subject_id, timepoint, visit_str, ida_meta_scans, mri_qc_subj, enhanced):
    """Choose one DWI image (enhanced or not) for the visit via QC and extract its metadata.

    Args:
        subject_id: Subject identifier
        timepoint: Visit code
        visit_str: Visit name
        ida_meta_scans: DataFrame of IDA metadata for the visit's images
        mri_qc_subj: DataFrame of QC entries for the subject's scans
        enhanced: flag propagated unchanged into the returned metadata

    Returns:
        dict with the selected image metadata, or None when no image
        passes QC.
    """
    # BUG FIX: select_image_qc is called below but was not imported here,
    # unlike the sibling dwi_image implementations that import it from the
    # same module (NameError unless a module-level import exists elsewhere).
    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        replace_sequence_chars,
        select_image_qc,
    )

    sel_image = select_image_qc(list(ida_meta_scans.IMAGEUID), mri_qc_subj)
    if sel_image is None:
        return None

    sel_scan = ida_meta_scans[ida_meta_scans.IMAGEUID == sel_image].iloc[0]
    sequence = replace_sequence_chars(sel_scan.Sequence)

    image_dict = {
        'Subject_ID': subject_id,
        'VISCODE': timepoint,
        'Visit': visit_str,
        'Sequence': sequence,
        'Scan_Date': sel_scan['Scan Date'],
        'Study_ID': str(int(sel_scan.LONISID)),
        'Series_ID': str(int(sel_scan.LONIUID)),
        'Image_ID': str(int(sel_scan.IMAGEUID)),
        'Field_Strength': sel_scan.MagStrength,
        'Scanner': sel_scan.Scanner,
        'Enhanced': enhanced
    }
    return image_dict
def fmri_image(subject_id, timepoint, visit_str, visit_mri_list, mri_qc_subj):
    """Pick one fMRI scan for a visit through QC and extract its metadata.

    Args:
        subject_id: Subject identifier
        timepoint: Visit code
        visit_str: Visit name
        visit_mri_list: DataFrame of images metadata for the visit
        mri_qc_subj: DataFrame of QC entries for the subject's scans

    Returns:
        dict with the selected image metadata, or None when no image
        passes QC.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        replace_sequence_chars,
        select_image_qc,
    )

    # NOTE(review): this lower-cases the caller's DataFrame columns in
    # place (side effect on the argument); kept as-is to preserve behavior.
    mri_qc_subj.columns = [col.lower() for col in mri_qc_subj.columns]

    best_uid = select_image_qc(list(visit_mri_list.IMAGEUID), mri_qc_subj)
    if best_uid is None:
        return None

    scan = visit_mri_list[visit_mri_list.IMAGEUID == best_uid].iloc[0]

    return {
        "Subject_ID": subject_id,
        "VISCODE": timepoint,
        "Visit": visit_str,
        "Sequence": replace_sequence_chars(scan.SEQUENCE),
        "Scan_Date": scan["SCANDATE"],
        "Study_ID": str(int(scan.STUDYID)),
        "Series_ID": str(int(scan.SERIESID)),
        "Image_ID": str(int(scan.IMAGEUID)),
        "Field_Strength": scan.MAGSTRENGTH,
    }
def compute_av45_fbb_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to the AV45 and Florbetaben PET images and store them in a tsv file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a DataFrame with all the paths to the PET images that will
        be converted into BIDS
    """
    import pandas as pd
    import os
    import operator
    from os import path
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, find_image_path
    from clinica.utils.stream import cprint
    from functools import reduce

    pet_amyloid_col = [
        'Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
        'Study_ID', 'Series_ID', 'Image_ID', 'Original', 'Tracer'
    ]
    pet_amyloid_df = pd.DataFrame(columns=pet_amyloid_col)

    # Loading needed .csv files
    av45qc = pd.read_csv(path.join(csv_dir, 'AV45QC.csv'), sep=',', low_memory=False)
    amyqc = pd.read_csv(path.join(csv_dir, 'AMYQC.csv'), sep=',', low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'), sep=',', low_memory=False)

    for subj in subjs_list:
        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]
        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects without Amyloid PET images
            # cprint('No Amyloid PET images metadata for subject - ' + subj)
            continue

        # QC for AV45 PET images for ADNI 1, GO and 2
        av45_qc_subj = av45qc[(av45qc.PASS == 1) & (av45qc.RID == int(subj[-4:]))]

        # QC for Amyloid PET images for ADNI 3
        amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1) & (amyqc.RID == int(subj[-4:]))]
        # ADNI3 QC stores the date as SCANDATE; mirror it as EXAMDATE so both
        # QC sources share the same column name after concatenation.
        amy_qc_subj.insert(0, 'EXAMDATE', amy_qc_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        amyloid_qc_subj = pd.concat([av45_qc_subj, amy_qc_subj], axis=0, ignore_index=True, sort=False)

        for visit in list(amyloid_qc_subj.VISCODE2.unique()):
            amyloid_qc_visit = amyloid_qc_subj[amyloid_qc_subj.VISCODE2 == visit]

            # If there are several scans for a timepoint we keep image acquired last (higher LONIUID)
            amyloid_qc_visit = amyloid_qc_visit.sort_values("LONIUID", ascending=False)
            qc_visit = amyloid_qc_visit.iloc[0]

            # Corresponding LONI image ID for original scan in PET Meta List
            int_image_id = int(qc_visit.LONIUID[1:])

            original_pet_meta = subject_pet_meta[
                (subject_pet_meta['Orig/Proc'] == 'Original')
                & (subject_pet_meta['Image ID'] == int_image_id)
                & (subject_pet_meta.Sequence.map(lambda s: (s.lower().find('early') < 0)))]

            # If no corresponding Amyloid PET metadata for an original image,
            # take scan at the same date
            if original_pet_meta.shape[0] < 1:
                original_pet_meta = subject_pet_meta[
                    (subject_pet_meta['Orig/Proc'] == 'Original')
                    & (subject_pet_meta['Scan Date'] == qc_visit.EXAMDATE)
                    & (subject_pet_meta.Sequence.map(lambda s: (s.lower().find('early') < 0)))]

                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere QC visits without image metadata
                    cprint('No Amyloid PET images metadata for subject - ' + subj + ' for visit ' + qc_visit.VISCODE2)
                    continue

            original_image = original_pet_meta.iloc[0]

            # To determine type of amyloid PET tracer we find the
            # Coreg, Avg, Std Img and Vox Siz, Uniform Resolution image
            # with the same Series ID of the original image
            final_pet_meta = subject_pet_meta[
                (subject_pet_meta.Sequence.map(
                    lambda x: (x.find('Coreg, Avg, Std Img and Vox Siz, Uniform Resolution') > 0)))
                & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            if final_pet_meta.shape[0] < 1:
                final_pet_meta = subject_pet_meta[
                    (subject_pet_meta.Sequence.map(
                        lambda x: (x.find('Coreg, Avg, Std Img and Vox Siz, Uniform Resolution') > 0)))
                    & (subject_pet_meta['Scan Date'] == original_image['Scan Date'])]

                if final_pet_meta.shape[0] < 1:
                    # TODO Log
                    cprint('No "Coreg, Avg, Std Img and Vox Siz, Uniform Resolution" Amyloid PET image metadata for subject'
                           ' ' + subj + ' for visit ' + qc_visit.VISCODE2)
                    continue

            processed_sequence = final_pet_meta.iloc[0].Sequence

            if processed_sequence.startswith('AV45'):
                tracer = 'AV45'
            elif processed_sequence.startswith('FBB'):
                tracer = 'FBB'
            else:
                # TODO Log
                cprint('Unknown tracer for Amyloid PET image metadata for subject ' + subj
                       + ' for visit ' + qc_visit.VISCODE2)
                continue

            # Co-registered and Averaged image with the same Series ID of the original image
            averaged_pet_meta = subject_pet_meta[
                (subject_pet_meta['Sequence'] == '%s Co-registered, Averaged' % tracer)
                & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            # If an explicit Co-registered, Averaged image does not exist,
            # the original image is already in that preprocessing stage.
            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            # The loop variable is deliberately overwritten with the visit
            # name stored in the selected image's metadata.
            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame([[
                qc_visit.Phase, subj, qc_visit.VISCODE2, str(visit), sequence, date,
                str(study_id), str(series_id), str(image_id), original, tracer
            ]], columns=pet_amyloid_col)
            # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0; pd.concat is the supported equivalent.
            pet_amyloid_df = pd.concat([pet_amyloid_df, row_to_append], ignore_index=True)

    # TODO check for new exceptions in ADNI3

    # Exceptions
    # ==========
    conversion_errors = [
        # Eq_1
        ('128_S_2220', 'm48')
    ]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_amyloid_df.Subject_ID == conv_error[0])
                             & (pet_amyloid_df.VISCODE == conv_error[1]))

    if error_indices:
        indices_to_remove = pet_amyloid_df.index[reduce(operator.or_, error_indices, False)]
        pet_amyloid_df.drop(indices_to_remove, inplace=True)

    # DONE - Make a function reusing this code for different modalities
    # TODO check if it works properly
    images = find_image_path(pet_amyloid_df, source_dir, 'Amyloid', 'I', 'Image_ID')

    amyloid_csv_path = path.join(dest_dir, 'conversion_info')
    # BUG FIX: exists-then-mkdir is racy; makedirs(exist_ok=True) is atomic
    # with respect to concurrent creation.
    os.makedirs(amyloid_csv_path, exist_ok=True)
    images.to_csv(path.join(amyloid_csv_path, 'amyloid_pet_paths.tsv'), sep='\t', index=False)

    return images
def compute_fmri_path(source_dir, clinical_dir, dest_dir, subjs_list):
    """Compute the paths to fMRI images.

    The fMRI images to convert into BIDS are chosen in the following way:
        - Extract the list of subjects from MAYOADIRL_MRI_FMRI_09_15_16.csv
        - Select only the scans that came from a PHILIPS machine (field
          Scanner from IDA_MR_Metadata_Listing.csv)
        - Discard all the subjects with column series_quality = 4 (4 means
          that the scan is not usable) in MAYOADIRL_MRI_IMAGEQC_12_08_15.csv

    In case of multiple scans for the same session, the one to convert is
    chosen with the following criteria:
        - Check if in MAYOADIRL_MRI_IMAGEQC_12_08_15.csv there is a single
          scan with the field series_selected == 1
        - If yes, choose that one
        - If no, choose the scan with the best quality

    Args:
        source_dir: path to the ADNI image folder
        clinical_dir: path to the directory with all the clinical data of ADNI
        dest_dir: path to the output folder
        subjs_list: subjects list

    Returns:
        pandas DataFrame containing the path for each fMRI
    """
    import os
    from os import path
    from os import walk
    import pandas as pd
    import logging

    from clinica.iotools.converters.adni_to_bids import adni_utils
    from clinica.utils.stream import cprint

    # NOTE(review): 'MagStregth' is a typo for 'MagStrength', but the column
    # name is part of the produced tsv schema — renaming it would break
    # downstream consumers; confirm before fixing.
    fmri_col = [
        'Subject_ID', 'VISCODE', 'Visit', 'IMAGEUID', 'Sequence', 'Scan Date',
        'LONIUID', 'Scanner', 'MagStregth', 'Path'
    ]
    fmri_df = pd.DataFrame(columns=fmri_col)

    # Load the requested clinical data
    mayo_mri_fmri_path = path.join(clinical_dir, 'MAYOADIRL_MRI_FMRI_09_15_16.csv')
    mayo_mri_imageqc_path = path.join(clinical_dir, 'MAYOADIRL_MRI_IMAGEQC_12_08_15.csv')
    ida_mr_metadata_path = path.join(clinical_dir, 'IDA_MR_Metadata_Listing.csv')

    mayo_mri_fmri = pd.io.parsers.read_csv(mayo_mri_fmri_path, sep=',')
    ida_mr_metadata = pd.io.parsers.read_csv(ida_mr_metadata_path, sep=',')
    mayo_mri_imageqc = pd.io.parsers.read_csv(mayo_mri_imageqc_path, sep=',')

    for subj in subjs_list:
        # print subj
        # RID is the numeric tail of the subject ID (last 4 characters).
        fmri_subjs_info = mayo_mri_fmri[(mayo_mri_fmri.RID == int(subj[-4:]))]
        # Extract visits available
        visits_list = fmri_subjs_info['VISCODE2'].tolist()
        # Removing duplicates
        visits_list = list(set(visits_list))

        if len(visits_list) != 0:
            for viscode in visits_list:
                # Reset per-visit state; 'visit' stays '' until metadata is found.
                visit = ''
                image_path = ''

                fmri_subj = fmri_subjs_info[fmri_subjs_info['VISCODE2'] == viscode]

                if not fmri_subj.empty:
                    # If there are multiple scans for the same session same subject, check what is the one selected for the usage (field 'series_selected') or
                    # choose the one with the best quality
                    if len(fmri_subj) > 1:
                        fmri_imageuid = fmri_subj['IMAGEUID'].tolist()
                        # QC rows are keyed by 'I'-prefixed LONI image IDs.
                        loni_uid_list = [
                            'I' + str(imageuid) for imageuid in fmri_imageuid
                        ]
                        images_qc = mayo_mri_imageqc[
                            mayo_mri_imageqc.loni_image.isin(loni_uid_list)]
                        series_selected_values = images_qc['series_selected'].tolist()
                        sum_series_selected = sum(series_selected_values)
                        # Exactly one series flagged as selected -> take it,
                        # otherwise fall back to best-quality selection.
                        if sum_series_selected == 1:
                            imageuid_to_select = images_qc[
                                images_qc['series_selected'] > 0]['loni_image'].iloc[0].replace('I', '')
                        else:
                            # NOTE(review): select_image_qc is referenced
                            # without an import in this function — presumably
                            # imported at module level; verify.
                            imageuid_to_select = select_image_qc(
                                fmri_imageuid, images_qc)

                        fmri_subj = fmri_subj[fmri_subj['IMAGEUID'] == int(
                            imageuid_to_select)].iloc[0]
                    else:
                        fmri_subj = fmri_subj.iloc[0]

                    fmri_imageuid = fmri_subj['IMAGEUID']

                    # Discard scans made with non Philips scanner and with a bad quality
                    fmri_metadata = ida_mr_metadata[ida_mr_metadata['IMAGEUID'] == fmri_imageuid]

                    if not fmri_metadata.empty:
                        fmri_metadata = fmri_metadata.iloc[0]

                        if 'Philips' not in fmri_metadata['Scanner']:
                            cprint('No Philips scanner for ' + subj
                                   + ' visit ' + viscode + '. Skipped.')
                            continue

                        elif 4 in mayo_mri_imageqc[
                                mayo_mri_imageqc['loni_image'] == 'I'
                                + str(fmri_imageuid)]['series_quality'].values:
                            cprint('Bad scan quality for ' + subj
                                   + ' visit ' + viscode + '. Skipped.')
                            continue

                        scan_date = fmri_subj.SCANDATE
                        sequence = adni_utils.replace_sequence_chars(
                            fmri_subj.SERDESC)
                        scanner = fmri_metadata['Scanner']
                        loni_uid = fmri_metadata['LONIUID']
                        visit = fmri_metadata['Visit']
                        mag_strenght = fmri_metadata['MagStrength']

                        # Calculate the path
                        # Walk the sequence folder looking for the series
                        # directory named 'S<LONIUID>'.
                        seq_path = path.join(source_dir, str(subj), sequence)
                        for (dirpath, dirnames, filenames) in walk(seq_path):
                            found = False
                            for d in dirnames:
                                if d == 'S' + str(loni_uid):
                                    image_path = path.join(dirpath, d)
                                    # Check if the path exists
                                    if not os.path.isdir(image_path):
                                        cprint('Path not existing for subject '
                                               + subj + ' visit ' + visit)
                                    found = True
                                    break
                            if found:
                                break

                        # The session scmri correspond to the baseline
                        if viscode == 'scmri':
                            viscode = 'bl'
                    else:
                        # NOTE(review): 'visit' is still '' in this branch,
                        # so the log message ends with an empty visit name.
                        cprint('Missing visit, sequence, scan date and loniuid for subject '
                               + subj + ' visit ' + visit)
                        continue

                    row_to_append = pd.DataFrame([[
                        subj, str(viscode), visit, str(fmri_imageuid), sequence,
                        scan_date, str(loni_uid), scanner, mag_strenght, image_path
                    ]], columns=fmri_col)
                    # NOTE(review): DataFrame.append is deprecated (removed in
                    # pandas 2.0); pd.concat is the modern replacement.
                    fmri_df = fmri_df.append(row_to_append, ignore_index=True)
                else:
                    # NOTE(review): logging.info uses %-style lazy args — these
                    # extra positional args have no matching placeholders, so
                    # the message will not render as intended; should be
                    # logging.info('Missing fMRI for %s visit %s', subj, visit).
                    logging.info('Missing fMRI for ', subj, 'visit', visit)

    fmri_df.to_csv(path.join(dest_dir, 'conversion_info', 'fmri_paths.tsv'), sep='\t', index=False)

    return fmri_df
def compute_fdg_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to the FDG PET images (ADNI 1, GO and 2) and store them in a tsv file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        a DataFrame with all the paths to the PET images that will be
        converted into BIDS
    """
    import pandas as pd
    import os
    import operator
    from os import walk, path
    from numpy import argsort
    # from clinica.iotools.converters.adni_utils import replace_sequence_chars
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars
    from clinica.utils.stream import cprint
    from functools import reduce

    pet_fdg_col = ['Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
                   'Study_ID', 'Series_ID', 'Image_ID', 'Original']
    pet_fdg_df = pd.DataFrame(columns=pet_fdg_col)

    petqc_path = path.join(csv_dir, 'PETQC.csv')
    pet_meta_list_path = path.join(csv_dir, 'PET_META_LIST.csv')
    petqc = pd.io.parsers.read_csv(petqc_path, sep=',')
    pet_meta_list = pd.io.parsers.read_csv(pet_meta_list_path, sep=',')

    for subj in subjs_list:
        # QC-passing entries for this subject; RID is the numeric tail of the ID.
        pet_qc_subj = petqc[(petqc.PASS == 1) & (petqc.RID == int(subj[-4:]))]
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]
        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects with problems
            cprint('NO Screening: Subject - ' + subj)
            continue

        for visit in list(pet_qc_subj.VISCODE2.unique()):
            pet_qc_visit = pet_qc_subj[pet_qc_subj.VISCODE2 == visit]
            if pet_qc_visit.shape[0] > 1:
                # Several QC-passing scans for the visit: keep only the
                # non-"early" acquisitions, then prefer one that has a
                # 'Co-registered, Averaged' counterpart (scanning series IDs
                # from highest to lowest).
                normal_images = []
                normal_meta = []
                for row in pet_qc_visit.iterrows():
                    image = row[1]
                    # LONIUID is 'I'-prefixed; strip it to match 'Image ID'.
                    pet_meta_image = \
                        subject_pet_meta[(subject_pet_meta['Image ID'] == int(image.LONIUID[1:]))].iloc[0]
                    if pet_meta_image.Sequence.lower().find('early') < 0:
                        normal_images.append(image)
                        normal_meta.append(pet_meta_image)
                if len(normal_images) == 0:
                    # TODO Log somewhere subjects with problems
                    cprint('No regular FDG-PET image: Subject - ' + subj + ' for visit ' + visit)
                    continue
                if len(normal_images) == 1:
                    qc_visit = normal_images[0]
                else:
                    qc_visit = None
                    index = argsort([x['Series ID'] for x in normal_meta])
                    for i in index[::-1]:
                        coreg_avg = subject_pet_meta[(subject_pet_meta['Sequence'] == 'Co-registered, Averaged')
                                                     & (subject_pet_meta['Series ID'] == normal_meta[i]['Series ID'])]
                        if coreg_avg.shape[0] > 0:
                            qc_visit = normal_images[i]
                            break
                    if qc_visit is None:
                        # No co-registered counterpart at all: fall back to
                        # the scan with the highest Series ID.
                        qc_visit = normal_images[index[len(index) - 1]]
            else:
                qc_visit = pet_qc_visit.iloc[0]

            # Corresponding LONI image ID for the original scan.
            int_image_id = int(qc_visit.LONIUID[1:])
            original_pet_meta = subject_pet_meta[
                (subject_pet_meta['Orig/Proc'] == 'Original')
                & (subject_pet_meta['Image ID'] == int_image_id)
                & (subject_pet_meta.Sequence.map(lambda s: (s.lower().find('early') < 0)))]
            if original_pet_meta.shape[0] < 1:
                # Fallback: an original FDG scan acquired on the QC exam date.
                original_pet_meta = subject_pet_meta[
                    (subject_pet_meta['Orig/Proc'] == 'Original')
                    & (subject_pet_meta.Sequence.map(lambda x: (x.lower().find('fdg') > -1)))
                    & (subject_pet_meta['Scan Date'] == qc_visit.EXAMDATE)]
                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere subjects with problems
                    cprint('NO Screening: Subject - ' + subj + ' for visit ' + qc_visit.VISCODE2)
                    continue
            original_image = original_pet_meta.iloc[0]

            # Prefer the 'Co-registered, Averaged' processed image when one
            # exists for the same series; otherwise keep the original.
            averaged_pet_meta = subject_pet_meta[(subject_pet_meta['Sequence'] == 'Co-registered, Averaged')
                                                 & (subject_pet_meta['Series ID'] == original_image['Series ID'])]
            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            # The loop variable is deliberately replaced by the visit name
            # carried in the selected image's metadata.
            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame(
                [[subj, qc_visit.VISCODE2, str(visit), sequence, date,
                  str(study_id), str(series_id), str(image_id), original]],
                columns=pet_fdg_col)
            # NOTE(review): DataFrame.append is deprecated (removed in pandas
            # 2.0); pd.concat is the modern replacement.
            pet_fdg_df = pet_fdg_df.append(row_to_append, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [  # NONAME.nii
                         ('037_S_1421', 'm36'),
                         ('037_S_1078', 'm36'),
                         # Empty folders
                         ('941_S_1195', 'm48'),
                         ('005_S_0223', 'm12')]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_fdg_df.Subject_ID == conv_error[0])
                             & (pet_fdg_df.VISCODE == conv_error[1]))

    indices_to_remove = pet_fdg_df.index[reduce(operator.or_, error_indices, False)]
    pet_fdg_df.drop(indices_to_remove, inplace=True)

    images = pet_fdg_df
    # count = 0
    # total = images.shape[0]
    is_dicom = []
    image_folders = []
    for row in images.iterrows():
        image = row[1]
        seq_path = path.join(source_dir, str(image.Subject_ID), image.Sequence)
        # count += 1
        # print 'Processing Subject ' + str(image.Subject_ID) + ' - Session ' + image.VISCODE + ', ' + str(
        #     count) + ' / ' + str(total)
        image_path = ''
        # Locate the image directory named 'I<Image_ID>' under the sequence folder.
        for (dirpath, dirnames, filenames) in walk(seq_path):
            found = False
            for d in dirnames:
                if d == 'I' + str(image.Image_ID):
                    image_path = path.join(dirpath, d)
                    found = True
                    break
            if found:
                break

        # If a .nii file is found inside, the image is NIfTI, not DICOM,
        # and the path points to the file itself.
        dicom = True
        for (dirpath, dirnames, filenames) in walk(image_path):
            for f in filenames:
                if f.endswith(".nii"):
                    dicom = False
                    image_path = path.join(dirpath, f)
                    break

        is_dicom.append(dicom)
        image_folders.append(image_path)
        if image_path == '':
            cprint('Not found ' + str(image.Subject_ID))

    images.loc[:, 'Is_Dicom'] = pd.Series(is_dicom, index=images.index)
    images.loc[:, 'Path'] = pd.Series(image_folders, index=images.index)

    fdg_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(fdg_csv_path):
        os.mkdir(fdg_csv_path)
    images.to_csv(path.join(fdg_csv_path, 'fdg_pet_paths.tsv'), sep='\t', index=False)

    return images
def compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """Compute the paths to the PIB PET images and store them in a tsv file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a DataFrame with all the paths to the PET images that will
        be converted into BIDS
    """
    import pandas as pd
    import os
    import operator
    from os import path
    from functools import reduce
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, find_image_path
    from clinica.utils.stream import cprint

    pet_pib_col = ['Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
                   'Study_ID', 'Series_ID', 'Image_ID', 'Original']
    pet_pib_df = pd.DataFrame(columns=pet_pib_col)

    # Loading needed .csv files
    pibqc = pd.read_csv(path.join(csv_dir, 'PIBQC.csv'), sep=',', low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'), sep=',', low_memory=False)

    for subj in subjs_list:
        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]
        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects without PIB PET metadata
            continue

        # QC for PIB PET images
        pet_qc_subj = pibqc[(pibqc.PASS == 1) & (pibqc.RID == int(subj[-4:]))]

        for visit in list(pet_qc_subj.VISCODE.unique()):
            pet_qc_visit = pet_qc_subj[pet_qc_subj.VISCODE == visit]
            qc_visit = pet_qc_visit.iloc[0]

            # Corresponding LONI image ID for original scan in PET Meta List
            int_image_id = int(qc_visit.LONIUID[1:])

            original_pet_meta = subject_pet_meta[
                (subject_pet_meta['Orig/Proc'] == 'Original')
                & (subject_pet_meta['Image ID'] == int_image_id)
                & (subject_pet_meta.Sequence.map(lambda s: (s.lower().find('early') < 0)))]

            # If no corresponding PIB PET metadata for an original image,
            # take scan at the same date containing PIB in sequence name
            if original_pet_meta.shape[0] < 1:
                original_pet_meta = subject_pet_meta[
                    (subject_pet_meta['Orig/Proc'] == 'Original')
                    & (subject_pet_meta.Sequence.map(lambda x: (x.lower().find('pib') > -1)))
                    & (subject_pet_meta['Scan Date'] == qc_visit.EXAMDATE)]

                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere QC visits without
                    cprint('No PIB-PET images metadata for subject - ' + subj + ' for visit ' + qc_visit.VISCODE)
                    continue

            original_image = original_pet_meta.iloc[0]

            # Co-registered and Averaged image with the same Series ID of the original image
            averaged_pet_meta = subject_pet_meta[
                (subject_pet_meta['Sequence'] == 'PIB Co-registered, Averaged')
                & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            # If an explicit Co-registered, Averaged image does not exist,
            # the original image is already in that preprocessing stage.
            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            # The loop variable is deliberately overwritten with the visit
            # name stored in the selected image's metadata.
            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame(
                [['ADNI1', subj, qc_visit.VISCODE, str(visit), sequence, date,
                  str(study_id), str(series_id), str(image_id), original]],
                columns=pet_pib_col)
            # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0; pd.concat is the supported equivalent.
            pet_pib_df = pd.concat([pet_pib_df, row_to_append], ignore_index=True)

    # TODO check for exceptions

    # Exceptions
    # ==========
    conversion_errors = []

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_pib_df.Subject_ID == conv_error[0])
                             & (pet_pib_df.VISCODE == conv_error[1]))

    if error_indices:
        indices_to_remove = pet_pib_df.index[reduce(operator.or_, error_indices, False)]
        pet_pib_df.drop(indices_to_remove, inplace=True)

    # DONE - Make a function reusing this code for different modalities
    # TODO check if it works properly
    images = find_image_path(pet_pib_df, source_dir, 'PIB', 'I', 'Image_ID')

    pib_csv_path = path.join(dest_dir, 'conversion_info')
    # BUG FIX: exists-then-mkdir is racy; makedirs(exist_ok=True) is atomic
    # with respect to concurrent creation.
    os.makedirs(pib_csv_path, exist_ok=True)
    images.to_csv(path.join(pib_csv_path, 'pib_pet_paths.tsv'), sep='\t', index=False)

    return images
def compute_fdg_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """ Compute the paths to the FDG PET images and store them in a tsv file

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    import pandas as pd
    import os
    import operator
    from os import path
    from functools import reduce
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, find_image_path
    from clinica.utils.stream import cprint

    pet_fdg_col = ['Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',
                   'Study_ID', 'Series_ID', 'Image_ID', 'Original']
    pet_fdg_df = pd.DataFrame(columns=pet_fdg_col)

    # Loading needed .csv files
    petqc = pd.read_csv(path.join(csv_dir, 'PETQC.csv'), sep=',', low_memory=False)
    petqc3 = pd.read_csv(path.join(csv_dir, 'PETC3.csv'), sep=',', low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'), sep=',', low_memory=False)

    for subj in subjs_list:
        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects without FDG-PET images metadata
            continue

        # QC for FDG PET images for ADNI 1, GO and 2
        pet_qc_1go2_subj = petqc[(petqc.PASS == 1) & (petqc.RID == int(subj[-4:]))]

        # QC for FDG PET images for ADNI 3
        pet_qc3_subj = petqc3[(petqc3.SCANQLTY == 1) & (petqc3.RID == int(subj[-4:]))]
        # ADNI3 QC uses SCANDATE; expose it under the EXAMDATE name used by ADNI 1, GO and 2
        pet_qc3_subj.insert(0, 'EXAMDATE', pet_qc3_subj.SCANDATE.to_list())

        # Concatenating visits in both QC files
        pet_qc_subj = pd.concat([pet_qc_1go2_subj, pet_qc3_subj], axis=0, ignore_index=True, sort=False)

        for visit in list(pet_qc_subj.VISCODE2.unique()):
            pet_qc_visit = pet_qc_subj[pet_qc_subj.VISCODE2 == visit]

            # If there are several scans for a timepoint we keep image acquired last (higher LONIUID)
            # NOTE(review): LONIUID looks like a string such as 'I12345', so this sort is
            # lexicographic — TODO confirm IDs have uniform width or sort on int(LONIUID[1:])
            pet_qc_visit = pet_qc_visit.sort_values("LONIUID", ascending=False)
            qc_visit = pet_qc_visit.iloc[0]

            # Corresponding LONI image ID for original scan in PET Meta List
            int_image_id = int(qc_visit.LONIUID[1:])

            original_pet_meta = subject_pet_meta[
                (subject_pet_meta['Orig/Proc'] == 'Original')
                & (subject_pet_meta['Image ID'] == int_image_id)
                & (subject_pet_meta.Sequence.map(lambda s: (s.lower().find('early') < 0)))]

            # If no corresponding FDG PET metadata for an original image,
            # take scan at the same date containing FDG in sequence name
            if original_pet_meta.shape[0] < 1:
                original_pet_meta = subject_pet_meta[
                    (subject_pet_meta['Orig/Proc'] == 'Original')
                    & (subject_pet_meta.Sequence.map(lambda x: (x.lower().find('fdg') > -1)))
                    & (subject_pet_meta['Scan Date'] == qc_visit.EXAMDATE)]

                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere QC visits without image metadata
                    cprint('No FDG-PET images metadata for subject - ' + subj + ' for visit ' + qc_visit.VISCODE2)
                    continue

            original_image = original_pet_meta.iloc[0]

            # Co-registered and Averaged image with the same Series ID of the original image
            averaged_pet_meta = subject_pet_meta[
                (subject_pet_meta['Sequence'] == 'Co-registered, Averaged')
                & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            # If an explicit Co-registered, Averaged image does not exist,
            # the original image is already in that preprocessing stage.
            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame([[qc_visit.Phase, subj, qc_visit.VISCODE2, str(visit), sequence, date,
                                           str(study_id), str(series_id), str(image_id), original]],
                                         columns=pet_fdg_col)
            pet_fdg_df = pet_fdg_df.append(row_to_append, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [
        # NONAME.nii
        ('031_S_0294', 'bl'),
        ('037_S_1421', 'm36'),
        ('037_S_1078', 'm36'),
        # Empty folders
        ('941_S_1195', 'm48'),
        ('005_S_0223', 'm12')]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_fdg_df.Subject_ID == conv_error[0])
                             & (pet_fdg_df.VISCODE == conv_error[1]))

    # Guard against an empty list (consistent with the PIB and Tau versions of this
    # function): reduce(..., False) over no masks yields the scalar False, and
    # pet_fdg_df.index[False] would silently select row 0 instead of nothing.
    if error_indices:
        indices_to_remove = pet_fdg_df.index[reduce(operator.or_, error_indices, False)]
        pet_fdg_df.drop(indices_to_remove, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(pet_fdg_df, source_dir, 'FDG', 'I', 'Image_ID')

    fdg_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(fdg_csv_path):
        os.mkdir(fdg_csv_path)
    images.to_csv(path.join(fdg_csv_path, 'fdg_pet_paths.tsv'), sep='\t', index=False)

    return images
def compute_tau_pet_paths(source_dir, csv_dir, dest_dir, subjs_list):
    """ Compute the paths to Tau PET images

    For each requested subject, picks one QC-passing Tau PET scan per visit,
    prefers the 'AV1451 Co-registered, Averaged' preprocessed image when it
    exists, and writes the result to <dest_dir>/conversion_info/tau_pet_paths.tsv.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list

    Returns:
        pandas Dataframe containing the path for each Tau PET image
    """
    import pandas as pd
    import os
    import operator
    from os import path
    from functools import reduce
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, find_image_path
    from clinica.utils.stream import cprint

    pet_tau_col = ['Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID',
                   'Series_ID', 'Image_ID', 'Original']
    pet_tau_df = pd.DataFrame(columns=pet_tau_col)

    # Loading needed .csv files
    tauqc = pd.read_csv(path.join(csv_dir, 'TAUQC.csv'), sep=',', low_memory=False)
    tauqc3 = pd.read_csv(path.join(csv_dir, 'TAUQC3.csv'), sep=',', low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'), sep=',', low_memory=False)

    for subj in subjs_list:
        # PET images metadata for subject
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]

        if subject_pet_meta.shape[0] < 1:
            # TODO Log somewhere subjects without TAU PET images metadata
            continue

        # QC for TAU PET images for ADNI 2
        # RID is the last 4 digits of the subject ID (e.g. '123_S_4567' -> 4567)
        tau_qc2_subj = tauqc[(tauqc.SCANQLTY == 1) & (tauqc.RID == int(subj[-4:]))]

        # QC for TAU PET images for ADNI 3
        tau_qc3_subj = tauqc3[(tauqc3.SCANQLTY == 1) & (tauqc3.RID == int(subj[-4:]))]

        # Concatenating visits in both QC files
        tau_qc_subj = pd.concat([tau_qc2_subj, tau_qc3_subj], axis=0, ignore_index=True, sort=False)

        for visit in list(tau_qc_subj.VISCODE2.unique()):
            # TODO Infer visit from ADNIMERGE visits
            # Visits with a missing visit code cannot be matched; skip them
            if str(visit) == 'nan':
                continue

            pet_qc_visit = tau_qc_subj[tau_qc_subj.VISCODE2 == visit]

            # If there are several scans for a timepoint we keep image acquired last (higher LONIUID)
            # NOTE(review): LONIUID appears to be a string like 'I12345', so this sort is
            # lexicographic — TODO confirm IDs have uniform width
            pet_qc_visit = pet_qc_visit.sort_values("LONIUID", ascending=False)

            qc_visit = pet_qc_visit.iloc[0]

            # Corresponding LONI image ID for original scan in PET Meta List
            # (strip the leading 'I' prefix from LONIUID)
            int_image_id = int(qc_visit.LONIUID[1:])

            original_pet_meta = subject_pet_meta[(subject_pet_meta['Orig/Proc'] == 'Original')
                                                 & (subject_pet_meta['Image ID'] == int_image_id)]

            # If no corresponding TAU PET metadata for an original image,
            # take scan at the same date containing TAU or AV-1451 in sequence name
            if original_pet_meta.shape[0] < 1:
                original_pet_meta = subject_pet_meta[(subject_pet_meta['Orig/Proc'] == 'Original')
                                                     & subject_pet_meta.Sequence.map(
                                                         lambda x: ((x.lower().find('tau') > -1)
                                                                    | (x.lower().find('av-1451') > -1)
                                                                    | (x.lower().find('av1451') > -1)))
                                                     & (subject_pet_meta['Scan Date'] == qc_visit.SCANDATE)]

                if original_pet_meta.shape[0] < 1:
                    # TODO Log somewhere QC visits without image metadata
                    cprint('No TAU-PET images metadata for subject - ' + subj + ' for visit ' + qc_visit.VISCODE2)
                    continue

            original_image = original_pet_meta.iloc[0]

            # Co-registered and Averaged image with the same Series ID of the original image
            averaged_pet_meta = subject_pet_meta[(subject_pet_meta['Sequence'] == 'AV1451 Co-registered, Averaged')
                                                 & (subject_pet_meta['Series ID'] == original_image['Series ID'])]

            # If an explicit AV1451 Co-registered, Averaged image does not exist,
            # the original image is already in that preprocessing stage.
            if averaged_pet_meta.shape[0] < 1:
                sel_image = original_image
                original = True
            else:
                sel_image = averaged_pet_meta.iloc[0]
                original = False

            visit = sel_image.Visit
            sequence = replace_sequence_chars(sel_image.Sequence)
            date = sel_image['Scan Date']
            study_id = sel_image['Study ID']
            series_id = sel_image['Series ID']
            image_id = sel_image['Image ID']

            row_to_append = pd.DataFrame([[qc_visit.Phase, subj, qc_visit.VISCODE2, str(visit), sequence, date,
                                           str(study_id), str(series_id), str(image_id), original]],
                                         columns=pet_tau_col)
            pet_tau_df = pet_tau_df.append(row_to_append, ignore_index=True)

    # Exceptions
    # ==========
    conversion_errors = [
        # Multiple output images
        ('098_S_4275', 'm84')]

    error_indices = []
    for conv_error in conversion_errors:
        error_indices.append((pet_tau_df.Subject_ID == conv_error[0])
                             & (pet_tau_df.VISCODE == conv_error[1]))

    if error_indices:
        indices_to_remove = pet_tau_df.index[reduce(operator.or_, error_indices, False)]
        pet_tau_df.drop(indices_to_remove, inplace=True)

    # Checking for images paths in filesystem
    images = find_image_path(pet_tau_df, source_dir, 'TAU', 'I', 'Image_ID')

    tau_csv_path = path.join(dest_dir, 'conversion_info')
    if not os.path.exists(tau_csv_path):
        os.mkdir(tau_csv_path)
    images.to_csv(path.join(tau_csv_path, 'tau_pet_paths.tsv'), sep='\t', index=False)

    return images
def truncate_sequence_at_n3(sequence):
    """Return `sequence` truncated right after its 'N3' or 'N3m' processing step.

    Processed MPRAGE sequence names end in 'N3' or in 'N3m'. The comparison
    uses a slice (`sequence[n3 + 2:n3 + 3]`) rather than direct indexing so
    that a name ending exactly at 'N3' does not raise IndexError.
    """
    n3 = sequence.find('N3')
    return sequence[:n3 + 2 + int(sequence[n3 + 2:n3 + 3] == 'm')]


def adni1go2_image(subject_id, timepoint, visit_str, mprage_meta_subj, mri_quality_subj, mayo_mri_qc_subj,
                   preferred_field_strength=3.0):
    """Selects the preferred scan for a subject in a visit, given the subject belongs to ADNI 1, Go or 2 cohorts

    Args:
        subject_id: string containing subject ID
        timepoint: string of visit code in months
        visit_str: string of visit name
        mprage_meta_subj: DataFrame of MPRAGE metadata of images corresponding to the subject
        mri_quality_subj: DatFrame of MR image quality of images corresponding to the subject
        mayo_mri_qc_subj: DatFrame of MAYO Clinic MR image quality of images corresponding to the subject
        preferred_field_strength: Field strength that is preferred in case there are several image acquisitions

    Returns:
        Dictionary containing selected scan information, or None if no scan passing QC could be found
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars

    filtered_mprage = preferred_processed_scan(mprage_meta_subj, visit_str)

    # If no N3 processed image found (it means there are no processed images at all), get best original image
    if filtered_mprage.empty:
        return original_image(subject_id, timepoint, visit_str, mprage_meta_subj, mayo_mri_qc_subj,
                              preferred_field_strength)

    # If there are images with different magnetic field strength, prefer 1.5T images for ADNI1, 3.0T otherwise
    if len(filtered_mprage.MagStrength.unique()) > 1:
        filtered_mprage = filtered_mprage[filtered_mprage.MagStrength == preferred_field_strength]

    # Sort by Series ID in case there are several images, so we keep the one acquired first
    filtered_mprage = filtered_mprage.sort_values('SeriesID')
    scan = filtered_mprage.iloc[0]

    # Check if selected scan passes QC (if QC exists)
    if not check_qc(scan, subject_id, visit_str, mri_quality_subj):
        # If not passed, look for another scan in the visit from a different acquisition
        filtered_mprage = preferred_processed_scan(mprage_meta_subj, visit_str,
                                                   unwanted_series_id=[scan.SeriesID])
        if filtered_mprage.empty:
            return None
        scan = filtered_mprage.iloc[0]
        # Check QC for second scan
        if not check_qc(scan, subject_id, visit_str, mri_quality_subj):
            return None

    # Keep the sequence name only up to its 'N3'/'N3m' step before normalizing characters
    sequence = replace_sequence_chars(truncate_sequence_at_n3(scan.Sequence))

    return {'Subject_ID': subject_id,
            'VISCODE': timepoint,
            'Visit': visit_str,
            'Sequence': sequence,
            'Scan_Date': scan.ScanDate,
            'Study_ID': str(scan.StudyID),
            'Series_ID': str(scan.SeriesID),
            'Image_ID': str(scan.ImageUID),
            'Field_Strength': scan.MagStrength,
            'Original': False}
cond_spgr = ((mprage_meta_subj_orig.Visit == visit_str) & mprage_meta_subj_orig.Sequence.str.contains( "spgr", case=False, na=False) & ~mprage_meta_subj_orig.Sequence.str.contains( "acc", case=False, na=False)) filtered_scan = mprage_meta_subj_orig[cond_mprage | cond_spgr] if filtered_scan.empty: cprint('NO MPRAGE Meta: ' + subject_id + ' for visit ' + timepoint + ' - ' + visit_str) return None scan = select_scan_from_qc(filtered_scan, mayo_mri_qc_subj, preferred_field_strength) sequence = replace_sequence_chars(scan.Sequence) return { 'Subject_ID': subject_id, 'VISCODE': timepoint, 'Visit': visit_str, 'Sequence': sequence, 'Scan_Date': scan.ScanDate, 'Study_ID': str(scan.StudyID), 'Series_ID': str(scan.SeriesID), 'Image_ID': str(scan.ImageUID), 'Field_Strength': scan.MagStrength, 'Original': True }
def adni1_image_refactoring(csv_dir, adnimerge, subject_id, timepoint, visit_str, mprage_meta_subj, mri_quality_subj, mayo_mri_qc_subj):
    """Select the preferred T1 scan for an ADNI1 subject/visit and return its metadata.

    Prefers a 'Processed ... Scaled' image series; if none exists, falls back to
    the ADNI2 selection (`adni2_image`) over original images, preferring 1.5T.

    Args:
        csv_dir: path to the clinical data directory
        adnimerge: ADNIMERGE clinical data (passed through to `adni1_select_scanner`)
        subject_id: string containing subject ID
        timepoint: string of visit code in months
        visit_str: string of visit name
        mprage_meta_subj: DataFrame of MPRAGE metadata of images corresponding to the subject
        mri_quality_subj: DataFrame of MR image quality of images corresponding to the subject
        mayo_mri_qc_subj: DataFrame of MAYO Clinic MR image quality of images corresponding to the subject

    Returns:
        Dictionary with keys Subject_ID, VISCODE, Visit, Sequence, Scan_Date, Study_ID,
        Series_ID, Image_ID, Field_Strength and Original.
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars
    from clinica.utils.stream import cprint

    # Get the preferred scan (image series that has been Scaled)
    filtered_mprage = mprage_meta_subj[(mprage_meta_subj['Orig/Proc'] == 'Processed')
                                       & (mprage_meta_subj.Visit == visit_str)
                                       & (mprage_meta_subj.Sequence.map(lambda x: x.endswith('Scaled')))]

    # If there is not a preferred image we use ADNI2 processing
    # (get the best qc if available, otherwise the original) preferring 1.5T images
    if filtered_mprage.shape[0] < 1:
        mprage_meta_subj_orig = mprage_meta_subj[mprage_meta_subj['Orig/Proc'] == 'Original']
        return adni2_image(subject_id, timepoint, visit_str, mprage_meta_subj_orig, mayo_mri_qc_subj, preferred_field_strength=1.5)

    filtered_mprage_mag = filtered_mprage
    if len(filtered_mprage.MagStrength.unique()) > 1:
        filtered_mprage_mag = filtered_mprage[filtered_mprage.MagStrength == 1.5]  # Select 1.5T images

    scan = filtered_mprage_mag.iloc[0]
    series_id = scan.SeriesID

    # QC rows are keyed by LONIUID = 'S' + series ID
    qc_passed = True
    qc = mri_quality_subj[mri_quality_subj.LONIUID == 'S' + str(scan.SeriesID)]
    if qc.shape[0] > 0 and qc.iloc[0].PASS != 1:
        # print 'QC found but NOT passed'
        # print 'Subject ' + subject_id + ' - Series: ' + str(scan.SeriesID) + ' - Study: ' + str(scan.StudyID)

        # QC failed: fall back to an original image of the same visit from a different series.
        # NOTE(review): .iloc[0] below raises IndexError if no alternative series exists — TODO confirm
        mprage_meta_subj_alt = mprage_meta_subj[(mprage_meta_subj['Orig/Proc'] == 'Original')
                                                & (mprage_meta_subj.Visit == visit_str)
                                                & (mprage_meta_subj.SeriesID != series_id)]

        # Remember the rejected processed-sequence name; it is reused below to rebuild
        # the sequence string when qc_passed is False (only defined on this branch).
        qc_prev_sequence = scan.Sequence
        scan = mprage_meta_subj_alt.iloc[0]
        series_id = scan.SeriesID
        qc_passed = False

    #TODO replace by the function to find if the scanner is_philips
    scanner = adni1_select_scanner(subject_id, csv_dir, adnimerge, timepoint)
    original = True

    # TODO replace the condition with if is_philips
    if scanner == 'Philips':
        # For Philips scanners, take the Original image of the selected series
        scan = (mprage_meta_subj[(mprage_meta_subj['Orig/Proc'] == 'Original')
                                 & (mprage_meta_subj.SeriesID == series_id)]).iloc[0]
        sequence = scan.Sequence
    else:
        # scan already selected above
        # Trim the processed-sequence name before its 'N3' step ('; N3...' -> drop '; N3' onward)
        sequence = scan.Sequence[:scan.Sequence.find('N3') - 2]
        original = False

    if not qc_passed:
        if scan.Sequence == 'MP-RAGE':
            original_img_seq = 'MPR'
        else:  # 'MP-RAGE REPEAT'
            original_img_seq = 'MPR-R'

        # Graft the processing suffix of the rejected series onto the original sequence name
        processing_seq = qc_prev_sequence[qc_prev_sequence.find(';'):qc_prev_sequence.find('N3') - 2]
        sequence = original_img_seq + processing_seq
    # print sequence

    sequence = replace_sequence_chars(sequence)

    # Re-check QC for the finally selected series (log only, do not reject)
    qc = mri_quality_subj[mri_quality_subj.LONIUID == 'S' + str(scan.SeriesID)]
    if qc.shape[0] > 0 and qc.iloc[0].PASS != 1:
        # TODO - LOG THIS
        cprint('QC found but NOT passed')
        cprint('Subject ' + subject_id + ' - Series: ' + str(scan.SeriesID) + ' - Study: ' + str(scan.StudyID))

    return {'Subject_ID': subject_id,
            'VISCODE': timepoint,
            'Visit': visit_str,
            'Sequence': sequence,
            'Scan_Date': scan.ScanDate,
            'Study_ID': str(scan.StudyID),
            'Series_ID': str(scan.SeriesID),
            'Image_ID': str(scan.ImageUID),
            'Field_Strength': scan.MagStrength,
            'Original': original}
def sequence_name_up_to_n3(sequence):
    """Return *sequence* cut right after its 'N3'/'N3m' processing step.

    A slice comparison (`sequence[n3 + 2:n3 + 3]`) is used instead of direct
    indexing so that sequence names ending exactly at 'N3' (no trailing 'm')
    do not raise IndexError.
    """
    n3 = sequence.find("N3")
    return sequence[:n3 + 2 + int(sequence[n3 + 2:n3 + 3] == "m")]


def adni1go2_image(
    subject_id,
    timepoint,
    visit_str,
    mprage_meta_subj,
    mri_quality_subj,
    mayo_mri_qc_subj,
    preferred_field_strength=3.0,
):
    """Select the preferred scan for a subject in a visit, given the subject belongs to ADNI 1, Go or 2 cohorts.

    Args:
        subject_id: string containing subject ID
        timepoint: string of visit code in months
        visit_str: string of visit name
        mprage_meta_subj: DataFrame of MPRAGE metadata of images corresponding to the subject
        mri_quality_subj: DatFrame of MR image quality of images corresponding to the subject
        mayo_mri_qc_subj: DatFrame of MAYO Clinic MR image quality of images corresponding to the subject
        preferred_field_strength: Field strength that is preferred in case there are several image acquisitions

    Returns:
        Dictionary containing selected scan information
    """
    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        replace_sequence_chars,
    )

    # filter out images that do not pass QC
    mprage_meta_subj = mprage_meta_subj[mprage_meta_subj.apply(
        lambda x: check_qc(x, subject_id, visit_str, mri_quality_subj), axis=1)]

    filtered_mprage = preferred_processed_scan(mprage_meta_subj, visit_str)

    # If no N3 processed image found (it means there are no processed images at all), get best original image
    if filtered_mprage.empty:
        return original_image(
            subject_id,
            timepoint,
            visit_str,
            mprage_meta_subj,
            mayo_mri_qc_subj,
            preferred_field_strength,
        )

    # If there are images with different magnetic field strength, prefer 1.5T images for ADNI1, 3.0T otherwise
    if len(filtered_mprage.MagStrength.unique()) > 1:
        filtered_mprage = filtered_mprage[filtered_mprage.MagStrength ==
                                          preferred_field_strength]

    # Sort by Series ID in case there are several images, so we keep the one acquired first
    filtered_mprage = filtered_mprage.sort_values("SeriesID")
    scan = filtered_mprage.iloc[0]

    # Sequence ends in 'N3' or in 'N3m'; trim it there before normalizing characters
    sequence = replace_sequence_chars(sequence_name_up_to_n3(scan.Sequence))

    return {
        "Subject_ID": subject_id,
        "VISCODE": timepoint,
        "Visit": visit_str,
        "Sequence": sequence,
        "Scan_Date": scan.ScanDate,
        "Study_ID": str(scan.StudyID),
        "Series_ID": str(scan.SeriesID),
        "Image_ID": str(scan.ImageUID),
        "Field_Strength": scan.MagStrength,
        "Original": False,
    }