def update_with_4d_data(self):
    """Get mean values of the merged 4D skeleton in the significant voxels

    Attributes:
        cluster_averages: dict, mapping volume number to its mean value
            within the significant cluster.
        cluster_averages_df: pandas DataFrame built from cluster_averages.

    TODO:
        - save significant voxels
        - parallelize
        - think about using all_modality_merged images?
    """
    merged_4d_data = get_nifti_data(self.merged_4d_file)

    # get a map with significant voxels
    significant_cluster_data = np.where(
        self.corrp_data >= self.threshold, 1, 0)

    self.cluster_averages = {}
    # Get the average of values in the `significant_cluster_data` map
    # for each skeleton volume
    for vol_num in np.arange(merged_4d_data.shape[3]):
        vol_data = merged_4d_data[:, :, :, vol_num]
        average = vol_data[significant_cluster_data == 1].mean()
        self.cluster_averages[vol_num] = average

    self.cluster_averages_df = pd.DataFrame.from_dict(
        self.cluster_averages,
        orient='index',
        columns=[f'{self.modality} values in the significant '
                 f'cluster {self.name}'])
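# A minimal, self-contained sketch of the masked-averaging step used in
# update_with_4d_data. The synthetic arrays and the 0.95 threshold are
# hypothetical; the real method reads them from nifti files on disk.
def _demo_cluster_averages():
    import numpy as np
    rng = np.random.default_rng(0)
    corrp = rng.random((4, 4, 4))          # stand-in 1-p corrp map
    merged_4d = rng.random((4, 4, 4, 3))   # three stacked skeleton volumes

    significant = np.where(corrp >= 0.95, 1, 0)
    # per-volume mean within the significant voxels, as above
    return {vol: merged_4d[:, :, :, vol][significant == 1].mean()
            for vol in range(merged_4d.shape[3])}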
def summary(self):
    """Summarize skeleton files"""
    # list of all skeleton nifti files as numpy arrays
    arrays = [get_nifti_data(x) for x in self.skeleton_files]

    # merge skeleton files along a new 4th axis
    self.merged_skeleton_data = np.stack(arrays, axis=3)

    # per-subject non-zero mean and std
    self.means = [x[np.nonzero(x)].mean() for x in arrays]
    self.df['mean'] = self.means
    self.stds = [x[np.nonzero(x)].std() for x in arrays]
    self.df['std'] = self.stds

    # non-zero mean and std across the whole merged skeleton
    nonzero_merged = self.merged_skeleton_data[
        np.nonzero(self.merged_skeleton_data)]
    self.mean = nonzero_merged.mean()
    self.std = nonzero_merged.std()

    self.merged_data_df = pd.DataFrame({
        'merged mean': [self.mean],
        'merged std': [self.std]})
def summary(self):
    """Summarize skeleton files within the significance mask"""
    # list of all skeleton nifti files as numpy arrays
    arrays = [get_nifti_data(x) for x in self.skeleton_files]

    # merge skeleton files along a new 4th axis
    self.merged_skeleton_data = np.stack(arrays, axis=3)

    # add a trailing axis so the 3D significance mask broadcasts
    # across the volume (4th) axis of the merged data
    self.mask_4d = np.broadcast_to(
        self.sig_mask[:, :, :, np.newaxis],
        self.merged_skeleton_data.shape)

    # per-subject mean and std within the mask
    self.means = [x[self.sig_mask == 1].mean() for x in arrays]
    self.df['mean'] = self.means
    self.stds = [x[self.sig_mask == 1].std() for x in arrays]
    self.df['std'] = self.stds

    # mean and std across the whole merged skeleton within the mask
    self.mean = self.merged_skeleton_data[self.mask_4d == 1].mean()
    self.std = self.merged_skeleton_data[self.mask_4d == 1].std()

    self.merged_data_df = pd.DataFrame({
        'merged mean': [self.mean],
        'merged std': [self.std]})
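# A small sketch contrasting the two `summary` variants above: the first
# averages over every non-zero skeleton voxel, the second only over
# voxels flagged by a significance mask. All arrays here are synthetic.
def _demo_summary_variants():
    import numpy as np
    rng = np.random.default_rng(0)
    arrays = [rng.random((4, 4, 4)) for _ in range(3)]
    sig_mask = (rng.random((4, 4, 4)) > 0.5).astype(int)

    merged = np.stack(arrays, axis=3)
    mask_4d = np.broadcast_to(sig_mask[..., None], merged.shape)

    nonzero_mean = merged[np.nonzero(merged)].mean()  # first variant
    masked_mean = merged[mask_4d == 1].mean()         # second variant
    return nonzero_mean, masked_mean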
def subject_level_summary_with_mask(self, mask, threshold):
    """Summarize subject skeletons within a thresholded mask

    Attributes:
        subject_masked_means: list, mean of the non-zero masked skeleton.
        subject_masked_means_left / _right: lists, hemispheric means.
        subject_masked_stds: list, std of the non-zero masked skeleton.
        subject_nonzero_voxel_count: list, non-zero voxel counts.
    """
    mask_data = get_nifti_data(mask)
    mask_data = np.where(mask_data > threshold, 1, 0)

    # Non-zero summary values in each masked subject skeleton
    self.subject_masked_means = []
    self.subject_masked_means_left = []
    self.subject_masked_means_right = []
    self.subject_masked_stds = []
    self.subject_nonzero_voxel_count = []

    # loop through each subject volume
    for vol_num in np.arange(self.merged_skeleton_data.shape[-1]):
        vol_data = self.merged_skeleton_data[:, :, :, vol_num] * mask_data

        # split the volume at the midline (x voxel index 90)
        left_vol_data = vol_data[90:, :, :]
        right_vol_data = vol_data[:90, :, :]

        non_zero_mean = vol_data[np.nonzero(vol_data)].mean()
        non_zero_mean_left = left_vol_data[
            np.nonzero(left_vol_data)].mean()
        non_zero_mean_right = right_vol_data[
            np.nonzero(right_vol_data)].mean()
        non_zero_std = vol_data[np.nonzero(vol_data)].std()
        non_zero_voxel_count = len(np.where(vol_data != 0)[0])

        self.subject_masked_means.append(non_zero_mean)
        self.subject_masked_means_left.append(non_zero_mean_left)
        self.subject_masked_means_right.append(non_zero_mean_right)
        self.subject_masked_stds.append(non_zero_std)
        self.subject_nonzero_voxel_count.append(non_zero_voxel_count)
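# The left/right split above appears to assume skeletons in 1mm MNI
# space (182 voxels along x), so x index 90 approximates the midline;
# that assumption is not stated in the source. A toy illustration of
# the same slicing on a tiny array:
def _demo_hemisphere_split():
    import numpy as np
    vol = np.arange(8).reshape((2, 2, 2)).astype(float)
    mid = vol.shape[0] // 2          # midline index (90 for MNI 1mm)
    left, right = vol[mid:], vol[:mid]
    return (left[np.nonzero(left)].mean(),
            right[np.nonzero(right)].mean())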
def __init__(self, merged_skeleton_loc, mask_loc):
    """Initialize MergedSkeleton object"""
    self.merged_skeleton_loc = merged_skeleton_loc
    self.skel_mask_loc = mask_loc

    # load merged skeleton nifti
    print(f"Reading {merged_skeleton_loc}")
    self.merged_skeleton_img, self.merged_skeleton_data = \
        get_nifti_img_data(merged_skeleton_loc)
    print(f"Completed reading {merged_skeleton_loc}")

    # load mask as a boolean array
    self.mask_data = get_nifti_data(mask_loc) == 1

    # binarize the merged skeleton map once, then sum and average
    # across subjects
    skeleton_bin = np.where(self.merged_skeleton_data == 0, 0, 1)
    print("Estimating sum of binarized skeleton maps for all subjects")
    self.merged_skeleton_data_bin_sum = np.sum(skeleton_bin, axis=3)
    print("Estimating mean of binarized skeleton maps for all subjects")
    self.merged_skeleton_data_bin_mean = np.mean(skeleton_bin, axis=3)
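# A sketch of the binarize-then-aggregate step in __init__ on a toy 4D
# array: each subject's skeleton becomes 0/1, then the voxelwise sum
# counts subjects covering each voxel and the mean gives the fraction.
def _demo_bin_sum_mean():
    import numpy as np
    rng = np.random.default_rng(0)
    merged = rng.random((3, 3, 3, 4)) * (rng.random((3, 3, 3, 4)) > 0.5)
    skeleton_bin = np.where(merged == 0, 0, 1)
    return skeleton_bin.sum(axis=3), skeleton_bin.mean(axis=3)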
def subject_level_summary_with_warp(self, warp_dir, caselist):
    """Summarize warp values where subject skeletons are zero

    Attributes:
        subject_zero_skeleton_values: list of numpy arrays, warp values
            at voxels that are zero in the skeleton but inside the mask.
    """
    self.subject_zero_skeleton_values = []

    with open(caselist, 'r') as f:
        cases = [x.strip() for x in f.readlines()]

    # loop through each subject volume
    for vol_num in np.arange(self.merged_skeleton_data.shape[-1]):
        vol_data = self.merged_skeleton_data[:, :, :, vol_num]
        subject_id = cases[vol_num]
        warp_data_loc = list(Path(warp_dir).glob(f'*{subject_id}*'))[0]
        warp_data = get_nifti_data(warp_data_loc)

        # coordinates that are zero in the skeleton but inside the mask
        zero_in_the_skeleton_coord = np.where(
            (self.mask_data == 1) & (vol_data == 0))
        self.subject_zero_skeleton_values.append(
            warp_data[zero_in_the_skeleton_coord])
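# A sketch of the coordinate lookup above: a boolean conjunction of
# "inside the mask" and "zero in the subject skeleton", followed by
# fancy indexing into the warp image. All arrays here are synthetic.
def _demo_zero_skeleton_lookup():
    import numpy as np
    rng = np.random.default_rng(0)
    mask = (rng.random((3, 3, 3)) > 0.3).astype(int)
    skeleton = rng.random((3, 3, 3)) * (rng.random((3, 3, 3)) > 0.5)
    warp = rng.random((3, 3, 3))

    coords = np.where((mask == 1) & (skeleton == 0))
    return warp[coords]   # warp values at zero-skeleton voxels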
def get_figure_enigma(self, **kwargs):
    # TODO: replace this function with nifti_snapshot
    # TODO: add skeleton check functions to randomise_summary
    """Create figure and axes attributes for the CorrpMap"""
    # if the study template is not ENIGMA
    if 'mean_fa' in kwargs:
        mean_fa_loc = kwargs.get('mean_fa')
        print(f'background image : {mean_fa_loc}')
        self.enigma_fa_data = get_nifti_data(mean_fa_loc)

        mean_fa_skel_loc = re.sub(r'\.nii\.gz', '_skeleton.nii.gz',
                                  mean_fa_loc)
        print(f'background skeleton image: {mean_fa_skel_loc}')
        self.enigma_skeleton_data = get_nifti_data(mean_fa_skel_loc)
    else:
        self.enigma_fa_data = get_nifti_data(self.fa_bg_loc)
        self.enigma_skeleton_data = get_nifti_data(self.skel_mask_loc)

    # figure settings
    self.ncols = 5
    self.nrows = 4
    size_w = 4
    size_h = 4

    # when a study template is used, slice_gap=3 is too wide
    if self.data_shape[-1] < 100:
        slice_gap = 2
    else:
        slice_gap = 3

    # Get the center of the background data
    # (`ndimage.measurements` is deprecated; call `center_of_mass`
    # directly from `scipy.ndimage`)
    center_of_data = np.array(
        ndimage.center_of_mass(self.enigma_fa_data)).astype(int)
    # Get the center slice number
    z_slice_center = center_of_data[-1]

    # Get the slice numbers in an array
    nslice = self.ncols * self.nrows
    slice_nums = np.arange(z_slice_center - (nslice * slice_gap),
                           z_slice_center + (nslice * slice_gap),
                           slice_gap)[::2]

    # Make background voxels transparent: use corrp_data_filled when
    # it exists, otherwise threshold the raw corrp_data. The final
    # else branch is the fallback so `data` is always defined.
    if hasattr(self, 'corrp_data_filled'):
        data = np.where(self.corrp_data_filled == 0,
                        np.nan,
                        self.corrp_data_filled)
    elif hasattr(self, 'type') and self.type in [
            'average', 'std', 'bin_sum', 'bin_sum_diff']:
        # for skeleton summary data plots
        data = np.where(self.corrp_data == 0, np.nan, self.corrp_data)
    else:
        # make voxels with intensities below the threshold transparent
        data = np.where(self.corrp_data < self.threshold,
                        np.nan,
                        self.corrp_data)

    vmin = self.vmin if hasattr(self, 'vmin') else self.threshold
    if hasattr(self, 'vmax'):
        vmax = self.corrp_data.max() if self.vmax == 'free' else self.vmax
    else:
        vmax = 1

    self.tbssFigure = nifti_snapshot.TbssFigure(
        template=self.template,
        image_data_list=[data],
        output_file=self.out_image_loc,
        cmap_list=['autumn'],
        cbar_titles=[self.cbar_title],
        alpha_list=[1],
        title=self.title)

    # below replicates self.tbssFigure.create_figure_one_map()
    self.tbssFigure.images_mask_out_the_zero()
    self.tbssFigure.images_mask_by_threshold(0.95)
    self.tbssFigure.loop_through_axes_draw_bg_tbss()
    self.tbssFigure.annotate_with_z()
    self.tbssFigure.loop_through_axes_draw_images_corrp_map(0.95)

    self.tbssFigure.cbar_x = 0.25
    self.tbssFigure.cbar_width = 0.5
    self.tbssFigure.add_cbars_horizontal()

    self.tbssFigure.fig.suptitle(
        self.tbssFigure.title, y=0.92, fontsize=25)
    self.tbssFigure.fig.savefig(self.tbssFigure.output_file, dpi=200)
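# A sketch of the slice-selection logic in get_figure_enigma: find the
# z center of mass of the background image, then take every other slice
# in a window around it. The blob and grid sizes are made up; the
# scipy.ndimage.center_of_mass call is the same one used above.
def _demo_slice_numbers():
    import numpy as np
    from scipy import ndimage

    bg = np.zeros((10, 10, 10))
    bg[4:7, 4:7, 4:7] = 1                      # toy background blob
    z_center = int(np.array(ndimage.center_of_mass(bg))[-1])

    nslice, slice_gap = 5 * 4, 2               # ncols * nrows, gap
    return np.arange(z_center - nslice * slice_gap,
                     z_center + nslice * slice_gap,
                     slice_gap)[::2]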