def log_prop_vals(prop, saved_file, data, bvecs, idx_mask, idx_array,
                  bval_ind_wb0, bvals_wb0, mask):
    """
    Tensor model calculations of the given property

    Parameters
    ----------
    prop: str
        String indicating the property to be analyzed
        'FA': Fractional anisotropy
        'MD': Mean diffusivity
        'DI': Dispersion index
    saved_file: str
        Indicates whether or not you want the function to create or use
        saved parameter files
        'no': Function will not create or use saved files
        'yes': Function will create or use saved files
    data: 4 dimensional array
        Diffusion MRI data
    bvecs: 3 dimensional array
        All the b vectors
    idx_mask: tuple
        Indices of the non-zero voxels within the mask
    idx_array: ndarray
        Array with the indices indicating the location of the non-zero
        values within the mask
    bval_ind_wb0: list
        Arrays of indices into the data for each b value, with the b = 0
        indices included
    bvals_wb0: list
        Arrays of b values for each b value grouping, with the b = 0
        values included
    mask: 3 dimensional array
        Mask of the data

    Returns
    -------
    log_prop: list
        List of all the log of the desired property values
    """
    prop_dict = {'FA': 'FA', 'MD': 'MD', 'mean diffusivity': 'MD',
                 'fractional anisotropy': 'FA', 'dispersion index': 'DI',
                 'DI': 'DI'}

    log_prop = list()
    for k in idx_array[:len(idx_array) - 1]:
        if saved_file == 'no':
            params_file = 'temp'
        elif saved_file == 'yes':
            if prop_dict[prop] == 'DI':
                params_file = 'SparseDeconvolutionModel{0}.nii.gz'.format(k + 1)
            else:
                params_file = 'TensorModel{0}.nii.gz'.format(k + 1)

        if prop_dict[prop] == 'FA':
            tensor_prop = dti.TensorModel(data[:, :, :, bval_ind_wb0[k]],
                                          bvecs[:, bval_ind_wb0[k]],
                                          bvals_wb0[k], mask=mask,
                                          params_file=params_file)
            prop_val = tensor_prop.fractional_anisotropy
            log_prop.append(np.log(prop_val[idx_mask] + 0.01))
        elif prop_dict[prop] == 'MD':
            tensor_prop = dti.TensorModel(data[:, :, :, bval_ind_wb0[k]],
                                          bvecs[:, bval_ind_wb0[k]],
                                          bvals_wb0[k], mask=mask,
                                          params_file=params_file)
            prop_val = tensor_prop.mean_diffusivity
            log_prop.append(np.log(prop_val[idx_mask] + 0.01))
        elif prop_dict[prop] == 'DI':
            sfm_di = sfm.SparseDeconvolutionModel(data[:, :, :, bval_ind_wb0[k]],
                                                  bvecs[:, bval_ind_wb0[k]],
                                                  bvals_wb0[k], mask=mask,
                                                  params_file=params_file)
            prop_val = sfm_di.dispersion_index()
            log_prop.append(prop_val[idx_mask] + 0.01)

    return log_prop
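# Illustrative usage sketch. The variable names below are hypothetical stand-ins
# for arrays prepared elsewhere in the pipeline (e.g., by ozu.separate_bvals and
# the masking code); 'no' skips the saved parameter files:
#
#   log_fa = log_prop_vals('FA', 'no', data, bvecs, idx_mask, idx_array,
#                          bval_ind_wb0, bvals_wb0, mask)
#   # log_fa[i] holds log(FA + 0.01) for the masked voxels at the i-th b value.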
def test_log_prop_vals():
    bvals_wb0_t, bval_ind_wb0_t = test_include_b0vals()
    tensor_prop1 = dti.TensorModel(data_t[:, :, :, bval_ind_wb0_t[0]],
                                   bvecs_t[:, bval_ind_wb0_t[0]],
                                   bvals_wb0_t[0], mask=mask_t,
                                   params_file='temp')
    tensor_prop2 = dti.TensorModel(data_t[:, :, :, bval_ind_wb0_t[1]],
                                   bvecs_t[:, bval_ind_wb0_t[1]],
                                   bvals_wb0_t[1], mask=mask_t,
                                   params_file='temp')
    log_prop_t = [np.log(tensor_prop1.fractional_anisotropy[idx_mask_t] + 0.01),
                  np.log(tensor_prop2.fractional_anisotropy[idx_mask_t] + 0.01)]
    log_prop_a = mf.log_prop_vals('FA', saved_file, data_t, bvecs_t,
                                  idx_mask_t, idx_array, bval_ind_wb0_t,
                                  bvals_wb0_t, mask_t)
    # Compare every masked voxel in both b-value groupings:
    for idx in np.arange(len(log_prop_t[0])):
        npt.assert_equal(abs(log_prop_t[0][idx] - log_prop_a[0][idx]) < 0.05, 1)
        npt.assert_equal(abs(log_prop_t[1][idx] - log_prop_a[1][idx]) < 0.05, 1)

    return log_prop_t
def tensor_reliability(SD_1, SD_2, wm_mask):
    """
    Given two SD models, calculate the reliability of the PDD of the tensor
    models derived from them.

    Parameters
    ----------
    SD_1, SD_2 : SparseDeconvolutionModel class instances

    wm_mask : ndarray

    Returns
    -------
    pdd_rel : ndarray
        The angular difference between the PDD of the two tensor models fit
        to the predicted data from each of the SD models.
    """
    pred1 = np.concatenate([SD_1.S0[..., np.newaxis], SD_1.fit], -1)
    pred2 = np.concatenate([SD_2.S0[..., np.newaxis], SD_2.fit], -1)
    new_bvecs1 = np.concatenate(
        [np.array([[0, 0, 0]]).T, SD_1.bvecs[:, SD_1.b_idx]], -1)
    new_bvecs2 = np.concatenate(
        [np.array([[0, 0, 0]]).T, SD_2.bvecs[:, SD_2.b_idx]], -1)
    # Freely assume that the bvals are the same. Note that bvals is 1D, so
    # it is indexed with a single axis:
    new_bvals = np.hstack([0, SD_1.bvals[SD_1.b_idx]])

    TM1 = dti.TensorModel(pred1, new_bvecs1, new_bvals, mask=wm_mask,
                          params_file="temp")
    TM2 = dti.TensorModel(pred2, new_bvecs2, new_bvals, mask=wm_mask,
                          params_file="temp")

    pdd_rel = oza.pdd_reliability(TM1, TM2)

    return pdd_rel
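# Illustrative sketch (assumes SD1 and SD2 are SparseDeconvolutionModel
# instances fit to repeated scans of the same subject, as in the
# cross-prediction code elsewhere in this package):
#
#   pdd_angles = tensor_reliability(SD1, SD2, wm_mask)
#   # Median angular error of the principal diffusion direction across the
#   # white matter:
#   np.median(pdd_angles[np.where(wm_mask)])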
def rm_ventricles(wm_data_file, bvals, bvecs, data, data_path):
    """
    Removes the ventricles from the white matter mask and saves a new white
    matter mask.

    Parameters
    ----------
    wm_data_file: obj
        File handle for the white matter mask
    bvals: 1 dimensional array
        All b values
    bvecs: 2 dimensional array
        All the b vectors
    data: 4 dimensional array
        The diffusion data
    data_path: str
        Path to the data directory with all the subject's files.

    Returns
    -------
    new_wm_mask: 3 dimensional array
        A new white matter mask with the ventricles removed.
    """
    # Separate b-values and find the indices corresponding to the b0 and
    # bNk measurements, where N = the lowest b-value other than 0
    wm_data = wm_data_file.get_data()
    bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals)
    all_b_idx = np.where(bvals_scaled != 0)
    bNk_b0_inds = np.concatenate((b_inds[0], b_inds[1]))

    # Fit a tensor model
    tm = dti.TensorModel(data[..., bNk_b0_inds], bvecs[:, bNk_b0_inds],
                         bvals[bNk_b0_inds], mask=wm_data,
                         params_file="temp")

    # Find the median, and 25th and 75th percentiles of mean diffusivities
    md_median = np.median(tm.mean_diffusivity[np.where(wm_data)])
    q1 = stats.scoreatpercentile(tm.mean_diffusivity[np.where(wm_data)], 25)
    q3 = stats.scoreatpercentile(tm.mean_diffusivity[np.where(wm_data)], 75)

    # Exclude voxels with MDs above median + 2*interquartile range
    md_exclude = md_median + 2 * (q3 - q1)
    md_include = np.where(tm.mean_diffusivity[np.where(wm_data)] < md_exclude)
    new_wm_mask = np.zeros(wm_data.shape)
    new_wm_mask[np.where(wm_data)[0][md_include],
                np.where(wm_data)[1][md_include],
                np.where(wm_data)[2][md_include]] = 1

    wm = ni.Nifti1Image(new_wm_mask, wm_data_file.get_affine())
    ni.save(wm, os.path.join(data_path, 'wm_mask_no_vent.nii.gz'))

    return new_wm_mask
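# Illustrative usage sketch (the file name is hypothetical):
#
#   wm_file = ni.load(os.path.join(data_path, 'wm_mask.nii.gz'))
#   clean_wm = rm_ventricles(wm_file, bvals, bvecs, data, data_path)
#   # Writes wm_mask_no_vent.nii.gz into data_path and returns the new mask,
#   # with high-MD (presumably ventricular) voxels zeroed out.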
def make_wm_mask(seg_path, dwi, out_path, cutoff=2):
    """
    Function to make a conservative WM mask, excluding partial-volumed voxels

    Parameters
    ----------
    seg_path : str
        The full path to a white matter segmentation generated with the
        itkGray conventions (in particular, 3 and 4 are the classifications
        for WM in the left and right hemispheres).

    dwi : str or TensorModel
        The full path to a diffusion imaging data set, which will be used
        to calculate the mean diffusivity for the exclusion of outliers
        (presumably partial-volumed with the CSF), OR a TensorModel object,
        from which the MD will be calculated (which can be a way to save
        time on the calculation of the tensor parameters).

    out_path : str
        The full path to where you want to put the wm mask once it's
        calculated.

    cutoff : float
        How many IQRs above the median MD the cutoff should be.

    Note
    ----
    Don't forget to look at your data, to make sure that things are sane.
    """
    # Load the classification information:
    seg_ni = ni.load(seg_path)

    if isinstance(dwi, str):
        dwi_ni = ni.load(dwi + '.nii.gz')
    else:
        dwi_ni = ni.load(dwi.data_file)

    vimg = as_volume_img(seg_ni)
    # This does the magic - resamples using nearest neighbor interpolation:
    seg_resamp_vimg = vimg.as_volume_img(affine=dwi_ni.get_affine(),
                                         shape=dwi_ni.shape[:-1],
                                         interpolation='nearest')

    seg_resamp = seg_resamp_vimg.get_data()

    # We know that WM is classified as 3 and 4 (for the two different
    # hemispheres):
    wm_idx = np.where(np.logical_or(seg_resamp == 3, seg_resamp == 4))
    vol = np.zeros(seg_resamp.shape)
    vol[wm_idx] = 1

    if cutoff:
        if isinstance(dwi, str):
            # OK - now we need to find and exclude MD outliers:
            TensorModel = ozm.TensorModel(dwi + '.nii.gz',
                                          dwi + '.bvecs',
                                          dwi + '.bvals',
                                          mask=vol,
                                          params_file='temp')
        else:
            TensorModel = dwi

        MD = TensorModel.mean_diffusivity

        IQR = (stats.scoreatpercentile(MD[np.isfinite(MD)], 75) -
               stats.scoreatpercentile(MD[np.isfinite(MD)], 25))
        co = np.median(MD[np.isfinite(MD)]) + cutoff * IQR
        co_idx = np.where(MD > co)
        # Null 'em out:
        vol[co_idx] = 0

    # Now, let's save some output:
    ni.Nifti1Image(vol, dwi_ni.get_affine()).to_filename(out_path)
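# Illustrative usage sketch (the paths are hypothetical; note that when `dwi`
# is a string, the '.nii.gz', '.bvecs' and '.bvals' extensions are appended
# by the function itself):
#
#   make_wm_mask('/data/subj1/t1_class.nii.gz', '/data/subj1/dwi',
#                '/data/subj1/wm_mask.nii.gz', cutoff=2)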
    metavar='File',
    help='Output file name (.nii.gz)')

parser.add_argument('--mask_file', action='store', metavar='File',
                    help='Mask file (.nii.gz); only the voxels within the '
                         'binary mask will be analyzed (default: analyze '
                         'all voxels)',
                    default=None)

if __name__ == "__main__":
    # Parse the arguments only when run as a script:
    params = parser.parse_args()

    Model1 = dti.TensorModel(params.dwi_file1,
                             params.bvecs_file1,
                             params.bvals_file1,
                             mask=params.mask_file,
                             params_file='temp')

    Model2 = dti.TensorModel(params.dwi_file2,
                             params.bvecs_file2,
                             params.bvals_file2,
                             mask=params.mask_file,
                             params_file='temp')

    # Do it and save:
    nib.Nifti1Image(ana.rsquared(Model1, Model2),
                    Model1.affine).to_filename(params.out_file)
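# Example invocation sketch. The script name and the positional-argument
# order are hypothetical, since the rest of the parser setup is not shown
# here; only --mask_file is confirmed above:
#
#   python dti_rsquared.py dwi1.nii.gz dwi1.bvecs dwi1.bvals \
#       dwi2.nii.gz dwi2.bvecs dwi2.bvals rsq_out.nii.gz \
#       --mask_file wm_mask.nii.gz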
data_path = oio.data_path

rrmse_dti = {}
rrmse_ssd = {}

for subject in ['FP', 'HT']:
    subject_path = os.path.join(data_path, subject)
    wm_mask_file = os.path.join(subject_path, '%s_wm_mask.nii.gz' % subject)
    wm_nifti = ni.load(wm_mask_file).get_data()
    wm_mask = np.zeros(wm_nifti.shape)
    wm_mask[np.where(wm_nifti == 1)] = 1
    wm_idx = np.where(wm_nifti > 0)
    rrmse_dti[subject] = {}
    rrmse_ssd[subject] = {}
    for b in [1000, 2000, 4000]:
        data_1, data_2 = oio.get_dwi_data(b, subject=subject)
        TM1 = dti.TensorModel(*data_1, mask=wm_mask)
        TM2 = dti.TensorModel(*data_2, mask=wm_mask)
        rrmse_dti[subject][b] = ozm.cross_predict(TM1, TM2)
        print(subject)
        print(b)
        rmse_mask = rrmse_dti[subject][b][wm_idx]
        # This is a proportion of the WM voxels, not a raw count:
        print("DTI: proportion of voxels with rRMSE above 1: %s" %
              (len(np.where(rmse_mask > 1)[0]) / float(len(rmse_mask))))
        print("Median rRMSE: %s" % np.median(rmse_mask))
        ad_rd = oio.get_ad_rd(subject, b)
        SD1 = ssd.SparseDeconvolutionModel(*data_1, mask=wm_mask,
                                           axial_diffusivity=ad_rd[0]['AD'],
                                           radial_diffusivity=ad_rd[0]['RD'])
        SD2 = ssd.SparseDeconvolutionModel(*data_2, mask=wm_mask,
                                           axial_diffusivity=ad_rd[1]['AD'],
                                           radial_diffusivity=ad_rd[1]['RD'])
        rrmse_ssd[subject][b] = ozm.cross_predict(SD1, SD2)
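        # Sketch: the same summary could be reported for the SSD fits, for
        # symmetry with the DTI printout above (not in the original run):
        #
        #   ssd_rmse_mask = rrmse_ssd[subject][b][wm_idx]
        #   print("SSD: proportion of voxels with rRMSE above 1: %s" %
        #         (len(np.where(ssd_rmse_mask > 1)[0]) /
        #          float(len(ssd_rmse_mask))))
        #   print("Median rRMSE: %s" % np.median(ssd_rmse_mask))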
def initial_params(data, bvecs, bvals, model, mask=None, params_file='temp'):
    """
    Determine the initial values for fitting the isotropic diffusion model.
    This only works on the models that fit to the relative diffusion signal.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvecs: 2 dimensional array
        All the b vectors
    bvals: 1 dimensional array
        All b values
    model: callable
        The isotropic model function (e.g., single_exp_rs or bi_exp_rs)
    mask: 3 dimensional array
        Mask of the data
    params_file: obj or str
        File handle of the params_file containing the tensor parameters.

    Returns
    -------
    bounds: list
        A list containing the bounds for each parameter for least squares
        fitting.
    initial: list
        A list containing the initial values for each parameter for least
        squares fitting.
    """
    dti_mod = dti.TensorModel(data, bvecs, bvals, mask=mask,
                              params_file=params_file)

    d = dti_mod.mean_diffusivity[np.where(mask)]

    # Find initial noise floor
    _, b_inds, _, _ = ozu.separate_bvals(bvals)
    b0_data = data[np.where(mask)][:, b_inds[0]]
    #nf = np.std(b0_data, -1)/np.mean(b0_data, -1)
    nf = np.min(data[np.where(mask)], -1)

    if model == single_exp_rs:
        bounds = [(0, 4)]
        initial = d

    elif model == single_exp_nf_rs:
        bounds = [(0, 10000), (0, 4)]
        initial = np.concatenate([nf[..., None],
                                  np.ones(d[..., None].shape)], -1)

    elif model == bi_exp_rs:
        bounds = [(0, 1), (0, 4), (0, 4)]
        initial = np.concatenate([0.5 * np.ones((len(d), 1)),
                                  d[..., None], d[..., None]], -1)

    elif model == bi_exp_nf_rs:
        bounds = [(0, 10000), (0, 1), (0, 4), (0, 4)]
        initial = np.concatenate([nf[..., None],
                                  0.5 * np.ones((len(d), 1)),
                                  d[..., None], d[..., None]], -1)

    return bounds, initial
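# Illustrative usage sketch (assumes `bi_exp_rs` is the bi-exponential
# relative-signal model function defined elsewhere in this module):
#
#   bounds, initial = initial_params(data, bvecs, bvals, bi_exp_rs, mask=mask)
#   # `bounds` has one (low, high) pair per model parameter, and `initial`
#   # has one row of starting values per masked voxel, ready for least
#   # squares fitting.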
bounds_max = maxs - diff

CC_box[bounds_min[0]:bounds_max[0],
       bounds_min[1]:bounds_max[1],
       bounds_min[2]:bounds_max[2]] = 1

mask_corpus_callosum, cfa = segment_from_cfa(tensorfit, CC_box, threshold,
                                             return_cfa=True)
# Clean up the cc isolation
new_mask = isolate_cc(mask_corpus_callosum)

tm = dti.TensorModel(bnk_data, bvecs[:, bnk_b0_inds], bvals[bnk_b0_inds],
                     mask=new_mask, params_file="temp")

# Extract the median axial and radial diffusivities for a tensor fit to
# the voxels in the corpus callosum.
ad_arr[b_idx - 1] = np.median(tm.axial_diffusivity[np.where(new_mask)])
rd_arr[b_idx - 1] = np.median(tm.radial_diffusivity[np.where(new_mask)])

os.chdir(data_path)

# Save a mask of the corpus callosum, using the affine from the data file:
aff = data_file.get_affine()
nib.Nifti1Image(new_mask, aff).to_filename(
    os.path.join(data_path, "cc_mask.nii.gz"))
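# Quick sanity-check sketch (hypothetical): reload the saved mask and count
# the corpus callosum voxels, to confirm the segmentation is non-empty:
#
#   cc = nib.load(os.path.join(data_path, "cc_mask.nii.gz")).get_data()
#   print("CC voxels: %d" % int(cc.sum()))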