def test_separate_bvals():
    """Check snr.separate_bvals against the precomputed test fixtures.

    Verifies the unique b values first, then walks the grouped b values
    and their indices, comparing each entry to the expected fixtures.
    """
    grouped_vals, grouped_inds, found_unique = snr.separate_bvals(bvals_t)
    npt.assert_equal(found_unique, unique_b_t)
    # Squeeze once up front rather than per iteration.
    squeezed_vals = np.squeeze(grouped_vals)
    squeezed_inds = np.squeeze(grouped_inds)
    for k in range(len(grouped_vals)):
        npt.assert_equal(squeezed_vals[k], bval_list_t[k])
        npt.assert_equal(squeezed_inds[k], bval_ind_t[k])
def test_separate_bvals():
    """Check snr.separate_bvals (4-output form) against the test fixtures.

    Verifies the unique b values and the scaled b values, then walks the
    grouped b values and their indices, comparing each to the fixtures.
    """
    grouped_vals, grouped_inds, found_unique, scaled = \
        snr.separate_bvals(bvals_t)
    npt.assert_equal(found_unique, unique_b_t)
    npt.assert_equal(scaled, bvals_scaled_t)
    # Squeeze once up front rather than per iteration.
    squeezed_vals = np.squeeze(grouped_vals)
    squeezed_inds = np.squeeze(grouped_inds)
    for k in range(len(grouped_vals)):
        npt.assert_equal(squeezed_vals[k], bval_list_t[k])
        npt.assert_equal(squeezed_inds[k], bval_ind_t[k])
def place_predict(files):
    """Place per-chunk prediction files back into whole-brain volumes.

    Parameters
    ----------
    files: list of str
        File names of saved per-chunk prediction arrays.  Names are
        expected to look like "all_predictN.<ext>" or
        "bvals_predictN.<ext>", where N is the chunk index and each
        chunk covers 70 white-matter voxels.

    Returns
    -------
    missing_files: tuple of arrays
        Indices of chunks for which no file was found.
    all_predict_brain, bvals_predict_brain: arrays
        Predictions placed back into brain space (shape: brain + bvals).
    rmse_b, rmse_mb: arrays
        Root-mean-square error between the measured data and each set of
        predictions, evaluated at the white-matter voxels.
    """
    data_path = "/biac4/wandell/data/klchan13/100307/Diffusion/data"

    # Get file objects
    data_file = nib.load(os.path.join(data_path, "data.nii.gz"))
    wm_data_file = nib.load(os.path.join(data_path,
                                         "wm_mask_registered.nii.gz"))

    # Get data and white-matter voxel indices
    data = data_file.get_data()
    wm_data = wm_data_file.get_data()
    wm_idx = np.where(wm_data == 1)

    # b values (scaled to units of ~1000 s/mm^2) and the indices of the
    # diffusion-weighted (non-b0) volumes.
    bvals = np.loadtxt(os.path.join(data_path, "bvals"))
    bval_list, b_inds, unique_b, rounded_bvals = snr.separate_bvals(bvals/1000)
    all_b_idx = np.squeeze(np.where(rounded_bvals != 0))

    all_predict_brain = ozu.nans((wm_data_file.shape + bvals.shape))
    bvals_predict_brain = ozu.nans((wm_data_file.shape + bvals.shape))

    # Keep track of chunk indices in case there are any missing files.
    i_track = np.ones(1830)
    for f_idx in np.arange(len(files)):
        this_file = files[f_idx]
        predict_data = nib.load(this_file).get_data()
        # Each chunk holds the predictions for 70 white-matter voxels;
        # the last chunk may be shorter.
        if this_file[0:11] == "all_predict":
            i = int(this_file.split(".")[0][11:])
            print("Placing all_predict %4.2f of 1830" % (i + 1))
            low = i * 70
            high = np.min([(i + 1) * 70, int(np.sum(wm_data))])
            all_predict_brain[wm_idx[0][low:high],
                              wm_idx[1][low:high],
                              wm_idx[2][low:high]] = predict_data
        elif this_file[0:13] == "bvals_predict":
            i = int(this_file.split(".")[0][13:])
            print("Placing bvals_predict %4.2f of 1830" % (i + 1))
            low = i * 70
            high = np.min([(i + 1) * 70, int(np.sum(wm_data))])
            bvals_predict_brain[wm_idx[0][low:high],
                                wm_idx[1][low:high],
                                wm_idx[2][low:high]] = predict_data
        # Mark chunk i as found.  NOTE(review): the collapsed original is
        # ambiguous about whether this line sits inside the elif branch or
        # at loop level; loop level matches the "track missing chunks"
        # intent — confirm against the full file.
        i_track[i] = 0

    # Measured data at the white-matter voxels, diffusion-weighted volumes
    # only.  NOTE(review): `actual` is restricted to all_b_idx while the
    # predictions span all volumes — confirm the shapes agree upstream.
    actual = data[wm_idx, :][:, all_b_idx]
    missing_files = np.where(i_track)

    rmse_b = np.sqrt(np.mean((actual - all_predict_brain[wm_idx])**2, -1))
    # BUG FIX: was `p.sqrt` (undefined name) — use np.sqrt.
    rmse_mb = np.sqrt(np.mean((actual - bvals_predict_brain[wm_idx])**2, -1))

    # Save the rmse and predict data.
    aff = data_file.get_affine()
    nib.Nifti1Image(all_predict_brain,
                    aff).to_filename("all_predict_brain.nii.gz")
    nib.Nifti1Image(bvals_predict_brain,
                    aff).to_filename("bvals_predict_brain.nii.gz")
    rmse_aff = np.eye(4)
    # BUG FIX: `rmse_b_flat`/`rmse_mb_flat` were never defined (NameError);
    # save and return the computed rmse_b/rmse_mb instead.
    nib.Nifti1Image(rmse_b, rmse_aff).to_filename("rmse_b_flat.nii.gz")
    nib.Nifti1Image(rmse_mb, rmse_aff).to_filename("rmse_mb_flat.nii.gz")

    return (missing_files, all_predict_brain, bvals_predict_brain,
            rmse_b, rmse_mb)
def slope(data, bvals, bvecs, prop, mask="None", saved_file="yes"):
    """
    Calculates and displays the slopes of a least squares solution fitted to
    either the log of the fractional anisotropy data or mean diffusivity data
    of the tensor model across the brain at different b values.

    Parameters
    ----------
    data: 4 dimensional array or Nifti1Image
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    bvecs: 3 dimensional array
        All the b vectors
    prop: str
        String indicating the property to analyzed
        'FA': Fractional anisotropy
        'MD': Mean diffusivity
    mask: 3 dimensional array or Nifti1Image
        Brain mask of the data
    saved_file: 'str'
        Indicate whether or not you want the function to create or use saved
        parameter files
        'no': Function will not create or use saved files
        'yes': Function will create or use saved files

    Returns
    -------
    slopeProp_all: 3 dimensional array
        Slope of the desired property across b values at each voxel
    """
    # Normalize the inputs into plain arrays for the calculations below.
    data, mask = obtain_data(data, mask)

    # Group the measurements by b value.
    grouped_bvals, grouped_inds, distinct_b = separate_bvals(bvals)
    b_idx_arr = np.arange(len(distinct_b))

    # Fold the b = 0 measurements into each group so the tensor model can
    # be fit per b value.
    inds_with_b0, bvals_with_b0 = include_b0vals(b_idx_arr, grouped_inds,
                                                 grouped_bvals)

    # Compute the log of the requested property within the brain mask.
    masked_vox = np.where(mask)
    log_prop = log_prop_vals(prop, saved_file, data, bvecs, masked_vox,
                             b_idx_arr, inds_with_b0, bvals_with_b0, mask)

    # Least-squares fit of the log property against b value, then plot.
    ls_fit = ls_fit_b(log_prop, distinct_b)
    slopeProp_all = plot_slopes(mask, ls_fit)

    return slopeProp_all
def scat_prop_snrSlope(log_prop, data, bvals, mask):
    """
    Displays a scatter density plot of the slopes of the log of the desired
    property values versus the slopes of the first order fit through SNR.

    Parameters
    ----------
    log_prop: list
        List of all the log of the desired property values
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    mask: 3 dimensional array
        Brain mask of the data
    """
    # NOTE(review): this unpacks three outputs from snr.separate_bvals,
    # whereas other call sites in this file unpack four — confirm which
    # version of separate_bvals this module targets.
    grouped_bvals, grouped_inds, distinct_b = snr.separate_bvals(bvals)

    # Fit slopes through the SNR and through the log property, per b value.
    snr_slopes = snr_ls_fit(data, bvals, mask, distinct_b)
    prop_slopes = ls_fit_b(log_prop, distinct_b)

    # Scatter-density of the two families of slopes (first fit coefficient).
    mpl.scatter_density(snr_slopes[0, :], prop_slopes[0, :])
# NOTE(review): this chunk is truncated — the trailing
# sfm.SparseDeconvolutionModel(...) call ends mid-argument-list with no
# closing parenthesis (a complete version of the same setup appears later
# in this file).  Left byte-identical; do not reformat until the missing
# tail is recovered.  Also relies on nib/os/np/sfm being in scope from
# elsewhere in the file.
import osmosis.predict_n as pn import osmosis.snr as snr data_path_dwi = "/biac4/wandell/data/klchan13/100307/Diffusion/data" red_data = nib.load(os.path.join(data_path_dwi, "data.nii.gz")).get_data() mask_data = nib.load(os.path.join(data_path_dwi, "nodif_brain_mask.nii.gz")).get_data() bvals = np.loadtxt(os.path.join(data_path_dwi, 'bvals')) bvecs = np.loadtxt(os.path.join(data_path_dwi, 'bvecs')) mask = np.zeros(mask_data.shape) mask[0:2, 0:2, 0:2] = 1 ad = {1000: 1, 2000: 1, 3000: 1} rd = {1000: 0, 2000: 0, 3000: 0} bval_list, b_inds, unique_b, rounded_bvals = snr.separate_bvals(bvals) mb = sfm.SparseDeconvolutionModelMultiB(red_data, bvecs, bvals, mask=mask, axial_diffusivity=ad, radial_diffusivity=rd, params_file="temp") sd = sfm.SparseDeconvolutionModel(red_data, bvecs, bvals, mask=mask, axial_diffusivity=ad, radial_diffusivity=rd,
# Scratch/benchmark setup: load the HCP subject 100307 diffusion data,
# restrict computation to a tiny 2x2x2-voxel mask, and construct both the
# single-b and multi-b sparse deconvolution models with temporary
# parameter files.  Assumes nib, os and np are in scope from elsewhere in
# the file.
import osmosis.model.sparse_deconvolution as sfm
import osmosis.predict_n as pn
import osmosis.snr as snr

data_path_dwi = "/biac4/wandell/data/klchan13/100307/Diffusion/data"

# Diffusion data, whole-brain mask, and the gradient table.
red_data = nib.load(os.path.join(data_path_dwi, "data.nii.gz")).get_data()
mask_data = nib.load(os.path.join(data_path_dwi,
                                  "nodif_brain_mask.nii.gz")).get_data()
bvals = np.loadtxt(os.path.join(data_path_dwi,'bvals'))
bvecs = np.loadtxt(os.path.join(data_path_dwi,'bvecs'))

# Restrict computation to a 2x2x2 corner of the volume.
mask = np.zeros(mask_data.shape)
mask[0:2, 0:2, 0:2] = 1

# Per-b-value axial/radial diffusivities passed to the models.
ad = {1000:1, 2000:1, 3000:1}
rd = {1000:0, 2000:0, 3000:0}

bval_list, b_inds, unique_b, rounded_bvals = snr.separate_bvals(bvals)

# Multi-b and single-b sparse deconvolution models over the small mask.
mb = sfm.SparseDeconvolutionModelMultiB(red_data, bvecs, bvals, mask = mask,
                                        axial_diffusivity = ad,
                                        radial_diffusivity = rd,
                                        params_file = "temp")
sd = sfm.SparseDeconvolutionModel(red_data, bvecs, bvals, mask = mask,
                                  axial_diffusivity = ad,
                                  radial_diffusivity = rd,
                                  params_file = "temp")

def bare_predict():
    # Predict the first nine measurements of the second b-value group.
    # NOTE(review): `mod` is not defined in this chunk — presumably one of
    # `mb` or `sd` above; confirm against the full file before running.
    mod.predict(bvecs[:, b_inds[1][0:9]], bvals[b_inds[1][0:9]])
def place_predict(files):
    """Place per-chunk prediction files back into whole-brain volumes.

    Parameters
    ----------
    files: list of str
        File names of saved per-chunk prediction arrays.  Names are
        expected to look like "all_predictN.<ext>" or
        "bvals_predictN.<ext>", where N is the chunk index and each
        chunk covers 70 white-matter voxels.

    Returns
    -------
    missing_files: tuple of arrays
        Indices of chunks for which no file was found.
    all_predict_brain, bvals_predict_brain: arrays
        Predictions placed back into brain space (shape: brain + bvals).
    rmse_b, rmse_mb: arrays
        Root-mean-square error between the measured data and each set of
        predictions, evaluated at the white-matter voxels.
    """
    data_path = "/biac4/wandell/data/klchan13/100307/Diffusion/data"

    # Get file objects
    data_file = nib.load(os.path.join(data_path, "data.nii.gz"))
    wm_data_file = nib.load(
        os.path.join(data_path, "wm_mask_registered.nii.gz"))

    # Get data and white-matter voxel indices
    data = data_file.get_data()
    wm_data = wm_data_file.get_data()
    wm_idx = np.where(wm_data == 1)

    # b values (scaled to units of ~1000 s/mm^2) and the indices of the
    # diffusion-weighted (non-b0) volumes.
    bvals = np.loadtxt(os.path.join(data_path, "bvals"))
    bval_list, b_inds, unique_b, rounded_bvals = snr.separate_bvals(
        bvals / 1000)
    all_b_idx = np.squeeze(np.where(rounded_bvals != 0))

    all_predict_brain = ozu.nans((wm_data_file.shape + bvals.shape))
    bvals_predict_brain = ozu.nans((wm_data_file.shape + bvals.shape))

    # Keep track of chunk indices in case there are any missing files.
    i_track = np.ones(1830)
    for f_idx in np.arange(len(files)):
        this_file = files[f_idx]
        predict_data = nib.load(this_file).get_data()
        # Each chunk holds the predictions for 70 white-matter voxels;
        # the last chunk may be shorter.
        if this_file[0:11] == "all_predict":
            i = int(this_file.split(".")[0][11:])
            print("Placing all_predict %4.2f of 1830" % (i + 1))
            low = i * 70
            high = np.min([(i + 1) * 70, int(np.sum(wm_data))])
            all_predict_brain[wm_idx[0][low:high],
                              wm_idx[1][low:high],
                              wm_idx[2][low:high]] = predict_data
        elif this_file[0:13] == "bvals_predict":
            i = int(this_file.split(".")[0][13:])
            print("Placing bvals_predict %4.2f of 1830" % (i + 1))
            low = i * 70
            high = np.min([(i + 1) * 70, int(np.sum(wm_data))])
            bvals_predict_brain[wm_idx[0][low:high],
                                wm_idx[1][low:high],
                                wm_idx[2][low:high]] = predict_data
        # Mark chunk i as found.  NOTE(review): the collapsed original is
        # ambiguous about whether this line sits inside the elif branch or
        # at loop level; loop level matches the "track missing chunks"
        # intent — confirm against the full file.
        i_track[i] = 0

    # Measured data at the white-matter voxels, diffusion-weighted volumes
    # only.  NOTE(review): `actual` is restricted to all_b_idx while the
    # predictions span all volumes — confirm the shapes agree upstream.
    actual = data[wm_idx, :][:, all_b_idx]
    missing_files = np.where(i_track)

    rmse_b = np.sqrt(np.mean((actual - all_predict_brain[wm_idx])**2, -1))
    # BUG FIX: was `p.sqrt` (undefined name) — use np.sqrt.
    rmse_mb = np.sqrt(np.mean((actual - bvals_predict_brain[wm_idx])**2, -1))

    # Save the rmse and predict data.
    aff = data_file.get_affine()
    nib.Nifti1Image(all_predict_brain,
                    aff).to_filename("all_predict_brain.nii.gz")
    nib.Nifti1Image(bvals_predict_brain,
                    aff).to_filename("bvals_predict_brain.nii.gz")
    rmse_aff = np.eye(4)
    # BUG FIX: `rmse_b_flat`/`rmse_mb_flat` were never defined (NameError);
    # save and return the computed rmse_b/rmse_mb instead.
    nib.Nifti1Image(rmse_b, rmse_aff).to_filename("rmse_b_flat.nii.gz")
    nib.Nifti1Image(rmse_mb, rmse_aff).to_filename("rmse_mb_flat.nii.gz")

    return (missing_files, all_predict_brain, bvals_predict_brain,
            rmse_b, rmse_mb)