# Report median tensor statistics (AD, RD, MD) within corpus-callosum voxels
# that also fall inside the white-matter mask, for each subject and b-value.
# NOTE(review): this fragment was collapsed onto a single line in the source;
# the nesting below is reconstructed from the statement order — confirm
# against the original script. Relies on module-level imports (os, np, ni,
# oio, dti) provided elsewhere in the file.
data_path = '/home/arokem/data/osmosis/'
for subject in ['FP']:  # , 'HT']:
    subject_path = os.path.join(data_path, subject)
    wm_mask_file = os.path.join(subject_path, '%s_wm_mask.nii.gz'%subject)
    wm_nifti = ni.load(wm_mask_file)
    wm_data = wm_nifti.get_data()
    cc_file = os.path.join(data_path,subject,'%s_cc.nii.gz'%subject)
    cc_nifti = ni.load(cc_file)
    cc_data = cc_nifti.get_data()
    # Voxels that are nonzero in BOTH the CC ROI and the WM mask:
    idx = np.where(np.logical_and(cc_data, wm_data))
    for b in [1000, 2000, 4000]:
        # Two repeat measurements at this b-value:
        data_1, data_2 = oio.get_dwi_data(b, subject=subject)
        TM1 = dti.TensorModel(*data_1, mask=wm_data)
        TM2 = dti.TensorModel(*data_2, mask=wm_data)
        for TM in [TM1, TM2]:
            AD = TM.axial_diffusivity[idx]
            RD = TM.radial_diffusivity[idx]
            MD = TM.mean_diffusivity[idx]
            print('For: %s and b=%s: AD=%1.4f, RD=%1.4f, MD=%1.4f'%(
                subject, b, np.median(AD), np.median(RD), np.median(MD)))
            # Index of the voxel whose value is closest to the median:
            idx_AD = np.argmin(np.abs(AD - np.median(AD)))
            idx_RD = np.argmin(np.abs(RD - np.median(RD)))
            idx_MD = np.argmin(np.abs(MD - np.median(MD)))
            print('For: %s and b=%s: AD idx=%s, RD idx =%s, MD idx=%s'%(
                subject, b, idx_AD, idx_RD, idx_MD))
data_path = '/hsgs/u/arokem/tmp/' ssh = sge.SSH(hostname='proclus.stanford.edu', username='******', port=22) batch_sge = [] for subject in ['FP']: #,'HT'] subject_path = os.path.join(oio.data_path, subject) wm_mask_file = os.path.join(subject_path, '%s_wm_mask.nii.gz' % subject) wm_nifti = ni.load(wm_mask_file) wm_data = wm_nifti.get_data() n_wm_vox = np.sum(wm_data) wm_file = "%s_wm_mask.nii.gz" % subject for b in [1000, 2000, 4000]: ad_rd = oio.get_ad_rd(subject, b) for data_i, data in enumerate(oio.get_dwi_data(b, subject)): file_stem = (data_path + '%s/' % subject + data[0].split('/')[-1].split('.')[0]) for l1_ratio in l1_ratios: for alpha in alphas: for i in range(int(n_wm_vox / 10000) + 2): params_file = "%s_SSD_l1ratio%s_alpha%s_%03d.nii.gz" % ( file_stem, l1_ratio, alpha, i) params_dict = dict(data_path=data_path, i=i, data_i=data_i, subject=subject, b=b, l1_ratio=l1_ratio, alpha=alpha,
# Freely assume that the bvals are the same new_bvals = np.hstack([0, SD_1.bvals[:,SD_1.b_idx]]) # TM1 = dti.TensorModel(pred1, new_bvecs1, new_bvals, mask=wm_mask, params_file="temp") TM2 = dti.TensorModel(pred2, new_bvecs2, new_bvals, mask=wm_mask, params_file="temp") pdd_rel = oza.pdd_reliability(TM1, TM2) return pdd_rel if __name__=="__main__": oio.data_path = data_path data = oio.get_dwi_data(b, subject) ad_rd = oio.get_ad_rd(subject, b) subject_path = os.path.join(data_path, subject) wm_mask_file = os.path.join(subject_path, wm_file) wm_nifti = ni.load(wm_mask_file) wm_data = wm_nifti.get_data() wm_idx = np.where(wm_data==1) # Preserve memory by getting rid of this data: del wm_data # Now set the mask: mask = np.zeros(wm_nifti.shape) mask[wm_idx[0], wm_idx[1], wm_idx[2]] = 1 solver_params = dict(alpha=alpha,
data_path = '/hsgs/u/arokem/tmp/' ssh = sge.SSH(hostname='proclus.stanford.edu',username='******', port=22) batch_sge = [] for subject in ['FP']: #,'HT'] subject_path = os.path.join(oio.data_path, subject) wm_mask_file = os.path.join(subject_path, '%s_wm_mask.nii.gz'%subject) wm_nifti = ni.load(wm_mask_file) wm_data = wm_nifti.get_data() n_wm_vox = np.sum(wm_data) wm_file = "%s_wm_mask.nii.gz"%subject for b in [1000, 2000, 4000]: ad_rd = oio.get_ad_rd(subject, b) for data_i, data in enumerate(oio.get_dwi_data(b, subject)): file_stem = (data_path + '%s/'%subject + data[0].split('/')[-1].split('.')[0]) for l1_ratio in l1_ratios: for alpha in alphas: for i in range(int(n_wm_vox/10000)+2): params_file="%s_SSD_l1ratio%s_alpha%s_%03d.nii.gz"%( file_stem, l1_ratio, alpha, i) params_dict = dict( data_path=data_path, i=i, data_i=data_i,
# Cluster worker script: fit SSD to one 10000-voxel chunk of one subject's
# WM mask. The wrapper that submits this job prepends lines defining the
# parameters named in the comment below.
# NOTE(review): this fragment was collapsed onto one line in the source; the
# layout below is reconstructed from statement order — confirm against the
# original. It ends on a complete statement but the script clearly continues.
# Lines above this one are auto-generated by the wrapper to provide as params:
# data_path, b, subject, data_i, alpha, rho, wm_file, i, ad, rd, params_file
import osmosis.model.sparse_deconvolution as ssd
import osmosis.io as oio
import nibabel as ni
import os
import numpy as np

if __name__ == "__main__":
    oio.data_path = data_path
    # Select the data_i-th repeat of the dual-acquisition data:
    data = oio.get_dwi_data(b, subject)[data_i]
    solver_params = dict(alpha=alpha,
                         l1_ratio=l1_ratio,
                         fit_intercept=False,
                         positive=True)
    subject_path = os.path.join(data_path, subject)
    wm_mask_file = os.path.join(subject_path, wm_file)
    wm_nifti = ni.load(wm_mask_file)
    wm_data = wm_nifti.get_data()
    wm_idx = np.where(wm_data == 1)
    # This job handles WM voxels [low, high): chunk i of size 10000.
    low = i * 10000
    # Make sure not to go over the edge of the mask:
    high = np.min([(i + 1) * 10000, int(np.sum(wm_data))])
    # Preserve memory by getting rid of this data:
    del wm_data
    # Now set the mask:
    mask = np.zeros(wm_nifti.shape)
data_path = oio.data_path rrmse_dti = {} rrmse_ssd = {} for subject in ['FP', 'HT']: subject_path = os.path.join(data_path, subject) wm_mask_file = os.path.join(subject_path, '%s_wm_mask.nii.gz'%subject) wm_nifti = ni.load(wm_mask_file).get_data() wm_mask = np.zeros(wm_nifti.shape) wm_mask[np.where(wm_nifti==1)] = 1 wm_idx = np.where(wm_nifti>0) rrmse_dti[subject] = {} rrmse_ssd[subject] = {} for b in [1000, 2000, 4000]: data_1, data_2 = oio.get_dwi_data(b, subject=subject) TM1 = dti.TensorModel(*data_1, mask=wm_mask) TM2 = dti.TensorModel(*data_2, mask=wm_mask) rrmse_dti[subject][b] = ozm.cross_predict(TM1, TM2) print subject print b rmse_mask = rrmse_dti[subject][b][wm_idx] print "DTI: %s voxels above 1"%(len(np.where(rmse_mask>1)[0])/float(len(rmse_mask))) print "Median rRMSE: %s"%np.median(rmse_mask) ad_rd = oio.get_ad_rd(subject, b) SD1 = ssd.SparseDeconvolutionModel(*data_1, mask=wm_mask, axial_diffusivity=ad_rd[0]['AD'], radial_diffusivity=ad_rd[0]['RD']) SD2 = ssd.SparseDeconvolutionModel(*data_2, mask=wm_mask, axial_diffusivity=ad_rd[1]['AD'], radial_diffusivity=ad_rd[1]['RD'])