def get_curvature_from_test(hemi='lh'):
    """
    Extract the test-session curvature maps of the 45 retest subjects.
    """
    import nibabel as nib
    from magicbox.io.io import save2nifti

    # inputs
    src_file = pjoin(proj_dir, f'analysis/s2/{hemi}/curvature.nii.gz')
    subj_test_file = pjoin(proj_dir, 'analysis/s2/subject_id')
    subj_retest_file = pjoin(work_dir, 'subject_id')

    # outputs
    out_file = pjoin(work_dir, f'curvature_{hemi}_ses-test.nii.gz')

    # prepare
    curv_maps = nib.load(src_file).get_fdata()
    subj_ids_test = open(subj_test_file).read().splitlines()
    subj_ids_retest = open(subj_retest_file).read().splitlines()
    retest_indices = [subj_ids_test.index(i) for i in subj_ids_retest]

    # calculate
    curv_retest = curv_maps[..., retest_indices]

    # save
    save2nifti(out_file, curv_retest)
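
# A minimal sketch (not part of the original pipeline) of the subject
# re-indexing used in get_curvature_from_test(): each retest subject is
# located by its position in the full test-session subject list, and those
# columns are pulled out of the 4D map array. The subject IDs and array
# shape below are made up for illustration.
def _demo_reindex_retest_subjects():
    import numpy as np

    subj_ids_test = ['100307', '100408', '101915', '102816']  # hypothetical
    subj_ids_retest = ['100408', '102816']                     # hypothetical
    curv_maps = np.random.rand(32492, 1, 1, len(subj_ids_test))

    retest_indices = [subj_ids_test.index(i) for i in subj_ids_retest]
    curv_retest = curv_maps[..., retest_indices]
    assert curv_retest.shape[-1] == len(subj_ids_retest)
    return curv_retest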
def calc_prob_map(hemi='lh'):
    import numpy as np
    import nibabel as nib
    from cxy_hcp_ffa.lib.predefine import roi2label
    from magicbox.io.io import save2nifti

    # inputs
    n_roi = len(roi2label)
    print(n_roi)
    roi_file = pjoin(work_dir, f'rois_v3_{hemi}.nii.gz')

    # outputs
    out_file = pjoin(work_dir, f'prob_maps_v3_{hemi}.nii.gz')

    # prepare
    rois = nib.load(roi_file).get_fdata().squeeze().T
    n_vtx = rois.shape[1]

    # calculate
    prob_maps = np.ones((n_roi, n_vtx)) * np.nan
    for idx, roi in enumerate(roi2label.keys()):
        label = roi2label[roi]
        print(roi)
        # get indices of subjects which contain the roi
        indices = rois == label
        subj_indices = np.any(indices, 1)
        # calculate roi probability map among valid subjects
        prob_map = np.mean(indices[subj_indices], 0)
        prob_maps[idx] = prob_map

    # save out
    save2nifti(out_file, prob_maps.T[:, None, None, :])
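
# A minimal sketch (not part of the original pipeline) of the probability-map
# arithmetic in calc_prob_map(): for a given ROI label, only subjects who have
# that ROI anywhere are kept, and the per-vertex probability is the fraction
# of those subjects in whom the vertex carries the label. The toy label array
# below is made up for illustration.
def _demo_prob_map_for_label(label=2):
    import numpy as np

    # rows = subjects, columns = vertices; 0 means "no ROI at this vertex"
    rois = np.array([[0, 2, 2, 0],
                     [0, 0, 2, 0],
                     [0, 0, 0, 0]])  # third subject lacks the ROI entirely

    indices = rois == label             # boolean (n_subj, n_vtx)
    subj_indices = np.any(indices, 1)   # subjects that contain the ROI
    prob_map = np.mean(indices[subj_indices], 0)
    # -> array([0., 0.5, 1., 0.]): averaged over the 2 valid subjects only
    return prob_map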
def roi_stats(gid=1, hemi='lh'):
    import numpy as np
    import nibabel as nib
    import pickle as pkl
    from cxy_hcp_ffa.lib.predefine import roi2label
    from magicbox.io.io import save2nifti

    # inputs
    rois = ('IOG-face', 'pFus-face', 'mFus-face')
    gid_file = pjoin(work_dir, f'group_id_{hemi}.npy')
    roi_file = pjoin(
        proj_dir, 'analysis/s2/1080_fROI/refined_with_Kevin/'
        f'rois_v3_{hemi}.nii.gz')

    # outputs
    rois_info_file = pjoin(work_dir, f'rois_info_{gid}_{hemi}.pkl')
    prob_maps_file = pjoin(work_dir, f'prob_maps_{gid}_{hemi}.nii.gz')

    # load
    gid_idx_vec = np.load(gid_file) == gid
    roi_maps = nib.load(roi_file).get_fdata().squeeze().T[gid_idx_vec]

    # prepare
    rois_info = dict()
    prob_maps = np.zeros((roi_maps.shape[1], 1, 1, len(rois)),
                         dtype=np.float64)

    # calculate
    for roi_idx, roi in enumerate(rois):
        label = roi2label[roi]
        rois_info[roi] = dict()

        # get indices of subjects which contain the roi
        indices = roi_maps == label
        subj_indices = np.any(indices, 1)

        # calculate the number of the valid subjects
        n_subject = np.sum(subj_indices)
        rois_info[roi]['n_subject'] = n_subject

        # calculate roi sizes for each valid subject
        sizes = np.sum(indices[subj_indices], 1)
        rois_info[roi]['sizes'] = sizes

        # calculate roi probability map among valid subjects
        prob_map = np.mean(indices[subj_indices], 0)
        prob_maps[:, 0, 0, roi_idx] = prob_map

    # save
    pkl.dump(rois_info, open(rois_info_file, 'wb'))
    save2nifti(prob_maps_file, prob_maps)
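
# A minimal sketch (not part of the original pipeline) of the per-subject ROI
# statistics in roi_stats(): among subjects that contain the ROI, the size is
# simply the number of vertices carrying the label. The toy label array below
# is made up for illustration.
def _demo_roi_sizes(label=3):
    import numpy as np

    roi_maps = np.array([[3, 3, 0, 3],
                         [0, 3, 0, 0],
                         [0, 0, 0, 0]])  # third subject lacks the ROI

    indices = roi_maps == label
    subj_indices = np.any(indices, 1)
    n_subject = np.sum(subj_indices)          # 2 valid subjects
    sizes = np.sum(indices[subj_indices], 1)  # -> array([3, 1]) vertices
    return n_subject, sizes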
def get_mpm(gh_id=11, hemi='lh'):
    """maximal probability map"""
    import numpy as np
    import nibabel as nib
    from commontool.io.io import save2nifti

    thr = 0.25
    prob_map_file = pjoin(split_dir, f'prob_maps_GH{gh_id}_{hemi}.nii.gz')
    out_file = pjoin(split_dir, f'MPM_GH{gh_id}_{hemi}.nii.gz')

    prob_maps = nib.load(prob_map_file).get_fdata()
    supra_thr_idx_arr = prob_maps > thr
    prob_maps[~supra_thr_idx_arr] = 0
    # 0-based argmax indices; shift suprathreshold voxels to 1-based labels
    # so that voxels without any suprathreshold ROI stay 0
    mpm = np.argmax(prob_maps, 3)
    mpm[np.any(prob_maps, 3)] += 1

    # save
    save2nifti(out_file, mpm)
def get_mpm(hemi='lh'):
    """maximal probability map"""
    import numpy as np
    import nibabel as nib
    from cxy_hcp_ffa.lib.predefine import roi2label
    from magicbox.io.io import save2nifti

    # inputs
    thr = 0.25
    map_indices = [1, 2]
    idx2roi = {0: 'IOG-face', 1: 'pFus-face', 2: 'mFus-face'}
    prob_file = pjoin(work_dir, f'prob_maps_v3_{hemi}.nii.gz')

    # outputs
    out_file = pjoin(work_dir, f'MPM_v3_{hemi}_{thr}_FFA.nii.gz')

    # prepare
    prob_maps = nib.load(prob_file).get_fdata()[..., map_indices]
    mpm_map = np.zeros(prob_maps.shape[:3])

    # calculate
    supra_thr_idx_arr = prob_maps > thr
    valid_idx_arr = np.any(supra_thr_idx_arr, 3)
    valid_arr = prob_maps[valid_idx_arr, :]
    mpm_tmp = np.argmax(valid_arr, -1)
    for i, idx in enumerate(map_indices):
        roi = idx2roi[idx]
        idx_arr = np.zeros_like(mpm_map, dtype=bool)
        idx_arr[valid_idx_arr] = mpm_tmp == i
        mpm_map[idx_arr] = roi2label[roi]

    # verification: make sure the suprathreshold probabilities at each valid
    # vertex are all distinct, so the argmax winner is unambiguous
    valid_supra_thr_idx_arr = supra_thr_idx_arr[valid_idx_arr, :]
    valid_count_vec = np.sum(valid_supra_thr_idx_arr, -1)
    valid_count_vec_uniq = np.zeros_like(valid_count_vec)
    for i in range(len(valid_count_vec)):
        valid_supra_thr_idx_vec = valid_supra_thr_idx_arr[i]
        valid_count_vec_uniq[i] = \
            len(set(valid_arr[i, valid_supra_thr_idx_vec]))
    assert np.all(valid_count_vec == valid_count_vec_uniq)

    # save
    save2nifti(out_file, mpm_map)
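
# A minimal sketch (not part of the original pipeline) of the thresholded
# argmax used by get_mpm(): probabilities at or below the threshold are
# ignored, vertices with no suprathreshold ROI stay 0, and the winning ROI
# index is mapped to its label. The probability values and label mapping
# below are made up for illustration.
def _demo_mpm(thr=0.25):
    import numpy as np

    # rows = vertices, columns = ROIs (e.g. pFus-face, mFus-face)
    prob_maps = np.array([[0.10, 0.20],   # nothing survives the threshold
                          [0.60, 0.30],   # ROI 0 wins
                          [0.40, 0.70]])  # ROI 1 wins
    idx2label = {0: 2, 1: 3}              # hypothetical ROI labels

    supra = prob_maps > thr
    valid = np.any(supra, 1)
    winners = np.argmax(np.where(supra, prob_maps, 0), 1)
    mpm = np.zeros(prob_maps.shape[0], dtype=int)
    for idx, lbl in idx2label.items():
        mpm[valid & (winners == idx)] = lbl
    # -> array([0, 2, 3])
    return mpm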
def calc_mean_meas_map(meas='thickness'):
    """
    Compute the mean map across the 1080 subjects.
    """
    import numpy as np
    from magicbox.io.io import CiftiReader, save2nifti
    from cxy_hcp_ffa.lib.predefine import hemi2stru

    # inputs
    hemis = ('lh', 'rh')
    subj_file = pjoin(proj_dir, 'analysis/s2/subject_id')
    meas_id_file = pjoin(proj_dir, 'data/HCP/subject_id_1096')
    meas2file = {
        'thickness': '/nfs/p1/public_dataset/datasets/hcp/DATA/'
                     'HCP_S1200_GroupAvg_v1/HCP_S1200_GroupAvg_v1/'
                     'S1200.All.thickness_MSMAll.32k_fs_LR.dscalar.nii',
        'myelin': '/nfs/p1/public_dataset/datasets/hcp/DATA/'
                  'HCP_S1200_GroupAvg_v1/HCP_S1200_GroupAvg_v1/'
                  'S1200.All.MyelinMap_BC_MSMAll.32k_fs_LR.dscalar.nii',
        'va': '/nfs/p1/public_dataset/datasets/hcp/DATA/'
              'HCP_S1200_GroupAvg_v1/HCP_S1200_GroupAvg_v1/'
              'S1200.All.midthickness_MSMAll_va.32k_fs_LR.dscalar.nii'
    }

    # outputs
    out_file = pjoin(work_dir, '{meas}_mean-1080_{hemi}.nii.gz')

    # prepare
    subj_ids = open(subj_file).read().splitlines()
    meas_reader = CiftiReader(meas2file[meas])
    meas_ids = open(meas_id_file).read().splitlines()
    meas_indices = [meas_ids.index(i) for i in subj_ids]

    # calculate
    for hemi in hemis:
        meas_maps = meas_reader.get_data(hemi2stru[hemi], True)[meas_indices]
        meas_mean = np.mean(meas_maps, 0)
        meas_mean = np.expand_dims(meas_mean, (1, 2))
        save2nifti(out_file.format(meas=meas, hemi=hemi), meas_mean)
def get_curvature(hemi='lh'):
    """
    Merge the retest-session curvature of the 45 retest subjects into one
    NIfTI file per hemisphere, mainly so that my ROI-labeling program can
    load and display curvature when delineating individual ROIs.
    The retest structural data used to be unavailable on the server, and I
    took it for granted that a subject's sulcal/gyral curvature would be
    identical across the two sessions, so the v1 and v2 ROIs were delineated
    with reference to the test-session curvature. Now that I have downloaded
    the retest structural data, I decided to use the retest-session curvature
    to double-check the retest individual ROIs.
    """
    import numpy as np
    from magicbox.io.io import CiftiReader, save2nifti
    from cxy_hcp_ffa.lib.predefine import hemi2stru

    # inputs
    hemis = ('lh', 'rh')
    fpath = '/nfs/m1/hcp/retest/{0}/MNINonLinear/fsaverage_LR32k/'\
            '{0}.curvature_MSMAll.32k_fs_LR.dscalar.nii'
    subj_id_file = pjoin(work_dir, 'subject_id')

    # outputs
    out_file = pjoin(work_dir, 'curvature_{hemi}.nii.gz')

    # prepare
    subj_ids = open(subj_id_file).read().splitlines()
    n_subj = len(subj_ids)
    hemi2data = {}
    for hemi in hemis:
        hemi2data[hemi] = np.zeros((32492, 1, 1, n_subj), np.float64)

    # calculate
    for subj_idx, subj_id in enumerate(subj_ids):
        reader = CiftiReader(fpath.format(subj_id))
        for hemi in hemis:
            data_tmp = reader.get_data(hemi2stru[hemi], True)[0]
            hemi2data[hemi][:, 0, 0, subj_idx] = data_tmp
        print(f'Finished: {subj_idx+1}/{n_subj}')

    # save
    for hemi in hemis:
        save2nifti(out_file.format(hemi=hemi), hemi2data[hemi])
def calc_prob_map():
    import numpy as np
    import nibabel as nib
    from cxy_hcp_ffa.lib.predefine import roi2label
    from magicbox.io.io import save2nifti

    hemis = ('lh', 'rh')
    rois = ('IOG-face', 'pFus-face', 'mFus-face')
    n_roi = len(rois)
    gh_ids = (1, 2, 11, 12, 21, 22)
    gh_id_file = pjoin(split_dir, 'half_id_{}.npy')
    roi_file = pjoin(
        proj_dir, 'analysis/s2/1080_fROI/refined_with_Kevin/'
        'rois_v3_{}.nii.gz')
    out_file = pjoin(split_dir, 'prob_maps_GH{}_{}.nii.gz')

    for hemi in hemis:
        gh_id_vec = np.load(gh_id_file.format(hemi))
        roi_maps = nib.load(roi_file.format(hemi)).get_fdata().squeeze().T
        for gh_id in gh_ids:
            gh_id_idx_vec = gh_id_vec == gh_id
            roi_maps_tmp = roi_maps[gh_id_idx_vec]
            prob_maps = np.zeros((n_roi, roi_maps.shape[1]))
            for idx, roi in enumerate(rois):
                label = roi2label[roi]
                # get indices of subjects which contain the roi
                indices = roi_maps_tmp == label
                subj_indices = np.any(indices, 1)
                # calculate roi probability map among valid subjects
                prob_map = np.mean(indices[subj_indices], 0)
                prob_maps[idx] = prob_map
            # save out
            save2nifti(out_file.format(gh_id, hemi),
                       prob_maps.T[:, None, None, :])