def stimuli_recon(prf_dir, db_dir, subj_id, roi):
    """Reconstruct stimulus based on pRF model."""
    # fmri response: only the validation runs are used here
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del train_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # model parameters: per-voxel validation accuracy and spatial filters
    roi_dir = os.path.join(prf_dir, roi)
    val_corr = np.load(os.path.join(roi_dir, 'reg_sel_model_corr.npy'))
    filters = np.load(os.path.join(roi_dir, 'filters.npy'))
    n_stim = val_fmri_ts.shape[1]
    recon_imgs = np.zeros((n_stim, 500, 500))
    # temporal z-score of the fMRI data
    print('fmri data temporal z-score')
    ts_mean = np.mean(val_fmri_ts, axis=1, keepdims=True)
    ts_std = np.std(val_fmri_ts, axis=1, keepdims=True)
    val_fmri_ts = (val_fmri_ts - ts_mean) / (1e-10 + ts_std)
    # keep only voxels whose prediction accuracy passes threshold
    sel_vxls = np.nonzero(val_corr >= 0.24)[0]
    for t in range(n_stim):
        print('Reconstruct stimilus %s' % (t + 1))
        # weighted sum of the selected voxels' filters
        img = np.zeros((500, 500))
        for v in sel_vxls:
            img += val_fmri_ts[int(v), int(t)] * filters[v]
        recon_imgs[t] = img
    np.save(os.path.join(roi_dir, 'recon_img.npy'), recon_imgs)
def null_distribution_prf_tunning(feat_dir, prf_dir, db_dir, subj_id, roi,
                                  n_perm=1000):
    """Generate Null distribution of pRF model tunning using validation data.

    For each voxel the predicted time course of its selected model is
    correlated against `n_perm` random permutations of the measured
    validation time course; the correlations form the null distribution
    saved to `random_corr.npy` (shape: voxels x n_perm).  `n_perm`
    defaults to the previously hard-coded 1000.
    """
    # load fmri response (training data is not needed here)
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del train_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # load candidate models lazily -- the array is large
    val_models = np.load(os.path.join(feat_dir, 'val_candidate_model.npy'),
                         mmap_mode='r')
    # output directory config
    roi_dir = os.path.join(prf_dir, roi)
    # load selected model parameters
    paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
    sel_model = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
    null_corr = np.zeros((paras.shape[0], n_perm))
    for i in range(paras.shape[0]):
        print('Voxel %s' % (i))
        # load features of the selected candidate model and z-score them
        feats = np.array(val_models[int(sel_model[i]), ...]).astype(np.float64)
        feats = zscore(feats.T).T
        pred = np.dot(feats, paras[i])
        for j in range(n_perm):
            # permute the measured time course to break the pairing
            shuffled_val_ts = np.random.permutation(val_fmri_ts[i])
            null_corr[i, j] = np.corrcoef(pred, shuffled_val_ts)[0, 1]
    np.save(os.path.join(roi_dir, 'random_corr.npy'), null_corr)
def filter_recon(prf_dir, db_dir, subj_id, roi):
    """Reconstruct filter map of each voxel based on selected model.

    Combines the voxel's gaussian pooling kernel with the spatial gabor
    wavelets (9 scales x 8 orientations = 72), weighted by the selected
    model's regression weights.  Only voxels whose validation correlation
    reaches 0.24 are processed; results are saved to `filters.npy` and
    per-voxel figures.
    """
    # load fmri response (only the voxel indices are needed here)
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del train_fmri_ts
    del val_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # output config
    roi_dir = os.path.join(prf_dir, roi)
    # pRF estimates: selected model index, weights and validation accuracy
    sel_models = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
    sel_paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
    sel_model_corr = np.load(os.path.join(roi_dir, 'reg_sel_model_corr.npy'))
    filters = np.zeros((sel_models.shape[0], 500, 500))
    fig_dir = os.path.join(roi_dir, 'filters')
    check_path(fig_dir)
    # minimal validation accuracy for a voxel to be reconstructed
    thr = 0.24
    # gabor bank generation: 9 scales x 8 orientations = 72 wavelets
    gwt = bob.ip.gabor.Transform(number_of_scales=9)
    gwt.generate_wavelets(500, 500)
    spatial_gabors = np.zeros((72, 500, 500))
    for i in range(72):
        w = bob.ip.gabor.Wavelet(resolution=(500, 500),
                                 frequency=gwt.wavelet_frequencies[i])
        sw = bob.sp.ifft(w.wavelet.astype(np.complex128))
        # shift the spatial kernel so it is centered in the image
        spatial_gabors[i, ...] = np.roll(np.roll(np.real(sw), 250, 0), 250, 1)
    for i in range(sel_models.shape[0]):
        if sel_model_corr[i] < thr:
            continue
        print('Voxel %s, Val Corr %s' % (i, sel_model_corr[i]))
        model_idx = int(sel_models[i])
        # decode gaussian pooling field parameters from the model index
        # (floor division keeps py2 int `/` semantics and works on py3)
        si = model_idx // 2500
        xi = (model_idx % 2500) // 50
        yi = (model_idx % 2500) % 50
        x0 = np.arange(5, 500, 10)[xi]
        y0 = np.arange(5, 500, 10)[yi]
        sigma = [1] + [n * 5 for n in range(1, 13)] + [70, 80, 90, 100]
        s = sigma[si]
        print('center: %s, %s, sigma: %s' % (y0, x0, s))
        kernel = make_2d_gaussian(500, s, center=(x0, y0))
        # restrict computation to the kernel's effective support
        kpos = np.nonzero(kernel > 0.00000001)
        paras = sel_paras[i]
        # per-wavelet partial filters are accumulated in a scratch memmap
        # so the joblib workers can write to shared storage
        tmp_file = os.path.join(fig_dir, 'tmp_kernel.npy')
        tmp_filter = np.memmap(tmp_file, dtype='float64', mode='w+',
                               shape=(72, 500, 500))
        Parallel(n_jobs=25)(delayed(filter_pro)(tmp_filter, paras, kernel,
                                                kpos, spatial_gabors, gwt_idx)
                            for gwt_idx in range(72))
        tmp_filter = np.array(tmp_filter)
        filters[i] = tmp_filter.sum(axis=0)
        # remove the scratch file; os.remove avoids spawning a shell and
        # is safe for paths containing spaces or shell metacharacters
        os.remove(tmp_file)
        im_file = os.path.join(fig_dir, 'Voxel_%s_%s.png' % (i + 1, vxl_idx[i]))
        vutil.save_imshow(filters[i], im_file)
    np.save(os.path.join(roi_dir, 'filters.npy'), filters)
def gabor_contribution2prf(feat_dir, prf_dir, db_dir, subj_id, roi):
    """Calculate tunning contribution of each gabor sub-banks."""
    # fmri response: only the validation runs are used
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del train_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # candidate model features (memory-mapped, the array is large)
    val_models = np.load(os.path.join(feat_dir, 'val_candidate_model.npy'),
                         mmap_mode='r')
    # selected model parameters
    roi_dir = os.path.join(prf_dir, roi)
    paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
    sel_model = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
    n_vxl = paras.shape[0]
    gabor_corr = np.zeros((n_vxl, 9))
    for vi in range(n_vxl):
        print('Voxel %s' % (vi))
        # z-scored features of this voxel's selected candidate model
        feats = np.array(val_models[int(sel_model[vi]), ...]).astype(np.float64)
        feats = zscore(feats.T).T
        # one correlation per gabor sub-bank (8 orientations per bank)
        for bi in range(9):
            band = slice(bi * 8, bi * 8 + 8)
            pred = np.dot(feats[:, band], paras[vi, band])
            gabor_corr[vi, bi] = np.corrcoef(pred, val_fmri_ts[vi])[0, 1]
    np.save(os.path.join(roi_dir, 'gabor_contributes.npy'), gabor_corr)
def prf_selection(feat_dir, prf_dir, db_dir, subj_id, roi):
    """Select best model for each voxel and validating."""
    # fmri response: validation runs are used to score the winner
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del train_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # candidate model features (memory-mapped)
    val_models = np.load(os.path.join(feat_dir, 'val_candidate_model.npy'),
                         mmap_mode='r')
    roi_dir = os.path.join(prf_dir, roi)
    # fitted weights, training correlations and alphas of all candidates
    paras = np.load(os.path.join(roi_dir, 'reg_paras.npy'))
    mcorr = np.load(os.path.join(roi_dir, 'reg_model_corr.npy'))
    alphas = np.load(os.path.join(roi_dir, 'reg_alphas.npy'))
    n_vxl = mcorr.shape[1]
    sel_paras = np.zeros((n_vxl, 72))
    sel_model = np.zeros(n_vxl)
    sel_model_corr = np.zeros(n_vxl)
    for vi in range(n_vxl):
        # winner = highest training correlation (NaNs treated as 0)
        maxi = np.argmax(np.nan_to_num(mcorr[:, vi]))
        print('Voxel %s - Max corr %s - Model %s' % (vi, mcorr[maxi, vi], maxi))
        print('Alpha : %s' % (alphas[maxi, vi]))
        sel_paras[vi] = paras[maxi, vi]
        sel_model[vi] = maxi
        # validate the winning model on held-out data
        feats = np.array(val_models[maxi, ...]).astype(np.float64)
        feats = zscore(feats.T).T
        pred = np.dot(feats, sel_paras[vi])
        sel_model_corr[vi] = np.corrcoef(pred, val_fmri_ts[vi])[0, 1]
        print('Val Corr : %s' % (sel_model_corr[vi]))
    np.save(os.path.join(roi_dir, 'reg_sel_paras.npy'), sel_paras)
    np.save(os.path.join(roi_dir, 'reg_sel_model.npy'), sel_model)
    np.save(os.path.join(roi_dir, 'reg_sel_model_corr.npy'), sel_model_corr)
def get_vxl_idx(prf_dir, db_dir, subj_id, roi):
    """Get voxel index in specific ROI"""
    # only the voxel indices are of interest; time courses are discarded
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    print('Voxel number: %s' % (len(vxl_idx)))
    out_dir = os.path.join(prf_dir, roi)
    check_path(out_dir)
    np.save(os.path.join(out_dir, 'vxl_idx.npy'), vxl_idx)
def prf_recon(prf_dir, db_dir, subj_id, roi):
    """Reconstruct pRF based on selected model.

    For each voxel the gaussian pooling field of its selected candidate
    model is combined with per-frequency-band weights into a 500x500
    spatial pRF image.  Figures are saved for well-predicted voxels
    (validation correlation >= 0.24); all pRFs go to `prfs.npy`.
    """
    # load fmri response (only the voxel indices are needed here)
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del train_fmri_ts
    del val_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # output directory config
    roi_dir = os.path.join(prf_dir, roi)
    # pRF estimate
    sel_models = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
    sel_paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
    sel_model_corr = np.load(os.path.join(roi_dir, 'reg_sel_model_corr.npy'))
    prfs = np.zeros((sel_models.shape[0], 500, 500))
    fig_dir = os.path.join(roi_dir, 'figs')
    check_path(fig_dir)
    for i in range(sel_models.shape[0]):
        print('Voxel %s, Val Corr %s' % (i, sel_model_corr[i]))
        model_idx = int(sel_models[i])
        # decode gaussian pooling field parameters from the model index
        # (floor division keeps py2 int `/` semantics and works on py3)
        si = model_idx // 2500
        xi = (model_idx % 2500) // 50
        yi = (model_idx % 2500) % 50
        x0 = np.arange(5, 500, 10)[xi]
        y0 = np.arange(5, 500, 10)[yi]
        sigma = [1] + [n * 5 for n in range(1, 13)] + [70, 80, 90, 100]
        s = sigma[si]
        kernel = make_2d_gaussian(500, s, center=(x0, y0))
        # restrict to the kernel's effective support: a gaussian is
        # strictly positive, so np.nonzero(kernel) would select every
        # one of the 250000 pixels and each would build its own 500x500
        # gaussian below.  Use the same 1e-8 cutoff as filter_recon.
        kpos = np.nonzero(kernel > 0.00000001)
        paras = sel_paras[i]
        for f in range(9):
            # summed weight and spatial extent of frequency band f
            fwt = np.sum(paras[(f * 8):(f * 8 + 8)])
            fs = np.sqrt(2)**f * 4
            for p in range(kpos[0].shape[0]):
                tmp = make_2d_gaussian(500, fs,
                                       center=(kpos[1][p], kpos[0][p]))
                prfs[i] += fwt * kernel[kpos[0][p], kpos[1][p]] * tmp
        if sel_model_corr[i] >= 0.24:
            prf_file = os.path.join(fig_dir,
                                    'Voxel_%s_%s.png' % (i + 1, vxl_idx[i]))
            vutil.save_imshow(prfs[i], prf_file)
    np.save(os.path.join(roi_dir, 'prfs.npy'), prfs)
#fpfs = np.zeros((vxl_idx.shape[0], 250, 250)) #biases = np.zeros((vxl_idx.shape[0],)) #for i in range(vxl_idx.shape[0]): # print 'Voxel %s - %s'%(i, vxl_idx[i]) # vxl_dir = os.path.join(roi_dir, 'voxel_%s'%(vxl_idx[i]), 'refine') # fpf, b, wt = get_prf_weights(vxl_dir) # outfile = os.path.join(vxl_dir, 'model_wts') # np.savez(outfile, fpf=fpf, wt=wt, bias=b) # fpfs[i, ...] = fpf # wts[i] = wt # biases[i] = b #model_wts_file = os.path.join(roi_dir, 'merged_model_wts') #np.savez(model_wts_file, fpfs=fpfs, wts=wts, biases=biases) #-- visual reconstruction using cnn-prf vxl_idx, train_ts, val_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi) ts_m = np.mean(val_ts, axis=1, keepdims=True) ts_s = np.std(val_ts, axis=1, keepdims=True) val_ts = (val_ts - ts_m) / (ts_s + 1e-5) # load estimated prf parameters model_wts = np.load(os.path.join(roi_dir, 'merged_model_wts.npz')) # load model test result dl_test_r2 = np.load(os.path.join(roi_dir, 'dl_prf_refine_test_r2.npy')) # select voxels thres = 0.1 sel_idx = np.nonzero(dl_test_r2 >= thres)[0] print 'Select %s voxels for image reconstruction' % (sel_idx.shape[0]) sel_wts = model_wts['wts'][sel_idx] sel_fpfs = model_wts['fpfs'][sel_idx] sel_bias = model_wts['biases'][sel_idx] # get voxel response and reconstruct image
def ridge_regression_model_test(prf_dir, db_dir, subj_id, roi):
    """Test pRF model derived from ridge regression with test dataset.

    For each voxel the best (model, alpha) cell is taken from the stored
    validation r^2 grid, its pooling-field position is decoded, and the
    r^2 on the test dataset is computed from the stored regression
    weights.  Saves `reg_prf_test_r2.npy` and `sel_reg_prf_pos.npy`
    (rows are [y0, x0, sigma]).
    """
    # load fmri response (training data is not needed here)
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del train_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # load candidate models (memory-mapped, the array is large)
    val_models = np.load(os.path.join(db_dir, 'val_candidate_model.npy'),
                         mmap_mode='r')
    # fMRI data z-score
    print('fmri data temporal z-score')
    m = np.mean(val_fmri_ts, axis=1, keepdims=True)
    s = np.std(val_fmri_ts, axis=1, keepdims=True)
    val_fmri_ts = (val_fmri_ts - m) / (1e-5 + s)
    # number of test time points, derived from the data instead of the
    # previous hard-coded 120
    n_test = val_fmri_ts.shape[1]
    # output directory config
    roi_dir = os.path.join(prf_dir, roi)
    check_path(roi_dir)
    # load selected models and the corresponding parameters
    val_r2 = np.load(os.path.join(roi_dir, 'reg_val_r2.npy'), mmap_mode='r')
    paras = np.load(os.path.join(roi_dir, 'reg_paras.npy'))
    alphas = np.load(os.path.join(roi_dir, 'reg_alphas.npy'))
    # output vars: test r^2 and pRF position (y, x, sigma) per voxel
    test_r2 = np.zeros(len(vxl_idx))
    prf_pos = np.zeros((len(vxl_idx), 3))
    # parameter candidates used during fitting
    alpha_list = np.logspace(-2, 3, 10)
    sigma = [2, 4, 8, 16, 32, 60, 70, 80, 90, 100]
    for i in range(len(vxl_idx)):
        print('----------------')
        print('Voxel %s' % (i))
        vxl_r2 = np.nan_to_num(val_r2[i, ...])
        # best (model, alpha) cell in the validation r^2 grid
        sel_mdl_i, sel_alpha_i = np.unravel_index(vxl_r2.argmax(),
                                                  vxl_r2.shape)
        print('Select model %s' % (sel_mdl_i))
        print('Select alpha value %s - %s' % (alpha_list[sel_alpha_i],
                                              alphas[i]))
        # decode model position info (floor division: py2 `/` semantics)
        xi = (sel_mdl_i % 2500) // 50
        yi = (sel_mdl_i % 2500) % 50
        x0 = np.arange(2, 250, 5)[xi]
        y0 = np.arange(2, 250, 5)[yi]
        s = sigma[sel_mdl_i // 2500]
        prf_pos[i] = np.array([y0, x0, s])
        # compute r^2 using test dataset
        test_x = np.array(val_models[sel_mdl_i, ...]).astype(np.float64)
        # prepend an intercept column to match the stored 73 weights
        test_x = np.concatenate((np.ones((n_test, 1)), test_x), axis=1)
        val_pred = test_x.dot(paras[i])
        ss_tol = np.var(val_fmri_ts[i]) * n_test
        r2 = 1.0 - np.sum(np.square(val_fmri_ts[i] - val_pred)) / ss_tol
        print('r-square on test dataset: %s' % (r2))
        test_r2[i] = r2
    # save output
    np.save(os.path.join(roi_dir, 'reg_prf_test_r2.npy'), test_r2)
    np.save(os.path.join(roi_dir, 'sel_reg_prf_pos.npy'), prf_pos)
def ridge_regression(prf_dir, db_dir, subj_id, roi):
    """pRF model fitting using ridge regression.

    90% trainning data used for model tuning, and another 10% data used
    for model seletion.  For each voxel every in-field candidate model is
    fit at each alpha; the best (model, alpha) cell is refit to recover
    its weights.  Saves `reg_paras.npy` (intercept + 72 weights),
    `reg_val_r2.npy` and `reg_alphas.npy`.
    """
    # load fmri response (validation runs are not needed for fitting)
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del val_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # load candidate models (memory-mapped, the array is large)
    train_models = np.load(os.path.join(db_dir, 'train_candidate_model.npy'),
                           mmap_mode='r')
    # output directory config
    roi_dir = os.path.join(prf_dir, roi)
    check_path(roi_dir)
    # model seletion and tuning buffers (memmaps, persisted at the end)
    ALPHA_NUM = 10
    paras_file = os.path.join(roi_dir, 'reg_paras.npy')
    paras = np.memmap(paras_file, dtype='float64', mode='w+',
                      shape=(len(vxl_idx), 73))
    val_r2_file = os.path.join(roi_dir, 'reg_val_r2.npy')
    val_r2 = np.memmap(val_r2_file, dtype='float64', mode='w+',
                       shape=(len(vxl_idx), 25000, ALPHA_NUM))
    alphas_file = os.path.join(roi_dir, 'reg_alphas.npy')
    alphas = np.memmap(alphas_file, dtype='float64', mode='w+',
                       shape=(len(vxl_idx)))
    # fMRI data z-score
    print('fmri data temporal z-score')
    m = np.mean(train_fmri_ts, axis=1, keepdims=True)
    s = np.std(train_fmri_ts, axis=1, keepdims=True)
    train_fmri_ts = (train_fmri_ts - m) / (1e-5 + s)
    # split training dataset into model tunning set and model selection set
    n_tune = int(1750 * 0.9)
    tune_fmri_ts = train_fmri_ts[:, :n_tune]
    sel_fmri_ts = train_fmri_ts[:, n_tune:]
    # hoisted loop invariants: the alpha grid and, for every candidate
    # model, whether its center lies within the 20-degree visual angle.
    # Both were recomputed inside the per-voxel/per-model loops before,
    # i.e. np.logspace ran voxels x 25000 x ALPHA_NUM times.
    alpha_list = np.logspace(-2, 3, ALPHA_NUM)
    mdl_idx = np.arange(25000)
    mdl_x0 = np.arange(2, 250, 5)[(mdl_idx % 2500) // 50]
    mdl_y0 = np.arange(2, 250, 5)[(mdl_idx % 2500) % 50]
    mdl_dist = np.sqrt(np.square(mdl_x0 - 125) + np.square(mdl_y0 - 125))
    model_in_fov = mdl_dist <= 124
    # model fitting
    for i in range(len(vxl_idx)):
        print('-----------------')
        print('Voxel %s' % (i))
        for j in range(25000):
            # skip models centered outside the 20 degree of visual angle;
            # (the old dead write of NaN into paras[i] is dropped --
            # paras[i] is unconditionally assigned after model selection)
            if not model_in_fov[j]:
                val_r2[i, j, :] = np.nan
                continue
            train_x = np.array(train_models[j, ...]).astype(np.float64)
            # split features into model tunning and selection sets
            tune_x = train_x[:n_tune, :]
            sel_x = train_x[n_tune:, :]
            for a in range(ALPHA_NUM):
                # model fitting at this regularization strength
                reg = linear_model.Ridge(alpha=alpha_list[a])
                reg.fit(tune_x, tune_fmri_ts[i])
                val_pred = reg.predict(sel_x)
                ss_tol = np.var(sel_fmri_ts[i]) * 175
                r2 = 1.0 - np.sum(
                    np.square(sel_fmri_ts[i] - val_pred)) / ss_tol
                val_r2[i, j, a] = r2
        # select best (model, alpha) cell; NaNs are treated as 0
        vxl_r2 = np.nan_to_num(val_r2[i, ...])
        sel_mdl_i, sel_alpha_i = np.unravel_index(vxl_r2.argmax(),
                                                  vxl_r2.shape)
        # refit the winning model to recover its weights
        train_x = np.array(train_models[sel_mdl_i, ...]).astype(np.float64)
        tune_x = train_x[:n_tune, :]
        sel_x = train_x[n_tune:, :]
        reg = linear_model.Ridge(alpha=alpha_list[sel_alpha_i])
        reg.fit(tune_x, tune_fmri_ts[i])
        val_pred = reg.predict(sel_x)
        ss_tol = np.var(sel_fmri_ts[i]) * 175
        r2 = 1.0 - np.sum(np.square(sel_fmri_ts[i] - val_pred)) / ss_tol
        print('r-square recal: %s' % (r2))
        # intercept first, then the 72 feature weights
        paras[i, ...] = np.concatenate((np.array([reg.intercept_]),
                                        reg.coef_))
        alphas[i] = alpha_list[sel_alpha_i]
    # persist results as regular .npy arrays
    np.save(paras_file, np.array(paras))
    np.save(val_r2_file, np.array(val_r2))
    np.save(alphas_file, np.array(alphas))
def ridge_fitting(feat_dir, prf_dir, db_dir, subj_id, roi):
    """pRF model fitting using ridge regression.

    90% trainning data used for model tuning, and another 10% data used
    for model seletion.
    """
    # load fmri response; validation data is not used during fitting
    vxl_idx, train_fmri_ts, val_fmri_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
    del val_fmri_ts
    print('Voxel number: %s' % (len(vxl_idx)))
    # candidate model features (memory-mapped)
    train_models = np.load(os.path.join(feat_dir, 'train_candidate_model.npy'),
                           mmap_mode='r')
    # output directory config
    roi_dir = os.path.join(prf_dir, roi)
    check_path(roi_dir)
    # model seletion and tuning
    ALPHA_NUM = 20
    BOOTS_NUM = 15
    paras_file = os.path.join(roi_dir, 'reg_paras.npy')
    mcorr_file = os.path.join(roi_dir, 'reg_model_corr.npy')
    alphas_file = os.path.join(roi_dir, 'reg_alphas.npy')
    paras = np.memmap(paras_file, dtype='float64', mode='w+',
                      shape=(42500, len(vxl_idx), 72))
    mcorr = np.memmap(mcorr_file, dtype='float64', mode='w+',
                      shape=(42500, len(vxl_idx)))
    alphas = np.memmap(alphas_file, dtype='float64', mode='w+',
                       shape=(42500, len(vxl_idx)))
    # temporal z-score of the training fMRI data
    print('fmri data temporal z-score')
    ts_m = np.mean(train_fmri_ts, axis=1, keepdims=True)
    ts_s = np.std(train_fmri_ts, axis=1, keepdims=True)
    train_fmri_ts = (train_fmri_ts - ts_m) / (1e-10 + ts_s)
    # split training dataset into model tunning set and model selection set
    split = int(1750 * 0.9)
    tune_fmri_ts = train_fmri_ts[:, :split]
    sel_fmri_ts = train_fmri_ts[:, split:]
    # model testing
    for mi in range(42500):
        print('Model %s' % (mi))
        # drop models centered outside the 20 degree of visual angle
        xi = (mi % 2500) // 50
        yi = (mi % 2500) % 50
        x0 = np.arange(5, 500, 10)[xi]
        y0 = np.arange(5, 500, 10)[yi]
        d = np.sqrt(np.square(x0 - 250) + np.square(y0 - 250))
        if d > 249:
            print('Model center outside the visual angle')
            paras[mi, ...] = np.nan
            mcorr[mi] = np.nan
            alphas[mi] = np.nan
            continue
        train_x = np.array(train_models[mi, ...]).astype(np.float64)
        train_x = zscore(train_x.T).T
        # split the feature matrix the same way as the fMRI data
        tune_x = train_x[:split, :]
        sel_x = train_x[split:, :]
        wt, r, alpha, bscores, valinds = ridge.bootstrap_ridge(
            tune_x, tune_fmri_ts.T, sel_x, sel_fmri_ts.T,
            alphas=np.logspace(-2, 3, ALPHA_NUM),
            nboots=BOOTS_NUM, chunklen=175, nchunks=1,
            single_alpha=False, use_corr=False)
        paras[mi, ...] = wt.T
        mcorr[mi] = r
        alphas[mi] = alpha
    # persist results as regular arrays
    np.save(paras_file, np.array(paras))
    np.save(mcorr_file, np.array(mcorr))
    np.save(alphas_file, np.array(alphas))