def compute_pval_rsa(seed):
    """Permutation-test p-value for the RSA statistic on one simulated dataset."""
    stim, voxels = load_data(n_samples, n_features, model=model, seed=seed,
                             heteroscedastic=heteroscedastic)

    # compute similarity
    stim_ = stim
    if stim.shape[1] == 1:
        stim_ = np.hstack((stim, -stim))
    stim_similarity = square_pdist(stim_)  # np.corrcoef(stim_)
    voxels_similarity = square_pdist(voxels)  # np.corrcoef(voxels)

    # indices extracting the upper triangular part of a symmetric matrix
    # (off-diagonal pairs only)
    lw_idx = np.triu_indices(n_samples, k=1)
    stim_vsim = stim_similarity[lw_idx]
    voxels_vsim = voxels_similarity[lw_idx]

    # compute the statistic: rank correlation between the two similarity profiles
    # T = np.corrcoef(stim_vsim, voxels_vsim)[0, 1]
    T = spearmanr(voxels_vsim, stim_vsim)[0]

    # null distribution: recompute the statistic after permuting the samples
    T_perm = []
    for _ in range(n_draws):
        # permute the labels
        perm = np.random.permutation(n_samples)
        # voxels_vsim_perm = np.corrcoef(voxels[perm])[lw_idx]
        voxels_vsim_perm = square_pdist(voxels[perm])[lw_idx]
        # compute the test statistic
        # T_perm.append(np.corrcoef(voxels_vsim_perm, stim_vsim)[0, 1])
        T_perm.append(spearmanr(voxels_vsim_perm, stim_vsim)[0])

    pval = 1 - percentileofscore(np.array(T_perm), T) / 100.
    return pval
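
# `load_data` and `square_pdist` are helpers assumed to be defined earlier in the
# notebook (along with the globals n_samples, n_features, model, heteroscedastic,
# n_draws, and the numpy/scipy imports). If `square_pdist` is not already defined,
# a minimal sketch consistent with its use above -- the full symmetric matrix of
# pairwise Euclidean distances between rows -- could be:

from scipy.spatial.distance import pdist, squareform


def square_pdist(X):
    # condensed pairwise Euclidean distances, expanded to an
    # (n_samples, n_samples) symmetric matrix with zeros on the diagonal
    return squareform(pdist(X))


# Example use: collect one p-value per simulation seed, e.g.
# pvals = [compute_pval_rsa(seed) for seed in range(n_repeats)]
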
#
# True: Both models perform equally well
# False: Linear models outperform RSA
pvals_rsa = []
a_space = np.linspace(0, .3, 11)
for a_i in a_space:
    T_ai = []
    # average across repetitions
    for i in range(n_repeats):
        stim, voxels = load_data(
            n_samples, n_voxels, activation=a_i, model=model, seed=i,
            random_effects=random_effects)

        # compute similarity: negated (squared) distances, so that larger
        # values mean more similar for both matrices
        stim_similarity = - (stim - stim.T) ** 2
        voxels_similarity = - square_pdist(voxels)  # np.corrcoef(voxels)

        # extract the upper triangular part of the symmetric similarity matrices
        lw_idx = np.triu_indices(n_samples, k=1)
        stim_vsim = stim_similarity[lw_idx]
        voxels_vsim = voxels_similarity[lw_idx]

        # compute the statistic
        T_ai.append(spearmanr(stim_vsim, voxels_vsim)[0])

    T_perm = []
    for _ in range(n_perm):
        # permute the labels
        perm = np.random.permutation(n_samples)
        voxels_vsim_perm = - square_pdist(voxels[perm])[lw_idx]