def internal_cons(self, splithalfno, test_inds):
    """Split-half internal consistency of the neural data on the test set.

    Splits the repeated neural responses in half over repetitions (seeded
    by ``splithalfno`` so each split-half iteration is reproducible),
    correlates the two halves per site, and applies the Spearman-Brown
    correction for halving the number of repetitions (n=2).
    """
    rng = np.random.RandomState(splithalfno)
    half_a, half_b = utils.splithalf(self.neural_feats_reps[:, test_inds], rng=rng)
    raw_corr = utils.pearsonr_matrix(half_a, half_b)
    return utils.spearman_brown_correct(raw_corr, n=2)
def splithalf_corr(data, niter=10, seed=None):
    """Repeated split-half consistency per site, Spearman-Brown corrected.

    Runs ``niter`` independent split-half iterations on ``data`` (a single
    seeded RandomState is advanced across iterations) and returns a long-form
    DataFrame with columns ['splithalf', 'site', 'internal_cons'].
    """
    rng = np.random.RandomState(seed)
    records = []
    for iteration in range(niter):
        half_a, half_b = utils.splithalf(data, rng=rng)
        corrected = utils.spearman_brown_correct(
            utils.pearsonr_matrix(half_a, half_b), n=2)
        records.extend(
            (iteration, site, value) for site, value in enumerate(corrected))
    return pandas.DataFrame(
        records, columns=['splithalf', 'site', 'internal_cons'])
def mapping_cons(self, splithalfno, train_inds, test_inds):
    """
    Split data in half over reps, fit the regression (``self.reg``) on each
    half of the train set, get predictions for the test set, correlate the
    two prediction sets per site, and Spearman-Brown correct (n=2).
    """
    rng = np.random.RandomState(splithalfno)
    half_a, half_b = utils.splithalf(
        self.neural_feats_reps[:, train_inds], rng=rng)
    # Fit on each half separately; both models predict the same test stimuli.
    predictions = []
    for half in (half_a, half_b):
        self.reg.fit(self.model_feats[train_inds], half)
        predictions.append(self.reg.predict(self.model_feats[test_inds]))
    raw_corr = utils.pearsonr_matrix(predictions[0], predictions[1])
    return np.squeeze(utils.spearman_brown_correct(raw_corr, n=2))
def mapping_cons(self, splithalfno, train_inds, test_inds):
    """
    Per-site variant: split data in half over reps, fit the regression
    (``self.reg``) on each half of the train set for one site at a time,
    predict the test set, correlate the two predictions with Pearson r,
    and Spearman-Brown correct the per-site correlations (n=2).
    """
    rng = np.random.RandomState(splithalfno)
    site_corrs = []
    # Last axis of neural_feats_reps indexes recording sites — assumes
    # shape (reps, stimuli, sites); TODO confirm against the constructor.
    for site in range(self.neural_feats_reps.shape[-1]):
        half_a, half_b = utils.splithalf(
            self.neural_feats_reps[:, train_inds, site], rng=rng)
        self.reg.fit(self.model_feats[train_inds], half_a)
        pred_a = self.reg.predict(self.model_feats[test_inds])
        self.reg.fit(self.model_feats[train_inds], half_b)
        pred_b = self.reg.predict(self.model_feats[test_inds])
        # scipy.stats.pearsonr returns (r, p-value); keep only r.
        site_corrs.append(scipy.stats.pearsonr(pred_a, pred_b)[0])
    return np.squeeze(utils.spearman_brown_correct(site_corrs, n=2))