Example #1
0
 def internal_cons(self, splithalfno, test_inds):
     """Split-half internal consistency of the neural data on the test set.

     Splits the repetitions in half (seeded by `splithalfno`), correlates
     the two halves per site, and applies the Spearman-Brown correction.
     """
     random_state = np.random.RandomState(splithalfno)
     half_a, half_b = utils.splithalf(
         self.neural_feats_reps[:, test_inds], rng=random_state)
     per_site_r = utils.pearsonr_matrix(half_a, half_b)
     return utils.spearman_brown_correct(per_site_r, n=2)
Example #2
0
 def raw_fit(self, train_inds, test_inds):
     """Fit the regression on rep-averaged neural features (train split)
     and return per-site Pearson correlations on the test split."""
     # Average over repetitions, ignoring NaNs from missing reps.
     avg_neural = np.nanmean(self.neural_feats_reps, axis=0)
     train_target = np.squeeze(avg_neural[train_inds])
     self.reg.fit(self.model_feats[train_inds], train_target)
     predicted = np.squeeze(self.reg.predict(self.model_feats[test_inds]))
     corr = utils.pearsonr_matrix(avg_neural[test_inds], predicted)
     return np.squeeze(corr)
Example #3
0
def splithalf_corr(data, niter=10, seed=None):
    """Repeated split-half reliability of `data`, Spearman-Brown corrected.

    Runs `niter` random split-half correlations (RNG seeded by `seed`) and
    returns a DataFrame with columns ['splithalf', 'site', 'internal_cons'].
    """
    rng = np.random.RandomState(seed)
    records = []
    for iterno in range(niter):
        half1, half2 = utils.splithalf(data, rng=rng)
        corrected = utils.spearman_brown_correct(
            utils.pearsonr_matrix(half1, half2), n=2)
        for site_idx, value in enumerate(corrected):
            records.append((iterno, site_idx, value))
    return pandas.DataFrame(
        records, columns=['splithalf', 'site', 'internal_cons'])
Example #4
0
 def mapping_cons(self, splithalfno, train_inds, test_inds):
     """
     Split data in half over reps, run PLS on each half on the train set,
     get predictions for the test set, correlate the two, Spearman-Brown
     """
     random_state = np.random.RandomState(splithalfno)
     train_model = self.model_feats[train_inds]
     test_model = self.model_feats[test_inds]
     halves = utils.splithalf(
         self.neural_feats_reps[:, train_inds], rng=random_state)
     # Fit the regression on each half independently and predict the test set.
     predictions = []
     for half in halves:
         self.reg.fit(train_model, half)
         predictions.append(self.reg.predict(test_model))
     corr = utils.pearsonr_matrix(predictions[0], predictions[1])
     return np.squeeze(utils.spearman_brown_correct(corr, n=2))
Example #5
0
def nfit(model_feats,
         neural,
         labels,
         n_splits=10,
         n_components=200,
         test_size=.25,
         seed=None):
    """Cross-validated PLS fit of model features to neural data.

    Optionally reduces `model_feats` to `n_components` with PCA, then runs
    `n_splits` stratified shuffle splits (stratified on `labels`), fitting a
    25-component PLS regression on each train split and correlating its
    predictions with the held-out neural data per site.

    Parameters
    ----------
    model_feats : (n_samples, n_features) array of model activations
    neural : (n_samples, n_sites) array of neural responses
    labels : (n_samples,) class labels used for stratification
    n_splits : number of shuffle-split iterations
    n_components : PCA dimensionality cap applied before fitting
    test_size : fraction of samples held out per split
    seed : random seed for the shuffler (and PCA's randomized solver);
        None keeps the previous non-deterministic behavior. Added for
        consistency with the seeded RNG used elsewhere in this module.

    Returns
    -------
    pandas.DataFrame with columns ['split', 'site', 'fit_r'].
    """
    if model_feats.shape[1] > n_components:
        model_feats = PCA(n_components=n_components,
                          random_state=seed).fit_transform(model_feats)
    # Fix: previously the splitter had no random_state, so results were not
    # reproducible, unlike the seeded split-half routines in this module.
    skf = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size,
                                 random_state=seed)
    df = []
    for it, (train_idx, test_idx) in enumerate(skf.split(model_feats, labels)):
        reg = PLSRegression(n_components=25, scale=False)
        reg.fit(model_feats[train_idx], neural[train_idx])
        pred = reg.predict(model_feats[test_idx])
        rs = utils.pearsonr_matrix(neural[test_idx], pred)
        df.extend([(it, site, r) for site, r in enumerate(rs)])
    df = pandas.DataFrame(df, columns=['split', 'site', 'fit_r'])
    return df