# only one frequency band: add a band axis so C has shape
# (n_subjects, n_frequency_bands=1, n_channels, n_channels)
C = C[:, None, :]
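
# the excerpts below assume the usual imports from the original scripts, e.g.
# numpy as np, pandas as pd, os.path as op, mne, copy, the scikit-learn utilities
# (make_pipeline, StandardScaler, RidgeCV, KFold, cross_val_score, DummyRegressor)
# and the project-specific transformers (ProjCommonSpace, ProjSPoCSpace, Riemann,
# RiemannSnp, NaiveVec, LogDiag, ...); they are omitted from the excerpts themselves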

# %% define the pipelines

scoring = 'neg_mean_absolute_error'

# define spatial filters
common = ProjCommonSpace(n_compo=48, scale='auto', reg=1e-5)
identity = ProjIdentitySpace()
spoc = ProjSPoCSpace(n_compo=4, scale='auto', reg=1e-5, shrink=0.5)

# define feature extraction
diag = Diag()
logdiag = LogDiag()
riemann = Riemann(n_fb=1, metric='riemann')

sc = StandardScaler()

# define the regression algorithm
ridge = RidgeCV(alphas=np.logspace(-5, 3, 25), scoring=scoring)

# define models
pipelines = {
    'diag': make_pipeline(identity, logdiag, sc, ridge),
    'spoc': make_pipeline(spoc, logdiag, sc, ridge),
    'riemann': make_pipeline(common, riemann, sc, ridge)
}

# %% decoding experiments
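
# a minimal sketch (not from the original script) of such an experiment: score each
# pipeline with 10-fold cross-validation, assuming the covariance array C from above
# and an age target vector y are available
from sklearn.model_selection import KFold, cross_val_score

cv = KFold(n_splits=10, shuffle=True, random_state=42)
for name, pipe in pipelines.items():
    maes = -cross_val_score(pipe, X=C, y=y, cv=cv, scoring=scoring, n_jobs=1)
    print(f'{name}: MAE = {maes.mean():.2f} +/- {maes.std():.2f}')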

# %% Example 2
    if duration is None:  # full-length recordings (condition inferred, not in the excerpt)
        fname = op.join(cfg.path_outputs, 'covs_allch_oas.h5')
    else:
        fname = op.join(cfg.path_outputs, 'covs_allch_oas_%d.h5' % duration)
    covs = mne.externals.h5io.read_hdf5(fname)
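    # the file is assumed to contain one dict per subject, e.g.
    # {'subject': '<id>', 'covs': array of shape (n_fb, n_ch, n_ch)};
    # entries without a 'subject' key are skipped below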
    subjects = [d['subject'] for d in covs if 'subject' in d]
    covs = [d['covs'][:, picks][:, :, picks] for d in covs if 'subject' in d]
    X = np.array(covs)
    n_sub, n_fb, n_ch, _ = X.shape
    print(f'Duration: {duration} - #subjects: {n_sub}')

    part = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
    y = part.set_index('Observations').age.loc[subjects]

    identity = ProjIdentitySpace()
    common = ProjCommonSpace(scale=scale, n_compo=n_compo, reg=reg)
    riemann = Riemann(n_fb=n_fb, metric=metric)
    logdiag = LogDiag()
    sc = StandardScaler()
    ridge = RidgeCV(alphas=np.logspace(-3, 5, 100))
    cv = KFold(n_splits=n_splits, shuffle=True, random_state=seed)

    pipe = make_pipeline(common, riemann, sc, ridge)
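    # neg_mean_absolute_error yields negative errors; negate to report the MAE itself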
    score = -cross_val_score(pipe,
                             X=X,
                             y=y,
                             cv=cv,
                             n_jobs=n_jobs,
                             scoring='neg_mean_absolute_error')
    scores_model.append(score)

    pipe = make_pipeline(identity, logdiag, sc, ridge)
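
# a minimal sketch of the configuration the next pipelines assume; the real values
# come from the project config and are not part of this excerpt, so the numbers
# below are only placeholders
ridge_shrinkage = np.logspace(-3, 5, 100)  # alpha grid for RidgeCV
n_compo = 65        # number of spatial components / projection rank (placeholder)
scale = 'auto'      # covariance scaling, as used above
n_fb = 1            # number of frequency bands
metric = 'riemann'  # Riemannian metric for the tangent-space embedding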

pipelines = {
    'dummy':
    make_pipeline(ProjIdentitySpace(), LogDiag(), StandardScaler(),
                  DummyRegressor()),
    'naive':
    make_pipeline(ProjIdentitySpace(), NaiveVec(method='upper'),
                  StandardScaler(), RidgeCV(alphas=ridge_shrinkage)),
    'log-diag':
    make_pipeline(ProjIdentitySpace(), LogDiag(), StandardScaler(),
                  RidgeCV(alphas=ridge_shrinkage)),
    'spoc':
    make_pipeline(
        ProjSPoCSpace(n_compo=n_compo, scale=scale, reg=0, shrink=0.5),
        LogDiag(), StandardScaler(), RidgeCV(alphas=ridge_shrinkage)),
    'riemann':
    make_pipeline(ProjCommonSpace(scale=scale, n_compo=n_compo, reg=1.e-05),
                  Riemann(n_fb=n_fb, metric=metric), StandardScaler(),
                  RidgeCV(alphas=ridge_shrinkage))
}

n_components = np.arange(1, 103, 1)


def run_low_rank(n_components, X, y, cv, estimators, scoring):
    out = dict(n_components=n_components)
    for name, est in estimators.items():
        print(name, n_components)
        this_est = est
        this_est.steps[0][1].n_compo = n_components
        scores = cross_val_score(this_est,
                                 X=X,
                                 y=y,
                                 cv=copy.deepcopy(cv),
                                 scoring=scoring)
        out[name] = scores
    return out
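
# a minimal usage sketch (not from the original script): score every pipeline at each
# candidate rank, assuming X, y, a CV splitter cv, the pipelines dict and scoring are
# defined as in the excerpts above
low_rank_results = [
    run_low_rank(n, X, y, cv=cv, estimators=pipelines, scoring=scoring)
    for n in n_components
]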

# %% Example 4
info = np.load(op.join(cfg.path_data, 'info_allch.npy'),
               allow_pickle=True).item()
picks = mne.pick_types(info, meg=meg)

fname = op.join(cfg.path_outputs, 'covs_allch_oas.h5')
covs = mne.externals.h5io.read_hdf5(fname)
subjects = [d['subject'] for d in covs if 'subject' in d]
covs = [d['covs'][:, picks][:, :, picks] for d in covs if 'subject' in d]
X = np.array(covs)
n_sub, n_fb, n_ch, _ = X.shape

part = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
y = part.set_index('Observations').age.loc[subjects]

logdiag = LogDiag()
riemanngeo = Riemann(n_fb=n_fb, metric=metric)
riemannwass = RiemannSnp(n_fb=n_fb, rank=n_compo)

sc = StandardScaler()
ridge = RidgeCV(alphas=np.logspace(-3, 5, 100))
dummy = DummyRegressor()
cv = KFold(n_splits=n_splits, shuffle=True, random_state=seed)

def run_models(cv):
    logdiag = LogDiag()
    naivevec = NaiveVec(method='upper')
    riemanngeo = Riemann(n_fb=n_fb, metric=metric)
    riemannwass = RiemannSnp(n_fb=n_fb, rank=n_compo)

    sc = StandardScaler()
    ridge = RidgeCV(alphas=np.logspace(-3, 5, 100))
    dummy = DummyRegressor()
    print('---LogDiag-------------------------------------------')
    reg = 0
    shrinks = np.linspace(0, 1, 10)

    identity = ProjIdentitySpace()
    euclidean_vec = NaiveVec(method='upper')
    random = ProjRandomSpace(n_compo=n_compo)
    commonwass = ProjCommonWassSpace(n_compo=n_compo)
    commoneucl = ProjCommonSpace(scale=scale, n_compo=n_compo, reg=reg)
    spoc = ProjSPoCSpace(shrink=0.5, scale=scale, n_compo=n_compo, reg=reg)

    print('Pipeline: Dummy')
    pipe = make_pipeline(identity, logdiag, sc, dummy)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_dummy = scores

    print('Pipeline: Identity + NaiveVec')
    pipe = make_pipeline(identity, naivevec, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_sensor_naivevec = scores

    print('Pipeline: Identity + LogDiag')
    pipe = make_pipeline(identity, logdiag, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_sensor_logdiag = scores

    print('Pipeline: Random + LogDiag')
    pipe = make_pipeline(random, logdiag, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_rand_logdiag = scores

    print('Pipeline: CommonEucl + LogDiag')
    pipe = make_pipeline(commoneucl, logdiag, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_unsup_logdiag = scores

    print('Pipeline: SPoC + LogDiag')
    scores_sup_logdiag = []
    for shrink in shrinks:
        spoc = ProjSPoCSpace(shrink=shrink,
                             scale=scale,
                             n_compo=n_compo,
                             reg=reg)
        pipe = make_pipeline(spoc, logdiag, sc, ridge)
        scores = -cross_val_score(pipe,
                                  X=X,
                                  y=y,
                                  cv=cv,
                                  n_jobs=n_jobs,
                                  scoring='neg_mean_absolute_error',
                                  error_score=np.nan)
        print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
        scores_sup_logdiag.append(scores)

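    # keep the scores obtained with the best shrinkage value (lowest mean MAE)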
    scores_sup_logdiag = scores_sup_logdiag[np.argmin(
        np.mean(scores_sup_logdiag, axis=1))]

    print('---RiemannWass-------------------------------------------')
    reg = 0
    shrinks = np.linspace(0, 1, 10)

    identity = ProjIdentitySpace()
    random = ProjRandomSpace(n_compo=n_compo)
    commoneucl = ProjCommonSpace(scale=scale, n_compo=n_compo, reg=reg)
    spoc = ProjSPoCSpace(shrink=0.5, scale=scale, n_compo=n_compo, reg=reg)

    print('Pipeline: Identity + RiemannWass')
    pipe = make_pipeline(identity, riemannwass, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_sensor_riemannwass = scores

    print('Pipeline: Random + RiemannWass')
    pipe = make_pipeline(random, riemannwass, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_rand_riemannwass = scores

    print('Pipeline: CommonEucl + RiemannWass')
    pipe = make_pipeline(commoneucl, riemannwass, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_unsup_riemannwass = scores

    print('Pipeline: SPoC + RiemannWass')
    scores_sup_riemannwass = []
    for shrink in shrinks:
        spoc = ProjSPoCSpace(shrink=shrink,
                             scale=scale,
                             n_compo=n_compo,
                             reg=reg)
        pipe = make_pipeline(spoc, riemannwass, sc, ridge)
        scores = -cross_val_score(pipe,
                                  X=X,
                                  y=y,
                                  cv=cv,
                                  n_jobs=n_jobs,
                                  scoring='neg_mean_absolute_error',
                                  error_score=np.nan)
        print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
        scores_sup_riemannwass.append(scores)
    scores_sup_riemannwass = scores_sup_riemannwass[np.argmin(
        np.mean(scores_sup_riemannwass, axis=1))]

    print('---RiemannGeo-------------------------------------------')
    shrink = 0.5
    regs = np.logspace(-7, -3, 5)

    identity = ProjIdentitySpace()
    random = ProjRandomSpace(n_compo=n_compo)
    lw = ProjLWSpace(shrink=shrink)
    commoneucl = ProjCommonSpace(scale=scale, n_compo=n_compo, reg=0)
    spoc = ProjSPoCSpace(shrink=shrink, scale=scale, n_compo=n_compo, reg=0)

    print('Pipeline: Random + RiemannGeo')
    pipe = make_pipeline(random, riemanngeo, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_rand_riemanngeo = scores

    print('Pipeline: LW + RiemannGeo')
    pipe = make_pipeline(lw, riemanngeo, sc, ridge)
    scores = -cross_val_score(pipe,
                              X=X,
                              y=y,
                              cv=cv,
                              n_jobs=n_jobs,
                              scoring='neg_mean_absolute_error',
                              error_score=np.nan)
    print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
    scores_sensor_riemanngeo = scores

    print('Pipeline: CommonEucl + RiemannGeo')
    scores_unsup_riemanngeo = []
    for reg in regs:
        commoneucl = ProjCommonSpace(scale=scale, n_compo=n_compo, reg=reg)
        pipe = make_pipeline(commoneucl, riemanngeo, sc, ridge)
        scores = -cross_val_score(pipe,
                                  X=X,
                                  y=y,
                                  cv=cv,
                                  n_jobs=n_jobs,
                                  scoring='neg_mean_absolute_error',
                                  error_score=np.nan)
        print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
        scores_unsup_riemanngeo.append(scores)
    scores_unsup_riemanngeo = scores_unsup_riemanngeo[np.argmin(
        np.mean(scores_unsup_riemanngeo, axis=1))]

    print('Pipeline: SPoC + RiemannGeo')
    scores_sup_riemanngeo = []
    for reg in regs:
        spoc = ProjSPoCSpace(shrink=shrink,
                             scale=scale,
                             n_compo=n_compo,
                             reg=reg)
        pipe = make_pipeline(spoc, riemanngeo, sc, ridge)
        scores = -cross_val_score(pipe,
                                  X=X,
                                  y=y,
                                  cv=cv,
                                  n_jobs=n_jobs,
                                  scoring='neg_mean_absolute_error',
                                  error_score=np.nan)
        print("CV score: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))
        scores_sup_riemanngeo.append(scores)
    scores_sup_riemanngeo = scores_sup_riemanngeo[np.argmin(
        np.mean(scores_sup_riemanngeo, axis=1))]

    scores = {
        'dummy': np.array(scores_dummy),
        'sensor_euclidean': np.array(scores_sensor_naivevec),
        'sensor_logdiag': np.array(scores_sensor_logdiag),
        'rand_logdiag': np.array(scores_rand_logdiag),
        'unsup_logdiag': np.array(scores_unsup_logdiag),
        'sup_logdiag': np.array(scores_sup_logdiag),
        'sensor_riemannwass': np.array(scores_sensor_riemannwass),
        'rand_riemannwass': np.array(scores_rand_riemannwass),
        'unsup_riemannwass': np.array(scores_unsup_riemannwass),
        'sup_riemannwass': np.array(scores_sup_riemannwass),
        'sensor_riemanngeo': np.array(scores_sensor_riemanngeo),
        'rand_riemanngeo': np.array(scores_rand_riemanngeo),
        'unsup_riemanngeo': np.array(scores_unsup_riemanngeo),
        'sup_riemanngeo': np.array(scores_sup_riemanngeo)
    }
    return scores
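

# a minimal usage sketch (not from the original script): run all model families with
# the KFold CV defined above; assumes the globals used inside run_models
# (X, y, n_jobs, n_compo, scale, metric, n_fb) are available as in this excerpt
all_scores = run_models(cv)
summary = pd.DataFrame(all_scores)     # one column per model, one row per CV fold
print(summary.agg(['mean', 'std']).T)  # mean and spread of the CV MAE per model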