Example #1
import numpy as np
import mne
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from mne.decoding import (SlidingEstimator, LinearModel, cross_val_multiscore,
                          get_coef)


def logreg_timedecoding(epochs, numcv=4, jobs=1):
    """
    Logistic regression over sensors. Returns Evoked array containing coefficients and ROC.
    Code snippets stolen from:
    https://martinos.org/mne/stable/auto_tutorials/plot_sensors_decoding.html
    """

    X = epochs.get_data()  # MEG signals: n_epochs, n_channels, n_times
    X = X.astype(float)
    y = epochs.events[:, 2]  # targets

    # set up and run the decoder

    clf = make_pipeline(StandardScaler(), LinearModel(LogisticRegression()))

    time_decod = SlidingEstimator(clf, scoring='roc_auc', n_jobs=jobs)

    scores = cross_val_multiscore(time_decod, X, y, cv=numcv, n_jobs=jobs)

    # Mean scores across cross-validation splits
    scores = np.mean(scores, axis=0)

    # refit on all the data to extract the spatial patterns
    time_decod = SlidingEstimator(clf, scoring='roc_auc', n_jobs=jobs)
    time_decod.fit(X, y)

    coef = get_coef(time_decod, 'patterns_', inverse_transform=True)

    evoked = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
    evoked.roc_auc = scores

    return evoked
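A minimal usage sketch (assuming `epochs` is an `mne.Epochs` object whose event codes define a binary contrast; the parameter values and the plot call are illustrative, not part of the original snippet):

evoked_patterns = logreg_timedecoding(epochs, numcv=5, jobs=2)
print(evoked_patterns.roc_auc.shape)   # cross-validated AUC per time point
evoked_patterns.plot_joint(title='patterns')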
Example #2
# (head of this snippet is truncated; the call below presumably wraps
#  mne.time_frequency.tfr_array_morlet applied to the epochs data)
X = mne.time_frequency.tfr_array_morlet(epochs.get_data(),
                                        sfreq=epochs.info['sfreq'],
                                        freqs=freqs,
                                        output='power',
                                        n_cycles=n_cycles)
n_epochs, n_channels, n_freqs, n_times = X.shape
X = X.reshape(n_epochs, n_channels, -1)  # collapse freqs and time
# Run decoding on TFR output
for analysis in analyses:
    fname = results_folder +\
        '%s_tf_scores_%s_%s.npy' % (subject, 'Cue', analysis)
    y = np.array(events_behavior[analysis])
    clf = make_pipeline(
        StandardScaler(),
        # force_predict and scorer_auc are project-specific helpers,
        # not part of the MNE or scikit-learn API
        force_predict(LogisticRegression(), 'predict_proba', axis=1))
    scorer = scorer_auc
    kwargs = dict()
    le = preprocessing.LabelEncoder()
    le.fit(y)
    y = le.transform(y)
    sel = np.where(y != 0)[0]
    td = SlidingEstimator(clf,
                          scoring=make_scorer(scorer),
                          n_jobs=24,
                          **kwargs)
    td.fit(X[sel], y[sel])
    scores = cross_val_multiscore(td, X[sel], y[sel], cv=StratifiedKFold(12))
    scores = scores.mean(axis=0)
    scores = np.reshape(scores, (n_freqs, n_times))
    # Save cross validated scores
    np.save(fname, np.array(scores))
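Each saved array is an (n_freqs, n_times) map of decoding scores; a small visualization sketch (assuming matplotlib, the freqs/epochs variables from the snippet above, and no decimation of the TFR time axis; not part of the original code):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
im = ax.imshow(scores, aspect='auto', origin='lower',
               extent=[epochs.times[0], epochs.times[-1],
                       freqs[0], freqs[-1]])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Frequency (Hz)')
fig.colorbar(im, ax=ax, label='AUC')
plt.show()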
Example #3
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')  # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
plt.show()

###############################################################################
# You can retrieve the spatial filters and spatial patterns if you explicitly
# use a LinearModel
clf = make_pipeline(StandardScaler(), LinearModel(LogisticRegression()))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
time_decod.fit(X, y)

coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
evoked = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
                    topomap_args=dict(time_unit='s'))
evoked.plot_joint(times=np.arange(0., .500, .100),
                  title='patterns',
                  **joint_kwargs)
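The comment above mentions both spatial filters and spatial patterns, but the code only extracts the patterns; the filters can be pulled out the same way (a short addition, not part of the original tutorial excerpt):

filters = get_coef(time_decod, 'filters_', inverse_transform=True)
evoked_filters = mne.EvokedArray(filters, epochs.info, tmin=epochs.times[0])
evoked_filters.plot_joint(times=np.arange(0., .500, .100), title='filters',
                          **joint_kwargs)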

###############################################################################
# Temporal Generalization
# -----------------------
#
# This runs the analysis used in [1]_ and further detailed in [2]_
#
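The temporal-generalization code itself is missing from this excerpt; a minimal sketch of how it is typically set up with mne.decoding.GeneralizingEstimator, reusing clf, X, y and the tutorial's usual imports from the snippet above (cv=5 is an illustrative choice):

from mne.decoding import GeneralizingEstimator

time_gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=1,
                                 verbose=True)
# scores_gen: (n_splits, n_train_times, n_test_times), then averaged over folds
scores_gen = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)
scores_gen = np.mean(scores_gen, axis=0)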
Example #4
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')  # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')

###############################################################################
# You can retrieve the spatial filters and spatial patterns if you explicitly
# use a LinearModel
clf = make_pipeline(StandardScaler(),
                    LinearModel(LogisticRegression(solver='lbfgs')))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
time_decod.fit(X, y)

coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
evoked = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
                    topomap_args=dict(time_unit='s'))
evoked.plot_joint(times=np.arange(0., .500, .100), title='patterns',
                  **joint_kwargs)

###############################################################################
# Temporal generalization
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# Temporal generalization is an extension of the decoding over time approach.
# It consists in evaluating whether the model estimated at a particular
# time instant accurately predicts any other time instant. It is analogous to
# transferring a trained model to a distinct learning problem, where the
# problems correspond to decoding the patterns of brain activity recorded at
# distinct time instants.
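The snippet stops here; a plotting sketch for the resulting (training time, testing time) score matrix, assuming a scores_gen matrix as in the GeneralizingEstimator sketch under Example #3 above and the same epochs object (not part of the original excerpt):

fig, ax = plt.subplots()
im = ax.imshow(scores_gen, interpolation='lanczos', origin='lower',
               cmap='RdBu_r', vmin=0., vmax=1.,
               extent=epochs.times[[0, -1, 0, -1]])
ax.set_xlabel('Testing time (s)')
ax.set_ylabel('Training time (s)')
ax.set_title('Temporal generalization')
ax.axvline(0., color='k')
ax.axhline(0., color='k')
fig.colorbar(im, ax=ax, label='AUC')
plt.show()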
Example #5
# (head of this snippet is truncated; time_decoder is presumably a
#  SlidingEstimator wrapping a scaled LinearModel pipeline, given the
#  get_coef('patterns_') call below)
time_decoder = SlidingEstimator(
    make_pipeline(StandardScaler(), LinearModel(LogisticRegression())),
    scoring='roc_auc',
    n_jobs=n_jobs,
)

y = epochs_df['label'].values.copy()
y[y == 2] = 0

scores = cross_val_multiscore(
    time_decoder,
    X=label_ts,
    y=y,
    groups=epochs_df['session'],
    cv=len(epochs_df['session'].unique()),
    n_jobs=n_jobs,
)

time_decoder.fit(label_ts, y)
coef = get_coef(time_decoder, 'patterns_', inverse_transform=True)

# %%
# Plot


def fig_save(fig, path):
    html = fig.to_html()
    with open(path, 'w') as f:
        f.write(html)


# rank labels/channels by their peak absolute pattern weight over time
p = np.max(np.abs(coef), axis=1)
o = np.argsort(p)[::-1]  # indices ordered from strongest to weakest
p = p[o]
Example #6
# Plot
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')  # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')

#------------------------------------------------------------------------------

clf = make_pipeline(StandardScaler(),
                    LinearModel(LogisticRegression(solver='lbfgs')))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')
time_decod.fit(ica_data, labels)

coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
evoked = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
                    topomap_args=dict(time_unit='s'))
evoked.plot_joint(times=np.arange(9.5, 20.5, 1.), title='patterns',
                  **joint_kwargs)


#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------

# plot power image (time-frequency analysis)
n_cycles = 5  # number of cycles in the Morlet wavelet
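The snippet ends right after defining n_cycles; a minimal sketch of how such a power image is commonly computed and plotted with mne.time_frequency.tfr_morlet (the frequency grid, channel pick, and baseline here are illustrative assumptions):

freqs = np.arange(4., 40., 2.)  # assumed frequency grid (Hz)
power = mne.time_frequency.tfr_morlet(epochs, freqs=freqs,
                                      n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(None, 0.), mode='logratio',
           title=power.ch_names[0])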
Example #7
X_all = np.concatenate([X1, X2], axis=0)
y_all = np.concatenate([y1, y2], axis=0)
X_all.shape, y_all.shape

times = worker.clean_epochs.times

n_splits = int(y1.shape[0] / 64)

y_pred_time = np.zeros((3313, 141))  # hard-coded (n_epochs, n_times) for this dataset

skf = StratifiedKFold(n_splits=n_splits, shuffle=False)
for train_index, test_index in skf.split(X_all, y_all):
    X_train, y_train = X_all[train_index], y_all[train_index]
    X_test, y_test = X_all[test_index], y_all[test_index]

    estimator.fit(X_train, y_train)
    y_pred_time[test_index] = estimator.predict(X_test)

# %%
y_pred_time.shape

# %%
report = metrics.classification_report(
    y_true=y_all, y_pred=y_pred_time[:, 60], output_dict=True)
print(report)

# %%


def fuck_report(report):
Example #8
     # different probe compared to target
     if (epoch_type == 'Probe') & ('target' in analysis):
         sel = np.where((events['Change'] == 1) & (~np.isnan(y)))[0]
     else:
         sel = np.where(~np.isnan(y))[0]
 td = SlidingEstimator(clf,
                       scoring=make_scorer(scorer),
                       n_jobs=24,
                       **kwargs)
 # run decoding
 cv = StratifiedKFold(8)
 scores = list()
 patterns = list()
 filters = list()
 for train, test in cv.split(X[sel], y[sel]):
     td.fit(X[sel][train], y[sel][train])
     score = td.score(X[sel][test], y[sel][test])
     scores.append(score)
     patterns.append(get_coef(td, 'patterns_', inverse_transform=True))
     filters.append(get_coef(td, 'filters_', inverse_transform=True))
 scores = np.mean(scores, axis=0)
 patterns = np.mean(patterns, axis=0)
 filters = np.mean(filters, axis=0)
 if 'angle' in analysis:
     patterns = np.mean(np.abs(patterns), axis=1)
     filters = np.mean(np.abs(filters), axis=1)
 scores = np.reshape(scores, (n_freqs, n_times))
 patterns = np.reshape(patterns, (n_channels, n_freqs, n_times))
 filters = np.reshape(filters, (n_channels, n_freqs, n_times))
 # save cross-validated scores
 fname = results_folder +\
Example #9
# (head of this snippet is truncated; the lines below sit inside a
#  cross-validation loop over train_index / test_index)
        # fit_transform
        X_train = xdawn.fit_transform(epochs[train_index])
        y_train = y_true[train_index]
        # transform
        X_test = xdawn.transform(epochs[test_index])
        y_test = y_true[test_index]

        # All time decoder
        # Fit
        raw_decoder.fit(X_train, y_train)
        # Predict
        y_pred[test_index] = raw_decoder.predict(X_test)

        # Time decoder
        # Fit
        time_decoder.fit(X_train, y_train)
        # Predict
        y_pred_time[test_index] = time_decoder.predict(X_test)

        # Time window decoder
        for window_length, _info in time_windows.items():
            # _info[0], w_start
            # _info[1], w_length
            # _info[2], w_time
            w_length = _info[1]
            for j, w_start in enumerate(_info[0]):
                w_stop = w_start + w_length
                print(w_start)
                # Crop X_train and X_test
                _X_train = X_train[:, :, w_start:w_stop]
                _X_test = X_test[:, :, w_start:w_stop]
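time_windows is not defined in this excerpt; a hypothetical construction that matches the access pattern above (window length mapped to (start samples, length in samples, window times)) could look like the following, with all values purely illustrative:

import numpy as np

sfreq = 100.                                   # assumed sampling rate (Hz)
times = np.arange(-0.2, 1.0, 1. / sfreq)       # assumed epoch time axis (s)
time_windows = dict()
for window_length in [0.1, 0.2, 0.4]:          # window lengths in seconds
    w_length = int(window_length * sfreq)      # _info[1]: length in samples
    w_starts = np.arange(0, len(times) - w_length, w_length)  # _info[0]
    w_times = times[w_starts + w_length // 2]  # _info[2]: window-center times
    time_windows[window_length] = (w_starts, w_length, w_times)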
Example #10
# (head of this snippet is truncated; le is presumably a LabelEncoder
#  already fitted on y, as in the snippets above)
 y = le.transform(y)
 sel = np.where(y != 0)[0]
 le = LabelEncoder()
 le.fit(y_con)
 y_con = le.transform(y_con)
 sel_con = np.where(y_con != 0)[0]
 # Define estimators depending on the analysis
 clf = make_pipeline(StandardScaler(), LinearModel(LogisticRegression()))
 kwargs = dict()
 est = SlidingEstimator(clf, scoring='roc_auc', n_jobs=24, **kwargs)
 # Run decoding
 cv = StratifiedKFold(12)
 scores = list()
 scores_con = list()
 for train, test in cv.split(X[sel], y[sel]):
     est.fit(X[sel][train], y[sel][train])  # train during WM task
     score = est.score(X[sel][test], y[sel][test])  # test during WM task
     score_con = est.score(X_con[sel_con],
                           y_con[sel_con])  # test during control task
     scores.append(score)
     scores_con.append(score_con)
 scores = np.mean(scores, axis=0)
 scores = np.reshape(scores, (n_freqs, n_times))
 scores_con = np.mean(scores_con, axis=0)
 scores_con = np.reshape(scores_con, (n_freqs, n_times))
 # save cross-validated scores
 fname = results_folder +\
     '%s_scores_tf_%s.npy' % (subject, analysis)
 np.save(fname, np.array(scores))
 fname = results_folder +\
     '%s_scores_tf_%s_con.npy' % (subject, analysis)
Example #11
# (head of this snippet is truncated; time_decoder is presumably a
#  SlidingEstimator wrapping a scaled LinearModel pipeline, given the
#  get_coef('patterns_') call below)
time_decoder = SlidingEstimator(
    make_pipeline(StandardScaler(), LinearModel(LogisticRegression())),
    scoring='roc_auc',
    n_jobs=n_jobs,
)

y = df['label'].values.copy()
y[y == 2] = 0

scores = cross_val_multiscore(
    time_decoder,
    X=data,
    y=y,
    groups=df['session'],
    cv=len(df['session'].unique()),
    n_jobs=n_jobs,
)

time_decoder.fit(data, y)

# %%
# Plot

kwargs = dict(
    x=times,
    y=np.mean(scores, axis=0),
    error_y=np.std(scores, axis=0),
    title=f'Sensor Space Decoding (ROC AUC) of {subject_name}',
)
fig = px.line(**kwargs)
fig.show()

coef = get_coef(time_decoder, 'patterns_', inverse_transform=True)[0]
evoked_time_gen = mne.EvokedArray(coef, info, tmin=times[0])