epochs = mne.Epochs(raw, events, event_id, -0.050, 0.400, proj=True,
                    picks=picks, baseline=None, preload=True,
                    reject=dict(mag=5e-12), decim=decim, verbose=False)

# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.

# In this case, because the test data is independent from the train data,
# we test the classifier of each fold and average the respective predictions.

# Define events of interest
triggers = epochs.events[:, 2]
viz_vs_auditory = np.in1d(triggers, (1, 2)).astype(int)

gat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=1)

# For our left events, which ones are visual?
viz_vs_auditory_l = (triggers[np.in1d(triggers, (1, 3))] == 3).astype(int)
# To make scikit-learn happy, we converted the bool array to integers
# in the same line. This results in an array of zeros and ones:
print("The unique classes' labels are: %s" % np.unique(viz_vs_auditory_l))

gat.fit(epochs[('AudL', 'VisL')], y=viz_vs_auditory_l)

# For our right events, which ones are visual?
viz_vs_auditory_r = (triggers[np.in1d(triggers, (2, 4))] == 4).astype(int)

gat.score(epochs[('AudR', 'VisR')], y=viz_vs_auditory_r)
gat.plot(
    title="Generalization Across Time (visual vs auditory): left to right")
                    # Apply contrast
                    if clf_type['name'] == 'SVC':
                        decoding_parameters = decoding_params[0]['values']
                    elif clf_type['name'] == 'SVR':
                        decoding_parameters = decoding_params[1]['values']
                    gat = GeneralizationAcrossTime(**decoding_parameters)
                    gat.fit(epochs[sel], y=y[sel])
                    gat.score(epochs[sel], y=y[sel])

                    # Plot
                    fig = gat.plot_diagonal(show=False)
                    report.add_figs_to_section(fig,
                        ('%s %s: (decoding)' % (subject, cond_name)), subject)

                    fig = gat.plot(show=False)
                    report.add_figs_to_section(fig,
                        ('%s %s: GAT' % (subject, cond_name)), subject)

                    # Save contrast
                    pkl_fname = op.join(
                        data_path, subject, 'mvpas',
                        '{}-decod_{}_{}{}.pickle'.format(
                            subject, cond_name, clf_type['name'],
                            fname_appendix))

                    # Save classifier results
                    with open(pkl_fname, 'wb') as f:
                        pickle.dump([gat, contrast], f)

                break
            break
        break
decim = 2  # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id, -0.050, 0.400, proj=True,
                    picks=picks, baseline=None, preload=True,
                    reject=dict(mag=5e-12), decim=decim, verbose=False)

# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.

# In this case, because the test data is independent from the train data,
# we test the classifier of each fold and average the respective predictions.

# Define events of interest
triggers = epochs.events[:, 2]
viz_vs_auditory = np.in1d(triggers, (1, 2)).astype(int)

gat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=1)

# For our left events, which ones are visual?
viz_vs_auditory_l = (triggers[np.in1d(triggers, (1, 3))] == 3).astype(int)
# To make scikit-learn happy, we converted the bool array to integers
# in the same line. This results in an array of zeros and ones:
print("The unique classes' labels are: %s" % np.unique(viz_vs_auditory_l))

gat.fit(epochs[('AudL', 'VisL')], y=viz_vs_auditory_l)

# For our right events, which ones are visual?
viz_vs_auditory_r = (triggers[np.in1d(triggers, (2, 4))] == 4).astype(int)

gat.score(epochs[('AudR', 'VisR')], y=viz_vs_auditory_r)
gat.plot(title="Temporal Generalization (visual vs auditory): left to right")
Example #4
chan_names = ['MEG %i' % chan for chan in range(n_chan)]
chan_types = ['mag'] * n_chan
sfreq = 250
info = create_info(chan_names, sfreq, chan_types)
epochs = EpochsArray(data=X, info=info, events=events, verbose=False)
epochs.times = selected_times[:n_time]

# make classifier
clf = LogisticRegression(C=0.0001)

# fit model and score
gat = GeneralizationAcrossTime(clf=clf,
                               scorer="roc_auc",
                               cv=cv,
                               predict_method="predict")

gat.fit(epochs, y=y)
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/gat_cp.jl")

# make matrix plot and save it
fig = gat.plot(cmap="viridis",
               title="Temporal Gen (Classic vs planning) for CharPath")
fig.savefig(data_path + "decode_time_gen/%s_gat_matrix_cp.png")

fig = gat.plot_diagonal(
    chance=0.5, title="Temporal Gen (Classic vs planning) for CharPath")
fig.savefig(data_path + "decode_time_gen/gat_diagonal_cp.png")
events = mne.find_events(raw, stim_channel='UPPT001')
event_id = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.1, 0.5

decim = 4  # decimate to make the example faster to run
epochs = mne.Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    picks=picks,
                    baseline=None,
                    preload=True,
                    reject=dict(mag=1.5e-12),
                    decim=decim,
                    verbose=False)

# Define decoder. The decision_function is employed to use AUC for scoring
gat = GeneralizationAcrossTime(predict_mode='cross-validation',
                               predict_type='decision_function',
                               n_jobs=2)

# fit and score
gat.fit(epochs)
gat.score(epochs)
gat.plot(vmin=0.1,
         vmax=0.9,
         title="Generalization Across Time (faces vs. scrambled)")
gat.plot_diagonal()  # decoding over time (corresponds to the GAT diagonal)
epochs_data.equalize_event_counts(["0", "1"])

# Classifier
clf = make_pipeline(StandardScaler(), LogisticRegression(C=1))

# Set up the GAT (class labels are taken from the epochs' events)
gat = GeneralizationAcrossTime(clf=clf, predict_mode='mean-prediction',
                               scorer="roc_auc", n_jobs=1)

# Fit model
print("fitting GAT")
gat.fit(epochs_data)

# Scoring
print("Scoring GAT")
gat.score(epochs_data)

# Save model
joblib.dump(
    gat, data_path + "decode_time_gen/%s_gat_allsensor-grad_ctl.jl" % subject)

# make matrix plot and save it
fig = gat.plot(cmap="viridis", title="Temporal Gen for subject: %s" % subject)
fig.savefig(data_path + "decode_time_gen/%s_gat_matrix_allsensor-grad_ctl.png"
            % subject)

fig = gat.plot_diagonal(
    chance=0.5, title="Temporal Gen for subject: %s" % subject)
fig.savefig(data_path +
            "decode_time_gen/%s_gat_diagonal_allsensor-grad_ctl.png" % subject)
Example #7
events = np.vstack((range(n_trial), np.zeros(n_trial, int), y.astype(int))).T
chan_names = ['MEG %i' % chan for chan in range(n_chan)]
chan_types = ['mag'] * n_chan
sfreq = 250
info = create_info(chan_names, sfreq, chan_types)
epochs = EpochsArray(data=X, info=info, events=events, verbose=False)
epochs.times = selected_times[:n_time]

# make classifier
clf = LogisticRegression(C=0.0001)

# fit model and score
gat = GeneralizationAcrossTime(clf=clf,
                               scorer="roc_auc",
                               cv=cv,
                               predict_method="predict")
gat.fit(epochs, y=y)
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/gat_pr.jl")

# make matrix plot and save it
fig = gat.plot(cmap="viridis",
               title="Temporal Gen (Classic vs planning) for Pagerank")
fig.savefig(data_path + "decode_time_gen/gat_matrix_pr.png")

fig = gat.plot_diagonal(
    chance=0.5, title="Temporal Gen (Classic vs planning) for Pagerank")
fig.savefig(data_path + "decode_time_gen/gat_diagonal_pr.png")
Example #8
mne.equalize_channels([epochs_classic, epochs_plan])
mne.epochs.equalize_epoch_counts([epochs_classic, epochs_plan])

# Dirty hack # TODO: Check this from the Maxfilter side
# epochs_classic.info['dev_head_t'] = epochs_plan.info['dev_head_t']

epochs = mne.concatenate_epochs([epochs_classic, epochs_plan])

# Crop and downsample to make it faster
epochs.crop(tmin=-3.5, tmax=0)
epochs.resample(250)

# Setup the y vector and GAT
y = np.concatenate(
    (np.zeros(len(epochs["press"])), np.ones(len(epochs["plan"]))))
gat = GeneralizationAcrossTime(
    predict_mode='mean-prediction', scorer="roc_auc", n_jobs=1)

# Fit model
gat.fit(epochs, y=y)

# Scoring and visualise result
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/%s_gat_2.jl" % subject)

fig = gat.plot(
    title="Temporal Gen (Classic vs planning): left to right sub: %s" %
    subject)
fig.savefig(data_path + "decode_time_gen/%s_gat_matrix_2.png" % subject)
decim = 2  # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id, -0.050, 0.400, proj=True,
                    picks=picks, baseline=None, preload=True,
                    reject=dict(mag=5e-12), decim=decim, verbose=False)

# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.

# In this case, because the test data is independent from the train data,
# we test the classifier of each fold and average the respective predictions.

# Define events of interest
triggers = epochs.events[:, 2]
viz_vs_auditory = np.in1d(triggers, (1, 2)).astype(int)

gat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=1)

# For our left events, which ones are visual?
viz_vs_auditory_l = (triggers[np.in1d(triggers, (1, 3))] == 3).astype(int)
# To make scikit-learn happy, we converted the bool array to integers
# in the same line. This results in an array of zeros and ones:
print("The unique classes' labels are: %s" % np.unique(viz_vs_auditory_l))

gat.fit(epochs[('AudL', 'VisL')], y=viz_vs_auditory_l)

# For our right events, which ones are visual?
viz_vs_auditory_r = (triggers[np.in1d(triggers, (2, 4))] == 4).astype(int)

gat.score(epochs[('AudR', 'VisR')], y=viz_vs_auditory_r)
gat.plot(title="Temporal Generalization (visual vs auditory): left to right")
decim = 2  # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id, -0.050, 0.400, proj=True,
                    picks=picks, baseline=None, preload=True,
                    reject=dict(mag=5e-12), decim=decim, verbose=False)

# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.

# In this case, because the test data is independent from the train data,
# we test the classifier of each fold and average the respective predictions.

# Define events of interest
triggers = epochs.events[:, 2]
viz_vs_auditory = np.in1d(triggers, (1, 2)).astype(int)

gat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=1)

# For our left events, which ones are visual?
viz_vs_auditory_l = (triggers[np.in1d(triggers, (1, 3))] == 3).astype(int)
# To make scikit-learn happy, we converted the bool array to integers
# in the same line. This results in an array of zeros and ones:
print("The unique classes' labels are: %s" % np.unique(viz_vs_auditory_l))

gat.fit(epochs[('AudL', 'VisL')], y=viz_vs_auditory_l)

# For our right events, which ones are visual?
viz_vs_auditory_r = (triggers[np.in1d(triggers, (2, 4))] == 4).astype(int)

gat.score(epochs[('AudR', 'VisR')], y=viz_vs_auditory_r)
gat.plot(
    title="Generalization Across Time (visual vs auditory): left to right")
Example #11
n_trial, n_chan, n_time = X.shape
events = np.vstack((range(n_trial), np.zeros(n_trial, int), y.astype(int))).T
chan_names = ['MEG %i' % chan for chan in range(n_chan)]
chan_types = ['mag'] * n_chan
sfreq = 250
info = create_info(chan_names, sfreq, chan_types)
epochs = EpochsArray(data=X, info=info, events=events, verbose=False)
epochs.times = selected_times[:n_time]

# make classifier
clf = LogisticRegression(C=0.0001)

# fit model and score
gat = GeneralizationAcrossTime(
    clf=clf, scorer="roc_auc", cv=cv, predict_method="predict")

gat.fit(epochs, y=y)
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/gat_cp.jl")

# make matrix plot and save it
fig = gat.plot(
    cmap="viridis", title="Temporal Gen (Classic vs planning) for CharPath")
fig.savefig(data_path + "decode_time_gen/%s_gat_matrix_cp.png")

fig = gat.plot_diagonal(
    chance=0.5, title="Temporal Gen (Classic vs planning) for CharPath")
fig.savefig(data_path + "decode_time_gen/gat_diagonal_cp.png")
            # Apply analysis
            gat = GeneralizationAcrossTime(clf=analysis['clf'],
                                           cv=analysis['cv'],
                                           scorer=analysis['scorer'],
                                           n_jobs=-1)
            gat.fit(epochs[sel], y=y[sel])
            gat.score(epochs[sel], y=y[sel])

            # Save analysis
            pkl_fname = paths('decod', subject=subject, data_type=data_type,
                              analysis=analysis['name'], log=True)

            # Save classifier results
            with open(pkl_fname, 'wb') as f:
                pickle.dump([gat, analysis, sel, events], f)

            # Plot
            fig = gat.plot_diagonal(show=False)
            report.add_figs_to_section(fig, ('%s %s %s: (diagonal)' %
                                       (subject, data_type, analysis['name'])),
                                       analysis['name'])

            fig = gat.plot(vmin=np.min(gat.scores_),
                           vmax=np.max(gat.scores_), show=False)
            report.add_figs_to_section(fig, ('%s %s %s: GAT' % (
                                       subject, data_type, analysis['name'])),
                                       analysis['name'])

report.save(open_browser=open_browser)
upload_report(report)
print(__doc__)

# Preprocess data
data_path = spm_face.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'

raw = mne.io.Raw(raw_fname % 1, preload=True)  # Take first run

picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 45, method='iir')

events = mne.find_events(raw, stim_channel='UPPT001')
event_id = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.1, 0.5

decim = 4  # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=None, preload=True,
                    reject=dict(mag=1.5e-12), decim=decim, verbose=False)

# Define decoder. With predict_mode='cross-validation', each fold is scored
# by the classifier trained on the remaining folds.
gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=1)

# fit and score
gat.fit(epochs)
gat.score(epochs)
gat.plot(vmin=0.1, vmax=0.9,
         title="Generalization Across Time (faces vs. scrambled)")
gat.plot_diagonal()  # decoding over time (corresponds to the GAT diagonal)
Example #14
p468 = mne.combine_evoked([evokeds['p_4'], evokeds['p_6'], evokeds['p_8']],
                          weights='equal')
f2 = p468.plot_joint([.18, .3, .45, .6], title='Pop')
axes = f2.get_axes()
axes[0].set_ylim([-110, 110])


triggers = epochs.events[:, 2]
gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=12)
#gat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=12)


ind = np.in1d(triggers, (4, 5, 6)).astype(int)
gat.fit(epochs[('np_4', 'np_6', 'np_8', 'p_4', 'p_6', 'p_8')], y=ind)
gat.score(epochs[('np_4', 'np_6', 'np_8', 'p_4', 'p_6', 'p_8')], y=ind)
gat.plot(vmin=.55, vmax=.7)
gat.plot_diagonal()


###
np8_vs_p4 = (triggers[np.in1d(triggers, (3, 4))] == 4).astype(int)
p8_vs_np4 = (triggers[np.in1d(triggers, (6, 1))] == 1).astype(int)
p8_vs_np8 = (triggers[np.in1d(triggers, (6, 3))] == 3).astype(int)
p6_vs_np6 = (triggers[np.in1d(triggers, (5, 2))] == 2).astype(int)
p4_vs_np4 = (triggers[np.in1d(triggers, (4, 1))] == 1).astype(int)
p8_vs_p6 = (triggers[np.in1d(triggers, (6, 5))] == 5).astype(int)
np8_vs_np6 = (triggers[np.in1d(triggers, (1, 2))] == 2).astype(int)
#
gat.fit(epochs[('p_4', 'np_4')], y=p4_vs_np4)
gat.score(epochs[('p_4', 'np_4')], y=p4_vs_np4)
gat.plot(vmin=.6, vmax=.75, title='p4_vs_np4')
Example #15
mne.epochs.equalize_epoch_counts([epochs_classic, epochs_plan])

# Dirty hack # TODO: Check this from the Maxfilter side
# epochs_classic.info['dev_head_t'] = epochs_plan.info['dev_head_t']

epochs = mne.concatenate_epochs([epochs_classic, epochs_plan])

# Crop and downsample to make it faster
epochs.crop(tmin=-3.5, tmax=0)
epochs.resample(250)

# Setup the y vector and GAT
y = np.concatenate(
    (np.zeros(len(epochs["press"])), np.ones(len(epochs["plan"]))))
gat = GeneralizationAcrossTime(predict_mode='mean-prediction',
                               scorer="roc_auc",
                               n_jobs=1)

# Fit model
gat.fit(epochs, y=y)

# Scoring and visualise result
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/%s_gat_2.jl" % subject)

fig = gat.plot(
    title="Temporal Gen (Classic vs planning): left to right sub: %s" %
    subject)
fig.savefig(data_path + "decode_time_gen/%s_gat_matrix_2.png" % subject)
Example #16
epochs.times = selected_times[:n_time]

# make classifier
clf = LogisticRegression(C=0.0001)

# fit model and score
gat = GeneralizationAcrossTime(
    clf=clf, scorer="roc_auc", cv=cv, predict_method="predict")
gat.fit(epochs, y=y)
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/gat_ge.jl")

# make matrix plot and save it
fig = gat.plot(
    cmap="viridis", title="Temporal Gen (Classic vs planning) for Global Eff.")
fig.savefig(data_path + "decode_time_gen/gat_matrix_ge.png")

fig = gat.plot_diagonal(
    chance=0.5, title="Temporal Gen (Classic vs planning) for Global eff.")
fig.savefig(data_path + "decode_time_gen/gat_diagonal_ge.png")

# Manual model

X2 = np.vstack([data_cls.reshape((13, -1)), data_pln.reshape(13, -1)])

ada = AdaBoostClassifier()
adaboost_params = {
    "n_estimators": np.arange(1, 21, 1),
    "learning_rate": np.arange(0.1, 1.1, 0.1)
}
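
# A hedged sketch of how the AdaBoost grid above could be searched. The label
# vector y2 (13 "classic" rows followed by 13 "plan" rows, matching the vstack
# order of X2) and the 5-fold CV are assumptions, as is a scikit-learn new
# enough to provide model_selection.
from sklearn.model_selection import GridSearchCV

y2 = np.concatenate([np.zeros(13), np.ones(13)])  # assumed class labels
grid = GridSearchCV(ada, adaboost_params, scoring="roc_auc", cv=5)
grid.fit(X2, y2)
print("best params: %s, best AUC: %0.3f"
      % (grid.best_params_, grid.best_score_))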
Example #17
# Here we'll use a stratified cross-validation scheme.

# make response vector
y = np.zeros(len(epochs.events), dtype=int)
y[epochs.events[:, 2] == 3] = 1
cv = StratifiedKFold(y=y)  # do a stratified cross-validation
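# Note: StratifiedKFold(y=...) is the pre-0.18 scikit-learn API; newer
# versions take n_splits at construction and receive y via split().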

# define the GeneralizationAcrossTime object
gat = GeneralizationAcrossTime(predict_mode="cross-validation", n_jobs=1, cv=cv, scorer=roc_auc_score)

# fit and score
gat.fit(epochs, y=y)
gat.score(epochs)

# let's visualize now
gat.plot()
gat.plot_diagonal()

###############################################################################
# Exercise
# --------
#  - Can you improve the performance using full epochs and a common spatial
#    pattern (CSP) used by most BCI systems?
#  - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
#    Face vs. Scrambled)
#
# Have a look at the example
# :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_space.py`
#
# References
# ==========
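
# A hedged sketch of the CSP idea from the exercise above, assuming the
# `epochs` and `y` objects defined earlier and a scikit-learn new enough to
# provide model_selection; n_components and the regularizer are illustrative.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from mne.decoding import CSP

csp_clf = make_pipeline(CSP(n_components=4, reg='ledoit_wolf'),
                        LinearDiscriminantAnalysis())
X_full = epochs.get_data()  # shape: (n_trials, n_channels, n_times)
csp_scores = cross_val_score(csp_clf, X_full, y, cv=5, scoring='roc_auc')
print("CSP + LDA mean AUC: %0.3f" % csp_scores.mean())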
train_times = {'start': -0.1, 'stop': 2}

gat = GeneralizationAcrossTime(predict_mode='cross-validation',
                               train_times=train_times,
                               n_jobs=6,
                               cv=cv,
                               scorer=roc_auc_score)

# fit and score
print("Fitting")
gat.fit(epochs_clas, y=y)
print("Scoring")
gat.score(epochs_clas)

# let's visualize now
gat.plot()
gat.plot_diagonal()

# ------------------------------
# Temporal Generalization Decoding
# ------------------------------

data_path = '/home/claire/DATA/Data_Face_House/S01/'

epochs = mne.read_epochs(data_path + 'S01-epo.fif', preload=True)

# the training window starts 1 s after trial onset to avoid evoked effects
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)

decim = 4
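
# A hedged continuation of the truncated snippet above, following the pattern
# of the other examples in this collection; the binary label construction
# (assumed two event codes, e.g. face vs. house) and the GAT settings are
# assumptions.
epochs_train.decimate(decim)
codes = epochs_train.events[:, 2]
y = (codes == np.unique(codes)[-1]).astype(int)  # assumes two event codes

gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=1)
gat.fit(epochs_train, y=y)
gat.score(epochs_train, y=y)
gat.plot(title="Temporal generalization (S01)")
gat.plot_diagonal()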
Example #19
y = np.zeros(len(epochs.events), dtype=int)
y[epochs.events[:, 2] == 3] = 1
cv = StratifiedKFold(y=y)  # do a stratified cross-validation

# define the GeneralizationAcrossTime object
gat = GeneralizationAcrossTime(predict_mode='cross-validation',
                               n_jobs=1,
                               cv=cv,
                               scorer=roc_auc_score)

# fit and score
gat.fit(epochs, y=y)
gat.score(epochs)

# let's visualize now
gat.plot()
gat.plot_diagonal()

###############################################################################
# Exercise
# --------
#  - Can you improve the performance using full epochs and a common spatial
#    pattern (CSP) used by most BCI systems?
#  - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
#    Face vs. Scrambled)
#
# Have a look at the example
# :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_space.py`
#
# References
# ==========
    clf = make_pipeline(StandardScaler(), clf)
    # initialize the GAT object
    gat = GeneralizationAcrossTime(clf=clf, scorer=scorer_auc, n_jobs=-1,
                                   cv=10)

    # select the trials where a target is presented
    for contrast in ['HL', 'EU', 'PR']:
        epochs_ = concatenate_epochs((epochs[contrast[0]],
                                      epochs[contrast[1]]))
        y = np.hstack((np.zeros(len(epochs[contrast[0]])),
                       np.ones(len(epochs[contrast[1]]))))
        gat.fit(epochs_, y=y)
        fname = op.join(data_path, 's%i_%s_fit.pkl' % (subject, contrast))
        with open(fname, 'wb') as f:
            pickle.dump(gat, f)
        # TODO: should save y_pred separately

        # predict + score
        scores = gat.score(epochs_, y=y)
        fname = op.join(data_path,
                        's%i_%s_scores.npy' % (subject, contrast))
        np.save(fname, np.array(scores))
        all_scores[contrast].append(np.array(scores))
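        # Hedged sketch for the TODO above about saving y_pred separately;
        # assumes the fitted GAT exposes `y_pred_` (as in MNE's decoding
        # module of that era); getattr keeps this safe if the name differs.
        pred_fname = op.join(data_path,
                             's%i_%s_y_pred.pkl' % (subject, contrast))
        with open(pred_fname, 'wb') as f:
            pickle.dump(getattr(gat, 'y_pred_', None), f)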
        # plot
        fig, axes = plt.subplots(2, 1, facecolor='w')
        gat.plot_diagonal(show=False, ax=axes[0], chance=.5)
        gat.plot(show=False, ax=axes[1], vmin=.25, vmax=.75)
        report.add_figs_to_section(fig, str(subject), contrast)

report.save()
Example #21
# make classifier
clf = LogisticRegression(C=0.0001)

# fit model and score
gat = GeneralizationAcrossTime(clf=clf,
                               scorer="roc_auc",
                               cv=cv,
                               predict_method="predict")
gat.fit(epochs, y=y)
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/gat_ge.jl")

# make matrix plot and save it
fig = gat.plot(cmap="viridis",
               title="Temporal Gen (Classic vs planning) for Global Eff.")
fig.savefig(data_path + "decode_time_gen/gat_matrix_ge.png")

fig = gat.plot_diagonal(
    chance=0.5, title="Temporal Gen (Classic vs planning) for Global eff.")
fig.savefig(data_path + "decode_time_gen/gat_diagonal_ge.png")

# Manual model

X2 = np.vstack([data_cls.reshape((13, -1)), data_pln.reshape(13, -1)])

ada = AdaBoostClassifier()
adaboost_params = {
    "n_estimators": np.arange(1, 21, 1),
    "learning_rate": np.arange(0.1, 1.1, 0.1)
}
Example #22
# Create epochs to use for classification
n_trial, n_chan, n_time = X.shape
events = np.vstack((range(n_trial), np.zeros(n_trial, int), y.astype(int))).T
chan_names = ['MEG %i' % chan for chan in range(n_chan)]
chan_types = ['mag'] * n_chan
sfreq = 250
info = create_info(chan_names, sfreq, chan_types)
epochs = EpochsArray(data=X, info=info, events=events, verbose=False)
epochs.times = selected_times[:n_time]

epochs.crop(-3.8, None)

# fit model and score
gat = GeneralizationAcrossTime(
    scorer="accuracy", cv=cv, predict_method="predict")
gat.fit(epochs, y=y)
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/%s_gat_tr.jl" % subject)

# make matrix plot and save it
fig = gat.plot(
    cmap="viridis",
    title="Temporal Gen (Classic vs planning) for transitivity.")
fig.savefig(data_path + "decode_time_gen/%s_gat_matrix_tr.png" % subject)

fig = gat.plot_diagonal(
    chance=0.5, title="Temporal Gen (Classic vs planning) for transitivity")
fig.savefig(data_path + "decode_time_gen/%s_gat_diagonal_tr.png" % subject)