Example #1
# The first is a standard `LeftRightImagery` paradigm with a single 8 to
# 35 Hz broadband filter.
#
# The second is a `FilterBankLeftRightImagery` with a bank of 6 filters,
# ranging from 8 to 35 Hz.
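
# Note: this excerpt omits its imports and the `pipelines` definitions used
# below. A minimal sketch, assuming the standard moabb, MNE, and scikit-learn
# entry points (the pipeline names match the filtering done later in this
# example; the CSP component counts are illustrative, not the original ones):
import pandas as pd
from mne.decoding import CSP
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.pipeline import make_pipeline

from moabb.datasets import BNCI2014001
from moabb.evaluations import CrossSessionEvaluation
from moabb.paradigms import FilterBankLeftRightImagery, LeftRightImagery
from moabb.pipelines.utils import FilterBank

pipelines = {"CSP + LDA": make_pipeline(CSP(n_components=8), LDA())}
pipelines_fb = {"FBCSP + LDA": make_pipeline(FilterBank(CSP(n_components=4)), LDA())}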

datasets = [BNCI2014001()]
overwrite = False  # set to True if we want to overwrite cached results

# broadband filters
filters = [[8, 35]]
paradigm = LeftRightImagery(filters=filters)
evaluation = CrossSessionEvaluation(paradigm=paradigm,
                                    datasets=datasets,
                                    suffix='examples',
                                    overwrite=overwrite)
results = evaluation.process(pipelines)

# cached results might include other pipelines
results = results[results.pipeline == 'CSP + LDA']

# bank of 6 filters, in 4 Hz increments
filters = [[8, 12], [12, 16], [16, 20], [20, 24], [24, 28], [28, 35]]
paradigm = FilterBankLeftRightImagery(filters=filters)
evaluation = CrossSessionEvaluation(paradigm=paradigm,
                                    datasets=datasets,
                                    suffix='examples',
                                    overwrite=overwrite)
results_fb = evaluation.process(pipelines_fb)

###############################################################################
# After processing the two evaluations, we simply concatenate the results.
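
# A minimal sketch of that concatenation (assuming pandas is imported as pd):
results = pd.concat([results, results_fb])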
Example #2
##########################################################################
# Paradigm
# --------------------
#
# Paradigms define the events, epoch time, bandpass, and other preprocessing
# parameters. They have defaults that you can read in the documentation, or you
# can simply set them as we do here. A single paradigm defines a method for
# going from continuous data to trial data of a fixed size. To learn more,
# see the Exploring Paradigms tutorial.

fmin = 8
fmax = 35
paradigm = LeftRightImagery(fmin=fmin, fmax=fmax)
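
# As a quick check, a paradigm can return the epoched data directly (a
# minimal sketch, reusing the `datasets` list defined in the first example):
X, labels, meta = paradigm.get_data(dataset=datasets[0], subjects=[1])
print(X.shape)  # (n_trials, n_channels, n_times)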

##########################################################################
# Evaluation
# --------------------
#
# An evaluation defines how the training and test sets are chosen. This could
# be cross-validated within a single recording, or across days, or sessions, or
# subjects. This is also the place to specify multiple threads.

evaluation = CrossSessionEvaluation(paradigm=paradigm,
                                    datasets=datasets,
                                    suffix="examples",
                                    overwrite=False)
results = evaluation.process(pipelines)
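
# Note: to run the evaluation over multiple threads, moabb evaluations accept
# an `n_jobs` argument (an assumption about the evaluation API), e.g.:
# evaluation = CrossSessionEvaluation(paradigm=paradigm, datasets=datasets,
#                                     n_jobs=4)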

##########################################################################
# Results are returned as a pandas DataFrame, and from here you can work with
# them however you want.

print(results.head())
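
# For example, average the ROC-AUC per pipeline (`score` and `pipeline` are
# standard columns of moabb result DataFrames):
print(results.groupby("pipeline").score.mean())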
##############################################################################
# We define a pipeline that flattens the MNE epochs with a vectorizer, scales
# the features, and fits an L1-regularized logistic regression. This pipeline
# is evaluated across sessions using the ROC-AUC metric.
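
# `MyVectorizer` is not defined in this excerpt. A minimal sketch of such a
# transformer, flattening MNE epochs into 2D feature arrays (an assumed
# implementation, not necessarily the original one):
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler


class MyVectorizer(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        # Stateless: nothing to learn from the training epochs.
        return self

    def transform(self, X):
        # X is an mne.Epochs object when `return_epochs=True`; flatten each
        # epoch (channels x times) into a single feature vector.
        arr = X.get_data()
        return arr.reshape(len(arr), -1)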

mne_ppl = {}
mne_ppl["MNE LR"] = make_pipeline(
    MyVectorizer(), StandardScaler(), LogisticRegression(penalty="l1", solver="liblinear")
)

mne_eval = CrossSessionEvaluation(
    paradigm=paradigm,
    datasets=datasets,
    suffix="examples",
    overwrite=True,
    return_epochs=True,
)
mne_res = mne_eval.process(mne_ppl)

##############################################################################
# Advanced MNE pipeline
# ---------------------
#
# In some cases, the MNE pipeline needs access to the original labels from
# the dataset. This is the case for MNE's XDAWN code. One can pass
# `mne_labels` to the evaluation in order to keep these labels.
# As an example, we will define a pipeline that computes XDAWN spatial
# filters, rescales the features, then applies a logistic regression.
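
# Imports for the MNE estimators used below (omitted from this excerpt):
from mne.decoding import Vectorizer
from mne.preprocessing import Xdawn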

mne_adv = {}
mne_adv["XDAWN LR"] = make_pipeline(
    Xdawn(n_components=5, reg="ledoit_wolf", correct_overlap=False),
    Vectorizer(),
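
# Running the advanced pipeline (a sketch mirroring the evaluation above;
# `mne_labels=True` keeps the original dataset labels, as described):
adv_eval = CrossSessionEvaluation(
    paradigm=paradigm,
    datasets=datasets,
    suffix="examples",
    overwrite=True,
    return_epochs=True,
    mne_labels=True,
)
adv_res = adv_eval.process(mne_adv)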