# the data and we keep only the first 3 classes, that is, stimulation
# frequencies of 6.66, 7.50 and 8.57 Hz.
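# The fmin and fmax arguments define the band-pass filter applied to the raw
# signal before epoching.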

paradigm = SSVEP(fmin=3, fmax=15, n_classes=3)

##############################################################################
# Create pipelines
# ----------------
#
# Use a Canonical Correlation Analysis classifier

interval = dataset.interval
freqs = paradigm.used_events(dataset)
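
# The SSVEP_CCA classifier builds sine and cosine reference signals at each
# stimulation frequency returned by `used_events` and at its harmonics
# (n_harmonics per class), over the epoch interval, and assigns each trial to
# the class with the highest canonical correlation.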

pipeline = {}
pipeline["CCA"] = make_pipeline(SSVEP_CCA(interval=interval, freqs=freqs, n_harmonics=3))

##############################################################################
# Get data (optional)
# -------------------
#
# To access the EEG signals downloaded with the dataset, you could use
# `dataset.get_data(subjects=[subject_id])` to obtain the EEG in MNE format,
# stored in a dictionary of sessions and runs.
# Alternatively, `paradigm.get_data(dataset=dataset, subjects=[subject_id])`
# returns the EEG data as scikit-learn-style arrays, along with the labels
# and the meta information. In `paradigm.get_data`, the EEG is preprocessed
# according to the paradigm requirements.

# sessions = dataset.get_data(subjects=[3])
# X, labels, meta = paradigm.get_data(dataset=dataset, subjects=[3])
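#
# For instance, if the second line above were uncommented, the returned
# objects could be inspected as follows (shapes are only indicative):
#
# print(X.shape)        # (n_trials, n_channels, n_times)
# print(set(labels))    # the three stimulation frequencies, as strings
# print(meta.head())    # subject / session / run information for each trial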
##############################################################################
# Create pipelines
# ----------------
#
# Pipelines must be a dict of sklearn pipeline transformers.
# The first pipeline uses Riemannian geometry: it builds extended covariance
# matrices from the signal filtered around the considered frequencies and
# applies a logistic regression in the tangent space.
# The second pipeline relies on the CCA classifier defined above.

pipelines_fb = {}
pipelines_fb['RG + LogReg'] = make_pipeline(
    ExtendedSSVEPSignal(), Covariances(estimator='lwf'), TangentSpace(),
    LogisticRegression(solver='lbfgs', multi_class='auto'))

pipelines = {}
pipelines['CCA'] = make_pipeline(
    SSVEP_CCA(interval=interval, freqs=freqs, n_harmonics=3))
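
# Note that ExtendedSSVEPSignal expects filter bank epochs, with one band-pass
# filter per stimulation frequency, and stacks the frequency axis into the
# channel axis before covariance estimation. The Riemannian pipeline is
# therefore meant to be used with a filter bank paradigm such as
# FilterBankSSVEP rather than the plain SSVEP paradigm defined above. As a
# rough, illustrative sketch with hypothetical shapes:
#
# X_fb = np.random.randn(10, 8, 128, 3)              # (trials, channels, times, freqs)
# X_ext = ExtendedSSVEPSignal().fit_transform(X_fb)  # -> (10, 8 * 3, 128)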

##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (SSVEP) and use the dataset available for it.
# The evaluation will return a DataFrame containing a single score for each
# subject / session of the dataset, and for each pipeline (here the score is
# the classification accuracy, since the paradigm has more than two classes).
#
# Results are saved in a local database, so that if you add a new pipeline,
# the evaluation will not run again unless a parameter has changed. Results
# can be overwritten if necessary.

overwrite = False  # set to True if we want to overwrite cached results
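
# As a minimal sketch of how such an evaluation is typically launched in MOABB
# (CrossSessionEvaluation and the suffix value are assumptions here; the
# evaluation class actually used may differ):
#
# evaluation = CrossSessionEvaluation(
#     paradigm=paradigm, datasets=dataset, suffix="ssvep_example",
#     overwrite=overwrite)
# results = evaluation.process(pipelines)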