##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (SSVEP) and use the dataset available for it.
# The evaluation will return a dataframe containing a single AUC score for
# each subject / session of the dataset, and for each pipeline.
#
# Results are saved into a database, so that if you add a new pipeline, the
# evaluation will not be rerun unless a parameter has changed. Results can
# be overwritten if necessary.

overwrite = False  # set to True if we want to overwrite cached results

evaluation = CrossSubjectEvaluation(paradigm=paradigm,
                                    datasets=dataset,
                                    overwrite=overwrite)
results = evaluation.process(pipelines)
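
##############################################################################
# As a quick sanity check, we can look at the first rows of the returned
# dataframe. A minimal sketch: the exact column names (e.g. 'score',
# 'subject', 'pipeline') are assumed from typical evaluation output and may
# differ.

print(results.head())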

# Filter-bank processing: the filters are determined automatically from the
# stimulation frequency values of the events.
evaluation_fb = CrossSubjectEvaluation(paradigm=paradigm_fb,
                                       datasets=dataset,
                                       overwrite=overwrite)
results_fb = evaluation_fb.process(pipelines_fb)
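
##############################################################################
# For reference, the filter-bank paradigm used above would have been defined
# earlier in the script, along these lines. A hedged sketch: the
# FilterBankSSVEP import path and the filters=None convention (infer the
# sub-bands from the event frequencies) are assumed and may need adjusting.

from moabb.paradigms import FilterBankSSVEP  # assumed import path

paradigm_fb = FilterBankSSVEP(filters=None)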

##############################################################################
# After processing the two evaluations, we simply concatenate the results.

results = pd.concat([results, results_fb])
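
##############################################################################
# A quick summary of the combined scores, per pipeline. A minimal sketch,
# assuming 'pipeline' and 'score' columns as above.

print(results.groupby('pipeline')['score'].agg(['mean', 'std']))
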
##############################################################################
# Create Pipelines
# ----------------
#
# We now move to a motor imagery evaluation and define a fresh set of
# pipelines, as a dict of sklearn pipelines.

pipelines = {}

# Riemannian geometry: covariances, tangent space mapping, logistic regression.
pipelines['RG + LR'] = make_pipeline(Covariances(), TangentSpace(),
                                     LogisticRegression())

# CSP spatial filtering with 8 components, then logistic regression.
pipelines['CSP + LR'] = make_pipeline(CSP(n_components=8),
                                      LogisticRegression())

# Riemannian geometry with a linear discriminant analysis classifier.
pipelines['RG + LDA'] = make_pipeline(Covariances(), TangentSpace(), LDA())
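
##############################################################################
# As an illustrative sanity check, a pipeline can be fit on synthetic epochs.
# A minimal sketch, assuming the usual (trials, channels, samples) input
# shape; the shapes and labels below are arbitrary, not from any dataset.

import numpy as np

X_toy = np.random.randn(20, 8, 128)  # 20 trials, 8 channels, 128 samples
y_toy = np.array([0, 1] * 10)        # two balanced classes
pipelines['RG + LR'].fit(X_toy, y_toy)
print(pipelines['RG + LR'].predict(X_toy[:2]))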

##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (MotorImagery) and use the first two datasets
# available for it.
# The evaluation will return a dataframe containing a single AUC score for
# each subject / session of the dataset, and for each pipeline.
#
# Results are saved into a database, so that if you add a new pipeline, the
# evaluation will not be rerun unless a parameter has changed. Results can
# be overwritten if necessary.

paradigm = MotorImagery()
datasets = paradigm.datasets[:2]
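# Quick check of which datasets were selected; the `code` attribute name is
# assumed from the dataset API and may differ.
print([d.code for d in datasets])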
overwrite = False  # set to True if we want to overwrite cached results
evaluation = CrossSubjectEvaluation(paradigm=paradigm,
                                    datasets=datasets,
                                    suffix='examples',
                                    overwrite=overwrite)

results = evaluation.process(pipelines)
print(results.head())
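
##############################################################################
# Finally, the scores can be visualised. A minimal sketch using seaborn,
# assuming 'score' and 'pipeline' columns as above; the plotting choices are
# illustrative only.

import matplotlib.pyplot as plt
import seaborn as sns

fig, ax = plt.subplots(figsize=(8, 7))
sns.stripplot(data=results, y='score', x='pipeline', ax=ax, alpha=.5)
sns.pointplot(data=results, y='score', x='pipeline', ax=ax)
ax.set_ylabel('ROC AUC')
plt.show()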