Example #1
# Restrict the analysis to channels C3, Cz and C4, and resample all datasets
# to 200 Hz.

paradigm = LeftRightImagery(channels=['C3', 'C4', 'Cz'], resample=200.)

##############################################################################
# Evaluation
# ----------
#
# The evaluation is conducted with two pipelines, CSP+LDA and tangent space +
# logistic regression, using only the 3 selected electrodes at a sampling
# rate of 200 Hz.

evaluation = WithinSessionEvaluation(paradigm=paradigm, datasets=datasets)
csp_lda = make_pipeline(CSP(n_components=2), LDA())
ts_lr = make_pipeline(Covariances(estimator='oas'),
                      TangentSpace(metric='riemann'), LR(C=1.0))
results = evaluation.process({'csp+lda': csp_lda, 'ts+lr': ts_lr})
print(results.head())

##############################################################################
# Electrode selection
# -------------------
#
# It is possible to select the electrodes that are shared by all datasets
# using the `find_intersecting_channels` function. Datasets that share no
# channels with the others are discarded. The function returns the set of
# common channels and the list of datasets with valid channels.

electrodes, datasets = find_intersecting_channels(datasets)
evaluation = WithinSessionEvaluation(paradigm=paradigm,
                                     datasets=datasets,
                                     overwrite=True)
Example #2
data_size = dict(policy="ratio", value=np.geomspace(0.02, 1, 6))
# When the training data is sparse, perform more permutations than when we
# have a lot of data.
n_perms = np.floor(np.geomspace(20, 2, len(data_size["value"]))).astype(int)
print(n_perms)
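
# The "ratio" policy interprets `value` as fractions of the available training
# data. MOABB also supports a "per_class" policy, where `value` gives absolute
# numbers of trials per class; a minimal sketch of that alternative (not used
# below):
data_size_per_class = dict(policy="per_class", value=np.array([8, 16, 32, 64]))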
# Guarantee reproducibility
np.random.seed(7536298)
evaluation = WithinSessionEvaluation(
    paradigm=paradigm,
    datasets=datasets,
    data_size=data_size,
    n_perms=n_perms,
    suffix="examples_lr",
    overwrite=overwrite,
)

results = evaluation.process(pipelines)

##############################################################################
# Plot Results
# ------------
#
# Here we plot the results.

fig, ax = plt.subplots(facecolor="white", figsize=[8, 4])

n_subs = len(dataset.subject_list)

if n_subs > 1:
    r = results.groupby(["pipeline", "subject",
                         "data_size"]).mean().reset_index()
else:
    r = results
Example #3
datasets = [Zhou2016(), BNCI2014001()]
subj = [1, 2, 3]
for d in datasets:
    d.subject_list = subj

# The following lines go exactly as in the previous example: we end up with a
# pandas dataframe containing the results of the evaluation. We set
# `overwrite` to False to cache the results, avoiding restarting the whole
# evaluation from scratch if a problem occurs.
paradigm = LeftRightImagery()
evaluation = WithinSessionEvaluation(paradigm=paradigm,
                                     datasets=datasets,
                                     overwrite=False)
pipeline = make_pipeline(CSP(n_components=8), LDA())
results = evaluation.process({"csp+lda": pipeline})

##############################################################################
# Plotting Results
# ----------------
#
# We plot the results using the seaborn library. Note how easy it is to plot
# the results from both datasets with just one line.

results["subj"] = [str(resi).zfill(2) for resi in results["subject"]]
g = sns.catplot(
    kind="bar",
    x="score",
    y="subj",
    col="dataset",
    data=results,
)
Example #4
# When instantiating `WithinSessionEvaluation`, we specify the paradigm, the
# datasets to analyze, and whether the scores should be recalculated each time
# we run the evaluation or if MOABB should create a cache file.
#
# Note that there are different ways of evaluating a classifier; in this
# example, we choose `WithinSessionEvaluation`, which consists of doing a
# cross-validation procedure where the training and testing partitions are from
# the same recording session of the dataset. We could have used
# `CrossSessionEvaluation`, which takes all but one session as the training
# partition and the remaining one as the testing partition; a commented
# sketch of this alternative follows the code below.

evaluation = WithinSessionEvaluation(paradigm=paradigm,
                                     datasets=[dataset],
                                     overwrite=True)
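
# A minimal sketch of the cross-session alternative mentioned above (not used
# in the rest of this example; it assumes the dataset provides more than one
# session per subject):
#
# from moabb.evaluations import CrossSessionEvaluation
# evaluation = CrossSessionEvaluation(paradigm=paradigm,
#                                     datasets=[dataset],
#                                     overwrite=True)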

# We obtain the results in the form of a pandas dataframe
results = evaluation.process({'csp+lda': pipeline})

# To export the results to a CSV file inside a directory:
if not os.path.exists('./results'):
    os.mkdir('./results')
results.to_csv('./results/results_part2-1.csv')

# To load previously obtained results saved in CSV
results = pd.read_csv('./results/results_part2-1.csv')

##############################################################################
# Plotting Results
# ----------------
#
# We create a figure with the seaborn package comparing the classification
# score for each subject on each session. Note that the 'subject' field from
# the results dataframe is an integer, so we convert it to a string before
# plotting.
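
# A minimal sketch of such a figure, assuming the standard MOABB results
# columns ('score', 'subject', 'session'); the exact styling is illustrative:
import matplotlib.pyplot as plt
import seaborn as sns

fig, ax = plt.subplots(figsize=(8, 7))
results["subj"] = results["subject"].apply(str)
sns.barplot(x="score", y="subj", hue="session", data=results,
            orient="h", palette="viridis", ax=ax)
fig.show()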
Example #5
        if isinstance(config['pipeline'], BaseEstimator):
            pipeline = deepcopy(config['pipeline'])
        else:
            log.error(config['pipeline'])
            raise ValueError(
                'pipeline must be a list or a sklearn estimator')

        # append the pipeline to this paradigm's pipeline dict
        if paradigm not in paradigms:
            paradigms[paradigm] = {}

        # FIXME: pipeline names are not unique
        log.debug('Pipeline: \n\n {} \n'.format(get_string_rep(pipeline)))
        paradigms[paradigm][config['name']] = pipeline
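
# For reference, a minimal, hypothetical config entry that the loop above
# would accept (the keys mirror the code; the concrete pipeline is
# illustrative only):
#
# config = {
#     'name': 'csp_lda',
#     'paradigms': ['LeftRightImagery'],
#     'pipeline': make_pipeline(CSP(n_components=8), LDA()),
# }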

all_results = []
for paradigm in paradigms:
    # get the context
    if len(context_params) == 0:
        context_params[paradigm] = {}
    log.debug('{}: {}'.format(paradigm, context_params[paradigm]))
    p = getattr(moabb_paradigms, paradigm)(**context_params[paradigm])
    context = WithinSessionEvaluation(paradigm=p,
                                      random_state=42,
                                      n_jobs=options.threads)
    results = context.process(pipelines=paradigms[paradigm])
    all_results.append(results)
analyze(pd.concat(all_results, ignore_index=True),
        options.output,
        plot=options.plot)
Example #7
        return [path]  # it has to return a list


##############################################################################
# Using the ExampleDataset
# ------------------------
#
# Now that the `ExampleDataset` is defined, it can be instantiated directly.
# The rest of the code follows the steps described in the previous tutorials.
dataset = ExampleDataset()

paradigm = LeftRightImagery()
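# Quick sanity check: fetch the epochs, labels and metadata for subject 1.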
X, labels, meta = paradigm.get_data(dataset=dataset, subjects=[1])

evaluation = WithinSessionEvaluation(paradigm=paradigm, datasets=dataset,
                                     overwrite=True)
pipelines = {}
pipelines['MDM'] = make_pipeline(Covariances('oas'), MDM(metric='riemann'))
scores = evaluation.process(pipelines)

print(scores)

##############################################################################
# Pushing on MOABB Github
# -----------------------
#
# If you want to make your dataset available to everyone, you can upload your
# data to a public server (like Zenodo or Figshare) and signal that you want
# to add your dataset to MOABB in the [dedicated issue](https://github.com/NeuroTechX/moabb/issues/1).  # noqa: E501
# You can then follow the instructions on [how to contribute](https://github.com/NeuroTechX/moabb/blob/master/CONTRIBUTING.md).  # noqa: E501
Example #8
import logging
import coloredlogs
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
coloredlogs.install(level=logging.DEBUG)

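# Search the dataset registry for imagery datasets that include at least one
# of the requested events, have two or more subjects and a single session.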
datasets = utils.dataset_search('imagery',
                                events=['supination', 'hand_close'],
                                has_all_events=False,
                                min_subjects=2,
                                multi_session=False)

for d in datasets:
    d.subject_list = d.subject_list[:10]

paradigm = ImageryNClass(2)
context = WithinSessionEvaluation(paradigm=paradigm,
                                  datasets=datasets,
                                  random_state=42)

pipelines = OrderedDict()
pipelines['av+TS'] = make_pipeline(Covariances(estimator='oas'),
                                   TSclassifier())
pipelines['av+CSP+LDA'] = make_pipeline(Covariances(estimator='oas'), CSP(8),
                                        LDA())

results = context.process(pipelines, overwrite=True)

analyze(results, './')