def classify_vr(dataset, params, store):
    # instantiate the P300 paradigm
    paradigm = P300()
    scr = {}
    for lz in params.get_vr(dataset):
        print('running', lz)
        X_training, labels_training, X_test, labels_test = [], [], [], []
        if (params, lz) not in store:

            paradigm.tmax = lz.tmax
            paradigm.tmin = lz.tmin
            paradigm.fmin = lz.fmin
            paradigm.fmax = lz.fmax
            paradigm.resample = lz.fs

            # flag the experimental condition on the dataset (VR headset or PC screen)
            dataset.VR = lz.xpdesign == 'VR'
            dataset.PC = lz.xpdesign == 'PC'

            # get the epochs and labels
            X, labels, meta = paradigm.get_data(dataset, subjects=[lz.subject])
            labels = LabelEncoder().fit_transform(labels)

            # split in training and testing blocks
            X_training, labels_training, _ = get_block_repetition(
                X, labels, meta, lz.subset['train'], lz.repetitions)
            X_test, labels_test, _ = get_block_repetition(
                X, labels, meta, lz.subset['test'], lz.repetitions)

        scr[str(lz)] = use_store(params, store, lz, lz.validation,
                                 X_training, labels_training, X_test, labels_test, lz.condition, class_info_vr)

    return scr
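# The loop above configures the paradigm one attribute at a time. As an
# illustrative sketch (parameter values are placeholders, not taken from this
# project), the same epoching and filtering settings can be passed directly
# to the P300 constructor:

from moabb.paradigms import P300

paradigm = P300(fmin=1.0, fmax=24.0, tmin=0.0, tmax=0.8, resample=128)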
Example #2
def test_P300_paradigm(self):
    # with a good dataset
    paradigm = P300()
    dataset = FakeDataset(event_list=['Target', 'NonTarget'],
                          paradigm='p300')
    X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
    self.assertEqual(len(np.unique(labels)), 2)
    self.assertEqual(list(np.unique(labels)),
                     sorted(['Target', 'NonTarget']))
Example #3
def test_P300_paradigm(self):
    # with a good dataset
    paradigm = P300()
    dataset = FakeDataset(event_list=["Target", "NonTarget"], paradigm="p300")
    X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
    self.assertEqual(len(np.unique(labels)), 2)
    self.assertEqual(list(np.unique(labels)), sorted(["Target", "NonTarget"]))
    # should return epochs
    epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
    self.assertIsInstance(epochs, BaseEpochs)
Example #4
def get_benchmark_config(dataset_name,
                         cfg_prepro,
                         subjects=None,
                         sessions=None):
    benchmark_cfg = dict()
    paradigm = P300(resample=cfg_prepro['sampling_rate'],
                    fmin=cfg_prepro['fmin'],
                    fmax=cfg_prepro['fmax'],
                    reject_uv=cfg_prepro['reject_uv'],
                    baseline=cfg_prepro['baseline'])
    load_ival = [0, 1]
    if dataset_name == 'spot_single':
        d = SpotPilotData(load_single_trials=True)
        d.interval = load_ival
        if subjects is not None:
            d.subject_list = [d.subject_list[i] for i in subjects]
        n_channels = d.N_channels
    elif dataset_name == 'epfl':
        d = EPFLP300()
        d.interval = load_ival
        d.unit_factor = 1
        if subjects is not None:
            d.subject_list = [d.subject_list[i] for i in subjects]
        n_channels = 32
    elif dataset_name == 'bnci_1':
        d = bnci_1()
        d.interval = load_ival
        if subjects is not None:
            d.subject_list = [d.subject_list[i] for i in subjects]
        n_channels = 16
    elif dataset_name == 'bnci_als':
        d = bnci_als()
        d.interval = load_ival
        if subjects is not None:
            d.subject_list = [d.subject_list[i] for i in subjects]
        n_channels = 8
    elif dataset_name == 'bnci_2':
        d = bnci_2()
        d.interval = load_ival
        if subjects is not None:
            d.subject_list = [d.subject_list[i] for i in subjects]
        n_channels = 8
    elif dataset_name == 'braininvaders':
        d = bi2013a()
        d.interval = load_ival
        if subjects is not None:
            d.subject_list = [d.subject_list[i] for i in subjects]
        n_channels = 16
    else:
        raise ValueError(f'Dataset {dataset_name} not recognized.')

    benchmark_cfg['dataset'] = d
    benchmark_cfg['N_channels'] = n_channels
    benchmark_cfg['paradigm'] = paradigm
    return benchmark_cfg
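# For illustration only, a hypothetical call to the helper above could look
# like this; the dict keys match those read from cfg_prepro in the function
# body, but the values are placeholders.

cfg_prepro = dict(sampling_rate=100, fmin=0.5, fmax=16.0,
                  reject_uv=None, baseline=None)  # placeholder values
benchmark_cfg = get_benchmark_config('bnci_1', cfg_prepro, subjects=[0, 1])
print(benchmark_cfg['N_channels'])  # 16 for the 'bnci_1' branch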
Example #5
# Time-decoupled Covariance classifier, needs information about number of
# channels and time intervals
c = TimeDecoupledLda(N_channels=16, N_times=10)
# TD-LDA needs to know about the used jumping means intervals
c.preproc = jmv
pipelines["JM+TD-LDA"] = make_pipeline(jmv, c)

##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (P300) and use the BNCI 2014-009 dataset for it.
# The evaluation will return a dataframe containing AUCs for each permutation
# and dataset size.

paradigm = P300(resample=processing_sampling_rate)
dataset = BNCI2014009()
# Remove the slicing of the subject list to evaluate multiple subjects
dataset.subject_list = dataset.subject_list[0:1]
datasets = [dataset]
overwrite = True  # set to True if we want to overwrite cached results
data_size = dict(policy="ratio", value=np.geomspace(0.02, 1, 6))
# When the training data is sparse, perform more permutations than when we have a lot of data
n_perms = np.floor(np.geomspace(20, 2, len(data_size["value"]))).astype(int)
print(n_perms)
# Guarantee reproducibility
np.random.seed(7536298)
evaluation = WithinSessionEvaluation(
    paradigm=paradigm,
    datasets=datasets,
    data_size=data_size,
    n_perms=n_perms,
    overwrite=overwrite,
)
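# As in the within-session example further below, the configured evaluation
# would then typically be run on the pipelines dict defined above
# (illustrative sketch):
results = evaluation.process(pipelines)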
Example #6
def test_P300_wrongevent(self):
    # does not accept a dataset with a bad event list
    paradigm = P300()
    dataset = FakeDataset(paradigm="p300")
    self.assertRaises(AssertionError, paradigm.get_data, dataset)

warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=RuntimeWarning)
moabb.set_log_level("info")

###############################################################################
# Loading dataset
# ---------------
#
# Load the first 3 subjects of the BNCI 2014-009 dataset, which has 3 sessions per subject

dataset = BNCI2014009()
dataset.subject_list = dataset.subject_list[:3]
datasets = [dataset]
paradigm = P300()

##############################################################################
# Get data (optional)
# -------------------
#
# To access the EEG signals downloaded from the dataset, you could use
# `dataset.get_data([subject_id])` to obtain the raw EEG recordings, stored
# in a dictionary of sessions and runs.
# `paradigm.get_data(dataset=dataset, subjects=[subject_id])` returns the
# preprocessed EEG data (epochs), the labels and the meta information. By
# default, the EEG is returned as a numpy array. With `return_epochs=True`,
# MNE Epochs are returned instead.

subject_list = [1]
sessions = dataset.get_data(subject_list)
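
# As a minimal sketch of the paradigm-level access described above (shown
# here for subject 1 only), the preprocessed epochs, labels and meta
# information can be obtained directly from the paradigm:

X, labels, meta = paradigm.get_data(dataset=dataset, subjects=[1])
print(X.shape, len(labels))

# With return_epochs=True, MNE Epochs are returned instead of a numpy array.
epochs, _, _ = paradigm.get_data(dataset=dataset, subjects=[1], return_epochs=True)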
Example #8
                                       Vectorizer(),
                                       LDA(solver="lsqr", shrinkage="auto"))

##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (P300) and use the EPFL P300 dataset for it.
# The evaluation will return a dataframe containing a single AUC score for
# each subject / session of the dataset, and for each pipeline.
#
# Results are saved in a local database, so that if you add a new pipeline,
# the evaluation will not be run again unless a parameter has changed.
# Results can be overwritten if necessary.

paradigm = P300(resample=128)
dataset = EPFLP300()
dataset.subject_list = dataset.subject_list[:2]
datasets = [dataset]
overwrite = True  # set to True if we want to overwrite cached results
evaluation = WithinSessionEvaluation(paradigm=paradigm,
                                     datasets=datasets,
                                     suffix="examples",
                                     overwrite=overwrite)
results = evaluation.process(pipelines)

##############################################################################
# Plot Results
# ------------
#
# Here we plot the results.
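# As an illustrative sketch (assuming the standard moabb results dataframe,
# which contains "score" and "pipeline" columns), the scores could be
# visualized along these lines:

import matplotlib.pyplot as plt
import seaborn as sns

fig, ax = plt.subplots(figsize=(8, 4))
# one point per subject/session, plus the per-pipeline mean
sns.stripplot(data=results, x="pipeline", y="score", ax=ax, jitter=True, alpha=0.5)
sns.pointplot(data=results, x="pipeline", y="score", ax=ax)
ax.set_ylabel("ROC AUC")
plt.show()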