Example #1
def call_data(dataset_name, subject_ids):
    '''Fetch one MOABBDataset per subject.

    :param dataset_name: (str) dataset name passed to MOABBDataset
                        (e.g. "BNCI2014001", "BNCI2014004")
    :param subject_ids: (list) subject numbers to fetch
            BNCI2014001: 1~9
            BNCI2014004: 1~9
    :return: (list) a list of braindecode.datasets.moabb.MOABBDataset,
            one per subject
    '''
    datasets = [
        MOABBDataset(dataset_name=dataset_name, subject_ids=[i])
        for i in subject_ids
    ]
    return datasets
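
# Example usage (a sketch): `call_data` relies on the MOABBDataset import,
# added here so the snippet is self-contained; subject numbering follows
# the docstring above.
from braindecode.datasets import MOABBDataset

datasets = call_data("BNCI2014001", subject_ids=[1, 2])
print(len(datasets))            # one MOABBDataset per subject
print(datasets[0].description)  # per-subject description DataFrame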
Example #2
In this example, we show multiple ways to split datasets.
"""

# Authors: Lukas Gemein <*****@*****.**>
#
# License: BSD (3-clause)

from IPython.display import display

from braindecode.datasets import MOABBDataset
from braindecode.preprocessing.windowers import create_windows_from_events

###############################################################################
# First, we create a dataset based on BCIC IV 2a fetched with MOABB,
ds = MOABBDataset(dataset_name="BNCI2014001", subject_ids=[1])

###############################################################################
# ds has a pandas DataFrame with additional description of its internal datasets
display(ds.description)

###############################################################################
# We can split the dataset based on the info in the description, for example
# based on different runs. The returned dictionary will have string keys
# corresponding to unique entries in the description DataFrame column
splits = ds.split("run")
display(splits)
display(splits["run_4"].description)

###############################################################################
# We can also split the dataset based on a list of integers corresponding to
# rows of the description DataFrame.
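
# A sketch of such an index-based split (assuming split() also accepts dataset
# indices, or lists of indices for multiple splits):
splits = ds.split([0, 1])
display(splits)
splits = ds.split([[0, 1], [2, 3]])
display(splits)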
Example #3
import copy

import pytest
import numpy as np

from braindecode.datasets import MOABBDataset
from braindecode.datautil.preprocess import preprocess, zscore, scale, \
    Preprocessor, filterbank, exponential_moving_demean, \
    exponential_moving_standardize, MNEPreproc, NumpyPreproc
from braindecode.datautil.windowers import create_fixed_length_windows

# We can't use fixtures with scope='module' as the dataset objects are modified
# in place during preprocessing. To avoid the long setup time caused by calling
# the dataset/windowing functions multiple times, we instantiate the dataset
# objects once and deep-copy them in the fixtures.
raw_ds = MOABBDataset(dataset_name='BNCI2014001', subject_ids=[1, 2])
windows_ds = create_fixed_length_windows(raw_ds,
                                         start_offset_samples=100,
                                         stop_offset_samples=None,
                                         window_size_samples=1000,
                                         window_stride_samples=1000,
                                         drop_last_window=True,
                                         mapping=None,
                                         preload=True)


@pytest.fixture
def base_concat_ds():
    return copy.deepcopy(raw_ds)
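
# A hypothetical test sketch using the fixture above: applying a Preprocessor
# in place (here zscore on the numpy array) should not change the data shape.
def test_preprocess_keeps_shape(base_concat_ds):
    shape_before = base_concat_ds.datasets[0].raw.get_data().shape
    preprocess(base_concat_ds, [Preprocessor(zscore, apply_on_array=True)])
    assert base_concat_ds.datasets[0].raw.get_data().shape == shape_before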

Example #4

from braindecode.datasets import MOABBDataset


def base_concat_ds():
    return MOABBDataset(dataset_name="BNCI2014001", subject_ids=[1, 2])

Example #5

######################################################################
# Loading and preprocessing the dataset
# -------------------------------------

######################################################################
# Loading
# ~~~~~~~
#

from braindecode import EEGClassifier
from skorch.helper import predefined_split
from skorch.callbacks import LRScheduler
from braindecode.datasets import MOABBDataset

subject_id = 3
dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=[subject_id])

######################################################################
# Preprocessing
# ~~~~~~~~~~~~~
#

from braindecode.preprocessing import (exponential_moving_standardize,
                                       preprocess, Preprocessor)

low_cut_hz = 4.  # low cut frequency for filtering
high_cut_hz = 38.  # high cut frequency for filtering
# Parameters for exponential moving standardization
factor_new = 1e-3
init_block_size = 1000
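
# A sketch of how these parameters are typically applied (similar to the
# braindecode tutorials; the channel picking and the volt-to-microvolt scaling
# below are assumptions about the recording, not part of the snippet above):
preprocessors = [
    Preprocessor('pick_types', eeg=True, meg=False, stim=False),    # keep EEG channels only
    Preprocessor(lambda data: data * 1e6),                          # assumed V -> uV conversion
    Preprocessor('filter', l_freq=low_cut_hz, h_freq=high_cut_hz),  # bandpass filter
    Preprocessor(exponential_moving_standardize,                    # exponential moving standardization
                 factor_new=factor_new, init_block_size=init_block_size),
]
preprocess(dataset, preprocessors)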
Example #6
def setup_concat_raw_dataset():
    return MOABBDataset(dataset_name="BNCI2014001", subject_ids=[1])
Example #7
# Authors: Lukas Gemein <*****@*****.**>
#          Hubert Banville <*****@*****.**>
#          Simon Brandt <*****@*****.**>
#
# License: BSD (3-clause)

import matplotlib.pyplot as plt

from braindecode.datasets import MOABBDataset
from braindecode.preprocessing.windowers import \
    create_windows_from_events, create_fixed_length_windows
from braindecode.preprocessing.preprocess import preprocess, Preprocessor

###############################################################################
# First, we create a dataset based on BCIC IV 2a fetched with MOABB,
dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=[1])

###############################################################################
# dataset has a pandas DataFrame with additional description of its internal datasets
dataset.description

##############################################################################
# We can iterate through dataset, which yields one time point of a continuous signal x,
# and a target y (which can be None if targets are not defined for the entire
# continuous signal).
for x, y in dataset:
    print(x.shape, y)
    break

##############################################################################
# We can apply preprocessing transforms that are defined in mne and work
# in-place on the continuous data, such as resampling, bandpass filtering,
# or picking channels.
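
# A short sketch of such a step (the target sampling frequency is an arbitrary
# choice for illustration):
preprocess(dataset, [Preprocessor('resample', sfreq=100)])
print(dataset.datasets[0].raw.info["sfreq"])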
Example #8
import numpy as np
import torch

with torch.no_grad():
    dummy_input = torch.tensor(
        np.ones((1, n_chans, input_time_length, 1), dtype=np.float32),
        device=device,
    )
    n_preds_per_input = model(dummy_input).shape[2]

dataset = MOABBDataset(
    "BNCI2014001",
    subject_ids=[1],
    trial_start_offset_samples=-125,
    trial_stop_offset_samples=1000,
    supercrop_size_samples=1000,
    supercrop_stride_samples=n_preds_per_input,
    drop_samples=False,
    mapping={
        1: 0,
        2: 1,
        3: 2,
        4: 3
    },
)


class TrainTestBCICIV2aSplit(object):
    def __call__(self, dataset, y, **kwargs):
        splitted = dataset.split('session')
        return splitted['session_T'], splitted['session_E']
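
# A sketch of how such a splitter could be plugged into skorch's EEGClassifier
# as its train_split (hypothetical setup: the `model`, the windowed `dataset`
# and `device` are assumed to be defined elsewhere in the script):
from braindecode import EEGClassifier

clf = EEGClassifier(
    model,
    criterion=torch.nn.NLLLoss,
    optimizer=torch.optim.AdamW,
    train_split=TrainTestBCICIV2aSplit(),  # skorch calls this with (dataset, y)
    batch_size=64,
    device=device,
)
# clf.fit(dataset, y=None, epochs=20)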

Example #9

Split Dataset Example
=====================

In this example, we show multiple ways to split datasets.
"""

# Authors: Lukas Gemein <*****@*****.**>
#
# License: BSD (3-clause)

from braindecode.datasets import MOABBDataset
from braindecode.preprocessing import create_windows_from_events

###############################################################################
# First, we create a dataset based on BCIC IV 2a fetched with MOABB,
dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=[1])

###############################################################################
# dataset has a pandas DataFrame with additional description of its internal datasets
dataset.description

###############################################################################
# We can split the dataset based on the info in the description, for example
# based on different runs. The returned dictionary will have string keys
# corresponding to unique entries in the description DataFrame column
splits = dataset.split("run")
print(splits)
splits["run_4"].description

###############################################################################
# We can also split the dataset based on a list of integers corresponding to
# rows of the description DataFrame.

Example #10

# Authors: Lukas Gemein <*****@*****.**>
#
# License: BSD (3-clause)

import tempfile

from braindecode.datasets import MOABBDataset
from braindecode.preprocessing import preprocess, Preprocessor
from braindecode.datautil import load_concat_dataset
from braindecode.preprocessing import create_windows_from_events


###############################################################################
# First, we load a dataset using MOABB.
dataset = MOABBDataset(
    dataset_name='BNCI2014001',
    subject_ids=[1],
)

###############################################################################
# We can apply preprocessing steps to the dataset. It is also possible to skip
# this step and not apply any preprocessing.
preprocess(
    concat_ds=dataset,
    preprocessors=[Preprocessor(fn='resample', sfreq=10)]
)

###############################################################################
# We save the dataset to an existing directory. It will create a '.fif' file
# for every dataset in the concat dataset. Additionally, it will create two
# JSON files, the first holding the description of the dataset, the second
# holding the name of the target. If you want to store to the same directory
# more than once, set overwrite=True to replace the existing files.
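
# A sketch of the save / load round trip (the temporary directory is only for
# illustration):
tmpdir = tempfile.mkdtemp()
dataset.save(path=tmpdir, overwrite=True)

loaded_dataset = load_concat_dataset(path=tmpdir, preload=False)
print(loaded_dataset.description)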
Example #11
Simple MOABB Dataset Example
============================

Showcasing how to fetch and crop a MOABB dataset.
"""

# Authors: Lukas Gemein <*****@*****.**>
#
# License: BSD (3-clause)

from braindecode.datasets import MOABBDataset

# create a dataset based on BCIC IV 2a fetched with moabb
ds = MOABBDataset(dataset_name="BNCI2014001",
                  subject_ids=[4],
                  trial_start_offset_samples=0,
                  trial_stop_offset_samples=1000,
                  supercrop_size_samples=1000,
                  supercrop_stride_samples=1000)

# we can iterate through ds which yields an example x, target y,
# and info as i_supercrop_in_trial, i_start_in_trial, and i_stop_in_trial
# which is required for combining supercrop predictions in the scorer
for x, y, info in ds:
    print(x.shape, y, info)
    break

# each base_ds in ds has its own info DataFrame
print(ds.datasets[-1].info)
# ds has a concatenation of all DataFrames of its datasets
print(ds.info)