Example 1
# Imports assumed from braindecode (the windower import matches Example 4 below).
from braindecode.datasets.base import BaseDataset, BaseConcatDataset
from braindecode.datasets.moabb import fetch_data_with_moabb
from braindecode.preprocessing.windowers import create_fixed_length_windows


def windows_ds():
    raws, description = fetch_data_with_moabb(
        dataset_name='BNCI2014001', subject_ids=4)
    # wrap the first three recordings as BaseDatasets and concatenate them
    ds = [BaseDataset(raws[i], description.iloc[i]) for i in range(3)]
    concat_ds = BaseConcatDataset(ds)

    windows_ds = create_fixed_length_windows(
        concat_ds=concat_ds, start_offset_samples=0, stop_offset_samples=None,
        window_size_samples=500, window_stride_samples=500,
        drop_last_window=False, preload=False)

    return windows_ds
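A minimal usage sketch for the function above (the driver lines are hypothetical, not part of the original snippet); as Example 5 shows, iterating a windows dataset yields (x, y, window_ind) tuples:

ws = windows_ds()
for x, y, window_ind in ws:
    # each window is 500 samples long, as configured above
    print(x.shape, y, window_ind)
    break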
Example 2
# `set_up` is a pytest fixture defined elsewhere in the test module; the
# imports below are assumed from braindecode.
from braindecode.datasets.base import BaseConcatDataset
from braindecode.preprocessing.windowers import create_fixed_length_windows


def test_multi_target_dataset(set_up):
    _, base_dataset, _, _, _, _ = set_up
    base_dataset.target_name = ['pathological', 'gender', 'age']
    x, y = base_dataset[0]
    assert len(y) == 3
    assert base_dataset.description.to_list() == y
    concat_ds = BaseConcatDataset([base_dataset])
    windows_ds = create_fixed_length_windows(
        concat_ds,
        window_size_samples=100,
        window_stride_samples=100,
        start_offset_samples=0,
        stop_offset_samples=None,
        drop_last_window=False,
        mapping={True: 1, False: 0, 'M': 0, 'F': 1},  # map non-numeric targets
    )
    x, y, ind = windows_ds[0]
    assert len(y) == 3
    assert y == [1, 0, 48]  # order matters: pathological, gender, age
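To make the mapping step concrete, here is an illustrative sketch (not braindecode's actual implementation) of how such a mapping could apply per target, leaving numeric targets like age untouched:

mapping = {True: 1, False: 0, 'M': 0, 'F': 1}
raw_targets = [True, 'M', 48]  # pathological, gender, age
mapped = [mapping.get(t, t) for t in raw_targets]  # fall back to the raw value
assert mapped == [1, 0, 48]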
Example 3
# `tuh_splits` is assumed upstream of this excerpt: a dict mapping recording
# ids to BaseConcatDatasets, e.g. obtained by splitting a TUH dataset.
for rec_i, tuh_subset in tuh_splits.items():
    # TODO: implement preprocess for BaseDatasets? Would remove the need
    # to split above
    preprocess(tuh_subset, preprocessors)

    # update description of the recording(s); use set_description so the
    # changes persist (assigning to the `description` property would not)
    tuh_subset.set_description({
        'sfreq': len(tuh_subset.datasets) * [sfreq],
        'reference': len(tuh_subset.datasets) * ['ar'],
        'n_samples': [len(d) for d in tuh_subset.datasets],
    }, overwrite=True)

    if create_compute_windows:
        # generate compute windows here and store them to disk
        tuh_windows = create_fixed_length_windows(
            tuh_subset,
            start_offset_samples=0,
            stop_offset_samples=None,
            window_size_samples=window_size_samples,
            window_stride_samples=window_stride_samples,
            drop_last_window=False)
        # save memory by deleting raw recording
        del tuh_subset
        # store the number of windows required for loading later on
        tuh_windows.set_description(
            {"n_windows": [len(d) for d in tuh_windows.datasets]})

        # create one directory for every recording
        rec_path = os.path.join(OUT_PATH, str(rec_i))
        os.makedirs(rec_path, exist_ok=True)
        save_concat_dataset(rec_path, tuh_windows)
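A minimal sketch of loading one stored recording back, assuming the load_concat_dataset helper imported in Example 4 below; preload=False defers reading the signals until they are accessed:

from braindecode.datautil.serialization import load_concat_dataset

windows_ds = load_concat_dataset(rec_path, preload=False)
print(windows_ds.description['n_windows'])  # stored above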
Example 4
import copy

import pytest

from braindecode.datasets import MOABBDataset
from braindecode.preprocessing.preprocess import (
    preprocess, zscore, Preprocessor, filterbank, exponential_moving_demean,
    exponential_moving_standardize, MNEPreproc, NumpyPreproc, _replace_inplace,
    _set_preproc_kwargs)
from braindecode.preprocessing.preprocess import scale as deprecated_scale
from braindecode.preprocessing.windowers import create_fixed_length_windows
from braindecode.datautil.serialization import load_concat_dataset


# We can't use fixtures with scope='module' as the dataset objects are modified
# inplace during preprocessing. To avoid the long setup time caused by calling
# the dataset/windowing functions multiple times, we instantiate the dataset
# objects once and deep-copy them in the fixtures below.
raw_ds = MOABBDataset(dataset_name='BNCI2014001', subject_ids=[1, 2])
windows_ds = create_fixed_length_windows(
    raw_ds, start_offset_samples=100, stop_offset_samples=None,
    window_size_samples=1000, window_stride_samples=1000,
    drop_last_window=True, mapping=None, preload=True)


@pytest.fixture
def base_concat_ds():
    return copy.deepcopy(raw_ds)


@pytest.fixture
def windows_concat_ds():
    return copy.deepcopy(windows_ds)


def modify_windows_object(epochs, factor=1):
    # test helper: scale the underlying data of an mne.Epochs object in-place
    epochs._data *= factor
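A hypothetical test (not part of the original file) showing how these fixtures are meant to be used: every test receives its own deep copy, so in-place modifications cannot leak across tests:

def test_fixtures_are_isolated(windows_concat_ds):
    assert windows_concat_ds is not windows_ds
    assert len(windows_concat_ds.datasets) == len(windows_ds.datasets)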
Example 5
# Assumed setup for this excerpt: `windows_dataset` is a windows dataset
# created earlier in the tutorial, plotted on a row of `max_i + 1` axes.
import matplotlib.pyplot as plt

max_i = 2
fig, ax_arr = plt.subplots(1, max_i + 1, figsize=(15, 4), sharey=True)

for i, (x, y, window_ind) in enumerate(windows_dataset):
    ax_arr[i].plot(x.T)
    ax_arr[i].set_ylim(-4e-5, 4e-5)
    ax_arr[i].set_title(f"label={y}")
    if i == max_i:
        break

fig.tight_layout()

###############################################################################
# Alternatively, we can create evenly spaced ("sliding") windows using a
# different windower.
sliding_windows_dataset = create_fixed_length_windows(
    dataset,
    start_offset_samples=0,
    stop_offset_samples=None,  # None (rather than 0) means 'until the end'
    window_size_samples=1200,
    window_stride_samples=1000,
    drop_last_window=False)

print(len(sliding_windows_dataset))
for x, y, window_ind in sliding_windows_dataset:
    print(x.shape, y, window_ind)
    break

sliding_windows_dataset.description

###############################################################################
# Transforms can also be applied on windows in the same way as shown above on
# continuous data. First, let's look at the last example of the continuous
# data:
x, y = tuh[-1]
print('x:', x)
print('y:', y)

###############################################################################
# We will skip preprocessing steps for now, since preprocessing is not the aim
# of this example. Instead, we will directly create compute windows. We
# specify a mapping from the genders 'M' and 'F' to integers, since this is
# required for decoding.

tuh_windows = create_fixed_length_windows(
    tuh,
    start_offset_samples=0,
    stop_offset_samples=None,
    window_size_samples=1000,
    window_stride_samples=1000,
    drop_last_window=False,
    mapping={
        'M': 0,
        'F': 1
    },  # map the non-numeric gender labels to integers
)
# store the number of windows required for loading later on
tuh_windows.set_description(
    {"n_windows": [len(d) for d in tuh_windows.datasets]})

###############################################################################
# Iterating through the dataset yields x as an ndarray of shape
# (n_channels x 1000), y as [age, gender], and ind, the window indices. Let's
# look at the last example again.
x, y, ind = tuh_windows[-1]
print('x:', x)
print('y:', y)
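Since braindecode datasets are PyTorch datasets, the windows can also be batched with a standard DataLoader. A minimal sketch, assuming the default collate function stacks the (x, y, ind) tuples (batch size and shuffling are illustrative):

from torch.utils.data import DataLoader

loader = DataLoader(tuh_windows, batch_size=4, shuffle=True)
for batch_x, batch_y, batch_ind in loader:
    print(batch_x.shape)  # (4, n_channels, 1000)
    break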