Example #1
from braindecode.preprocessing.windowers import create_windows_from_events


def concat_windows_dataset(concat_ds_targets):
    concat_ds, targets = concat_ds_targets
    windows_ds = create_windows_from_events(
        concat_ds=concat_ds, trial_start_offset_samples=0,
        trial_stop_offset_samples=0, window_size_samples=100,
        window_stride_samples=100, drop_last_window=False)

    return windows_ds
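
# A minimal usage sketch (not part of the original excerpt); it assumes
# ``concat_ds_targets`` is available, e.g. from a pytest fixture.
windows_ds = concat_windows_dataset(concat_ds_targets)
X, y, crop_inds = windows_ds[0]  # window data, target, and window indices
print(X.shape, y, crop_inds)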
Example #2
from braindecode.datasets import BaseConcatDataset, BaseDataset
from braindecode.datasets.moabb import fetch_data_with_moabb
from braindecode.preprocessing.windowers import create_windows_from_events


def target_windows_ds():
    raws, description = fetch_data_with_moabb(dataset_name='BNCI2014001',
                                              subject_ids=4)
    ds = [BaseDataset(raws[i], description.iloc[i]) for i in range(3)]
    concat_ds = BaseConcatDataset(ds)

    windows_ds = create_windows_from_events(concat_ds,
                                            trial_start_offset_samples=0,
                                            trial_stop_offset_samples=0,
                                            window_size_samples=None,
                                            window_stride_samples=None,
                                            drop_last_window=False)

    return windows_ds
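
# A hedged usage sketch (not part of the original excerpt): with
# window_size_samples=None and window_stride_samples=None, each trial yields
# exactly one window, so ``i_window_in_trial`` is always 0.
windows_ds = target_windows_ds()
metadata = windows_ds.datasets[0].windows.metadata  # mne.Epochs metadata
print(metadata[['i_window_in_trial', 'i_start_in_trial',
                'i_stop_in_trial']].head())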
Example #3
display(splits["run_4"].description)

###############################################################################
# We can also split the dataset based on a list of integers corresponding to
# rows in the description. In this case, the returned dictionary will have
# '0' as the only key
splits = ds.split([0, 1, 5])
display(splits)
display(splits["0"].description)

###############################################################################
# If we want multiple splits based on indices, we can also specify a list of
# lists of integers. In this case, the dictionary will have string keys
# representing the position of each inner list in the order it was given
splits = ds.split([[0, 1, 5], [2, 3, 4], [6, 7, 8, 9, 10, 11]])
display(splits)
display(splits["2"].description)

###############################################################################
# Similarly, we can split datasets after creating windows
windows = create_windows_from_events(ds,
                                     trial_start_offset_samples=0,
                                     trial_stop_offset_samples=0)
splits = windows.split("run")
display(splits)
splits = windows.split([4, 8])
display(splits)
splits = windows.split([[4, 8], [5, 9, 11]])
display(splits)
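
# Illustrative check (not part of the original excerpt): with a list of
# lists, the split keys are the string positions of the inner lists.
print(list(splits))  # expected: ['0', '1']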
Example #4
# supports it (TUHAbnormal, for example, supports 'pathological', 'age', and
# 'gender'). If you stored a preprocessed version with target 'pathological',
# it is possible to change the target upon loading.
ds_loaded = load_concat_dataset(
    path='./',
    preload=True,
    ids_to_load=[1, 3],
    target_name=None,
)

##############################################################################
# The serialization utility also supports WindowsDatasets, so we compute
# windows next.
windows_ds = create_windows_from_events(
    concat_ds=ds_loaded,
    trial_start_offset_samples=0,
    trial_stop_offset_samples=0,
)

##############################################################################
# Again, we save the dataset to an existing directory. This creates a
# '-epo.fif' file for every dataset in the concat dataset, as well as a
# JSON file holding the description of the dataset. If you want to save to
# the same directory several times, for example when trying different
# windowing parameters, you can choose to overwrite the existing files.
windows_ds.save(
    path='./',
    overwrite=True,
)
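
##############################################################################
# A hedged sketch (not part of the original excerpt): the saved windows can
# be restored with the same loading utility used above for the raw datasets.
windows_ds_loaded = load_concat_dataset(
    path='./',
    preload=False,
)
print(windows_ds_loaded.description)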
Example #5
from braindecode.preprocessing.windowers import create_windows_from_events


def setup_concat_windows_dataset(setup_concat_raw_dataset):
    moabb_dataset = setup_concat_raw_dataset
    return create_windows_from_events(concat_ds=moabb_dataset,
                                      trial_start_offset_samples=0,
                                      trial_stop_offset_samples=0)
Example #6
from braindecode.preprocessing.windowers import create_windows_from_events

trial_start_offset_seconds = -0.5
# Extract the sampling frequency and check that it is the same in all datasets
sfreq = dataset.datasets[0].raw.info['sfreq']
assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])

# Calculate the trial start offset in samples.
trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)
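
# This excerpt assumes ``input_window_samples`` and ``n_preds_per_input``
# were defined earlier. A hedged sketch of how braindecode's cropped-decoding
# tutorial typically derives them (the model choice and channel count below
# are illustrative assumptions):
from braindecode.models import ShallowFBCSPNet
from braindecode.models.util import to_dense_prediction_model, get_output_shape

input_window_samples = 1000
model = ShallowFBCSPNet(22, 4, input_window_samples=input_window_samples,
                        final_conv_length=30)
to_dense_prediction_model(model)  # enable dense (cropped) predictions
n_preds_per_input = get_output_shape(model, 22, input_window_samples)[2]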

# Create windows using the braindecode windower. It needs parameters that
# define how trials should be cut into windows.
windows_dataset = create_windows_from_events(
    dataset,
    trial_start_offset_samples=trial_start_offset_samples,
    trial_stop_offset_samples=0,
    window_size_samples=input_window_samples,
    window_stride_samples=n_preds_per_input,
    drop_last_window=False,
    preload=True
)


######################################################################
# Split the dataset
# -----------------
#
# This code is the same as in trialwise decoding.
#

splitted = windows_dataset.split('session')
train_set = splitted['session_T']
Example #7
from braindecode.preprocessing.windowers import create_windows_from_events

trial_start_offset_seconds = 0.0
# Extract the sampling frequency and check that it is the same in all datasets
sfreq = dataset.datasets[0].raw.info['sfreq']
assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])
# Calculate the trial start offset in samples.
trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)

# Create windows using the braindecode windower. It needs parameters that
# define how trials should be cut into windows.
windows_dataset = create_windows_from_events(
    dataset,
    window_size_samples=512,
    window_stride_samples=256,  # TBD
    trial_start_offset_samples=trial_start_offset_samples,
    trial_stop_offset_samples=0,
    preload=True,
)

######################################################################
# We also preprocess the windows by applying channel-wise z-score normalization.
#

from braindecode.preprocessing.preprocess import Preprocessor, preprocess, zscore

preprocess(windows_dataset, [Preprocessor(zscore)])
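
######################################################################
# Illustrative check (not part of the original excerpt): after channel-wise
# z-scoring, each channel of a window should have roughly zero mean and
# unit variance.
import numpy as np

x0, _, _ = windows_dataset[0]
print(np.mean(x0, axis=-1), np.std(x0, axis=-1))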

######################################################################
# Split dataset into train and valid
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example #8
]
print(dataset.datasets[0].raw.info["sfreq"])
preprocess(dataset, preprocessors)
print(dataset.datasets[0].raw.info["sfreq"])

###############################################################################
# We can easily split the dataset based on a criterion applied to the
# description DataFrame:
subsets = dataset.split("session")
print({subset_name: len(subset) for subset_name, subset in subsets.items()})

###############################################################################
# Next, we use a windower to cut the dataset into windows based on the events:
windows_dataset = create_windows_from_events(dataset,
                                             trial_start_offset_samples=0,
                                             trial_stop_offset_samples=100,
                                             window_size_samples=400,
                                             window_stride_samples=100,
                                             drop_last_window=False)

###############################################################################
# We can iterate through windows_dataset, which yields a window x,
# a target y, and window_ind (which itself contains ``i_window_in_trial``,
# ``i_start_in_trial``, and ``i_stop_in_trial``, which are required for
# combining window predictions in the scorer).
for x, y, window_ind in windows_dataset:
    print(x.shape, y, window_ind)
    break
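
###############################################################################
# A hedged sketch (not part of the original excerpt): since the windows
# dataset is a standard PyTorch dataset, it can be batched directly with a
# DataLoader; the batch size below is arbitrary.
from torch.utils.data import DataLoader

loader = DataLoader(windows_dataset, batch_size=32, shuffle=True)
X_batch, y_batch, window_ind_batch = next(iter(loader))
print(X_batch.shape, y_batch.shape)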

###############################################################################
# We visually inspect the windows:
max_i = 2
Example #9
sfreq = 100
window_size_samples = window_size_s * sfreq

mapping = {  # We merge stages 3 and 4 following AASM standards.
    'Sleep stage W': 0,
    'Sleep stage 1': 1,
    'Sleep stage 2': 2,
    'Sleep stage 3': 3,
    'Sleep stage 4': 3,
    'Sleep stage R': 4
}

windows_dataset = create_windows_from_events(
    dataset,
    trial_start_offset_samples=0,
    trial_stop_offset_samples=0,
    window_size_samples=window_size_samples,
    window_stride_samples=window_size_samples,
    preload=True,
    mapping=mapping)
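
# Illustrative check (not part of the original excerpt): the mapping above
# determines the integer targets, so each window's target should lie in
# {0, 1, 2, 3, 4}.
print(windows_dataset.datasets[0].windows.metadata['target'].value_counts())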

######################################################################
# Preprocessing windows
# ~~~~~~~~~~~~~~~~~~~~~
#
# We also preprocess the windows by applying channel-wise z-score normalization.

from sklearn.preprocessing import scale as standard_scale

from braindecode.preprocessing.preprocess import Preprocessor, preprocess

preprocess(windows_dataset, [Preprocessor(standard_scale, channel_wise=True)])

######################################################################
#

from braindecode.preprocessing.windowers import create_windows_from_events

trial_start_offset_seconds = -0.5
# Extract the sampling frequency and check that it is the same in all datasets
sfreq = dataset.datasets[0].raw.info['sfreq']
assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])
# Calculate the trial start offset in samples.
trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)

# Create windows using the braindecode windower. It needs parameters that
# define how trials should be cut into windows.
windows_dataset = create_windows_from_events(
    dataset,
    trial_start_offset_samples=trial_start_offset_samples,
    trial_stop_offset_samples=0,
    preload=True,
)

######################################################################
# Split dataset into train and valid
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#

######################################################################
# We can easily split the dataset using additional info stored in the
# description attribute, in this case the ``session`` column. We select
# ``session_T`` for training and ``session_E`` for validation.
#

splitted = windows_dataset.split('session')
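
# Completing the selection described above (the session labels follow the
# BCI IV 2a naming used throughout this example):
train_set = splitted['session_T']
valid_set = splitted['session_E']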