Example 1
def test_windows_from_events_(lazy_loadable_dataset):
    msg = '"trial_stop_offset_samples" too large\\. Stop of last trial ' \
          '\\(19900\\) \\+ "trial_stop_offset_samples" \\(250\\) must be ' \
          'smaller than length of recording \\(20000\\)\\.'
    with pytest.raises(ValueError, match=msg):
        create_windows_from_events(
            concat_ds=lazy_loadable_dataset, trial_start_offset_samples=0,
            trial_stop_offset_samples=250, window_size_samples=100,
            window_stride_samples=100, drop_last_window=False)
Example 2
def test_overlapping_trial_offsets(concat_ds_targets):
    concat_ds, _ = concat_ds_targets
    with pytest.raises(NotImplementedError,
                       match='Trial overlap not implemented.'):
        create_windows_from_events(
            concat_ds=concat_ds,
            trial_start_offset_samples=-2000, trial_stop_offset_samples=0,
            window_size_samples=1000, window_stride_samples=1000,
            drop_last_window=False)
Example 3
def test_window_sizes_too_large(concat_ds_targets):
    concat_ds, targets = concat_ds_targets
    # Window size larger than all trials
    window_size = len(concat_ds.datasets[0]) + 1
    with pytest.raises(
            ValueError, match=f'Window size {window_size} exceeds trial durat'):
        create_windows_from_events(
            concat_ds=concat_ds,
            window_size_samples=window_size,
            window_stride_samples=window_size,
            trial_start_offset_samples=0,
            trial_stop_offset_samples=0,
            drop_last_window=False,
        )

    with pytest.raises(
            ValueError, match=f'Window size {window_size} exceeds trial durat'):
        create_fixed_length_windows(
            concat_ds=concat_ds,
            window_size_samples=window_size,
            window_stride_samples=window_size,
            drop_last_window=False,
        )

    # Window size larger than one single trial
    annots = concat_ds.datasets[0].raw.annotations
    annot_0 = annots[0]
    # Window size equal to the original trial size
    window_size = int(
        annot_0["duration"] * concat_ds.datasets[0].raw.info['sfreq'])

    # Make first trial 1 second shorter
    annot_0["duration"] -= 1

    # Replace first trial by a new shorter one
    annots.delete(0)
    del annot_0["orig_time"]
    annots.append(**annot_0)
    concat_ds.datasets[0].raw.set_annotations(annots)
    with pytest.warns(
            UserWarning,
            match=".* are being dropped as the window size .*"
    ):
        create_windows_from_events(
            concat_ds=concat_ds,
            window_size_samples=window_size,
            window_stride_samples=window_size,
            trial_start_offset_samples=0,
            trial_stop_offset_samples=0,
            drop_last_window=False,
            accepted_bads_ratio=0.5,
            on_missing='ignore'
        )
Example 4
def test_drop_bad_windows(concat_ds_targets, drop_bad_windows, preload):
    concat_ds, _ = concat_ds_targets
    windows_from_events = create_windows_from_events(
        concat_ds=concat_ds,
        trial_start_offset_samples=0,
        trial_stop_offset_samples=0,
        window_size_samples=100,
        window_stride_samples=100,
        drop_last_window=False,
        preload=preload,
        drop_bad_windows=drop_bad_windows)

    windows_fixed_length = create_fixed_length_windows(
        concat_ds=concat_ds,
        start_offset_samples=0,
        stop_offset_samples=1000,
        window_size_samples=1000,
        window_stride_samples=1000,
        drop_last_window=False,
        preload=preload,
        drop_bad_windows=drop_bad_windows)

    assert (windows_from_events.datasets[0].windows._bad_dropped ==
            drop_bad_windows)
    assert (windows_fixed_length.datasets[0].windows._bad_dropped ==
            drop_bad_windows)
Example 5
def test_windows_from_events_preload_false(lazy_loadable_dataset):
    windows = create_windows_from_events(
        concat_ds=lazy_loadable_dataset, trial_start_offset_samples=0,
        trial_stop_offset_samples=0, window_size_samples=100,
        window_stride_samples=100, drop_last_window=False)

    assert all([not ds.windows.preload for ds in windows.datasets])
Example 6
def test_windows_from_events_different_events(tmpdir_factory):
    description_expected = 5 * ['T0', 'T1'] + 4 * ['T2', 'T3'] + 2 * ['T1']
    raw = _get_raw(tmpdir_factory, description_expected[:10])
    base_ds = BaseDataset(raw, description=pd.Series({'file_id': 1}))

    raw_1 = _get_raw(tmpdir_factory, description_expected[10:])
    base_ds_1 = BaseDataset(raw_1, description=pd.Series({'file_id': 2}))
    concat_ds = BaseConcatDataset([base_ds, base_ds_1])

    windows = create_windows_from_events(concat_ds=concat_ds,
                                         trial_start_offset_samples=0,
                                         trial_stop_offset_samples=0,
                                         window_size_samples=100,
                                         window_stride_samples=100,
                                         drop_last_window=False)
    description = []
    events = []
    for ds in windows.datasets:
        description += ds.windows.metadata['target'].to_list()
        events += ds.windows.events[:, 0].tolist()

    assert len(description) == 20
    np.testing.assert_array_equal(description,
                                  5 * [0, 1] + 4 * [2, 3] + 2 * [1])
    np.testing.assert_array_equal(
        np.concatenate([
            raw.time_as_index(raw.annotations.onset, use_rounding=True),
            raw_1.time_as_index(raw_1.annotations.onset, use_rounding=True)
        ]), events)
Example 7
def test_dropping_last_incomplete_window(concat_ds_targets):
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(
        concat_ds=concat_ds,
        trial_start_offset_samples=-250, trial_stop_offset_samples=-750,
        window_size_samples=250, window_stride_samples=300,
        drop_last_window=True)
    description = windows.datasets[0].windows.metadata["target"].to_list()
    assert len(description) == len(targets)
    np.testing.assert_array_equal(description, targets)
Example 8
def test_stride_has_no_effect(concat_ds_targets):
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(
        concat_ds=concat_ds,
        trial_start_offset_samples=0, trial_stop_offset_samples=0,
        window_size_samples=1000, window_stride_samples=1000,
        drop_last_window=False)
    description = windows.datasets[0].windows.metadata["target"].to_list()
    assert len(description) == len(targets)
    np.testing.assert_array_equal(description, targets)
Example 9
def test_maximally_overlapping_windows(concat_ds_targets):
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(
        concat_ds=concat_ds,
        trial_start_offset_samples=-2, trial_stop_offset_samples=0,
        window_size_samples=1000, window_stride_samples=1,
        drop_last_window=False)
    description = windows.datasets[0].windows.metadata["target"].to_list()
    assert len(description) == len(targets) * 3
    np.testing.assert_array_equal(description[0::3], targets)
    np.testing.assert_array_equal(description[1::3], targets)
    np.testing.assert_array_equal(description[2::3], targets)
Example 10
def test_epochs_kwargs(lazy_loadable_dataset):
    picks = ['ch0']
    on_missing = 'warning'
    flat = {'eeg': 3e-6}
    reject = {'eeg': 43e-6}

    windows = create_windows_from_events(
        concat_ds=lazy_loadable_dataset, trial_start_offset_samples=0,
        trial_stop_offset_samples=0, window_size_samples=100,
        window_stride_samples=100, drop_last_window=False, picks=picks,
        on_missing=on_missing, flat=flat, reject=reject)

    epochs = windows.datasets[0].windows
    assert epochs.ch_names == picks
    assert epochs.reject == reject
    assert epochs.flat == flat
    for ds in windows.datasets:
        assert ds.window_kwargs == [
            ('create_windows_from_events', {
                'infer_mapping': True, 'infer_window_size_stride': False,
                'trial_start_offset_samples': 0, 'trial_stop_offset_samples': 0,
                'window_size_samples': 100, 'window_stride_samples': 100,
                'drop_last_window': False, 'mapping': {'test': 0}, 'preload': False,
                'drop_bad_windows': True, 'picks': picks, 'reject': reject,
                'flat': flat, 'on_missing': on_missing,
                'accepted_bads_ratio': 0.0})
        ]

    windows = create_fixed_length_windows(
        concat_ds=lazy_loadable_dataset, start_offset_samples=0,
        stop_offset_samples=None, window_size_samples=100,
        window_stride_samples=100, drop_last_window=False, picks=picks,
        on_missing=on_missing, flat=flat, reject=reject)

    epochs = windows.datasets[0].windows
    assert epochs.ch_names == picks
    assert epochs.reject == reject
    assert epochs.flat == flat
    for ds in windows.datasets:
        assert ds.window_kwargs == [
            ('create_fixed_length_windows', {
                'start_offset_samples': 0, 'stop_offset_samples': None,
                'window_size_samples': 100, 'window_stride_samples': 100,
                'drop_last_window': False, 'mapping': None, 'preload': False,
                'drop_bad_windows': True, 'picks': picks, 'reject': reject,
                'flat': flat, 'targets_from': 'metadata', 'last_target_only': True,
                'on_missing': on_missing}),
            ('WindowsDataset', {
                'targets_from': 'metadata',
                'last_target_only': True,
            })
        ]
Example 11
def test_windows_from_events_mapping_filter(tmpdir_factory):
    raw = _get_raw(tmpdir_factory, 5 * ['T0', 'T1'])
    base_ds = BaseDataset(raw, description=pd.Series({'file_id': 1}))
    concat_ds = BaseConcatDataset([base_ds])

    windows = create_windows_from_events(
        concat_ds=concat_ds, trial_start_offset_samples=0,
        trial_stop_offset_samples=0, window_size_samples=100,
        window_stride_samples=100, drop_last_window=False, mapping={'T1': 0})
    description = windows.datasets[0].windows.metadata['target'].to_list()

    assert len(description) == 5
    np.testing.assert_array_equal(description, np.zeros(5))
    # dataset should contain only 'T1' events
    np.testing.assert_array_equal(
        (raw.time_as_index(raw.annotations.onset[1::2], use_rounding=True)),
        windows.datasets[0].windows.events[:, 0])
Example 12
def test_windows_from_events_n_jobs(lazy_loadable_dataset):
    longer_dataset = BaseConcatDataset([lazy_loadable_dataset.datasets[0]] * 8)
    windows = [create_windows_from_events(
        concat_ds=longer_dataset, trial_start_offset_samples=0,
        trial_stop_offset_samples=0, window_size_samples=100,
        window_stride_samples=100, drop_last_window=False, preload=True,
        n_jobs=n_jobs) for n_jobs in [1, 2]]

    assert windows[0].description.equals(windows[1].description)
    for ds1, ds2 in zip(windows[0].datasets, windows[1].datasets):
        # assert ds1.windows == ds2.windows  # Runs locally, fails in CI
        assert np.allclose(ds1.windows.get_data(), ds2.windows.get_data())
        assert pd.Series(ds1.windows.info).to_json() == \
               pd.Series(ds2.windows.info).to_json()
        assert ds1.description.equals(ds2.description)
        assert np.array_equal(ds1.y, ds2.y)
        assert np.array_equal(ds1.crop_inds, ds2.crop_inds)
Example 13
def test_single_sample_size_windows(concat_ds_targets):
    concat_ds, targets = concat_ds_targets
    # reduce dataset for faster test, only first 3 events
    targets = targets[:3]
    underlying_raw = concat_ds.datasets[0].raw
    annotations = underlying_raw.annotations
    underlying_raw.set_annotations(annotations[:3])
    # have to supply explicit mapping as only two classes appear in first 3
    # targets
    windows = create_windows_from_events(
        concat_ds=concat_ds,
        trial_start_offset_samples=0, trial_stop_offset_samples=0,
        window_size_samples=1, window_stride_samples=1,
        drop_last_window=False, mapping=dict(tongue=3, left_hand=1,
                                             right_hand=2, feet=4))
    description = windows.datasets[0].windows.metadata["target"].to_list()
    assert len(description) == len(targets) * 1000
    np.testing.assert_array_equal(description[::1000], targets)
    np.testing.assert_array_equal(description[999::1000], targets)
Example 14
def test_windows_from_events_cropped(lazy_loadable_dataset):
    """Test windowing from events on cropped data.

    Cropping raw data changes the `first_samp` attribute of the Raw object, and
    so it is important to test this is taken into account by the windowers.
    """
    tmin, tmax = 100, 120

    ds = copy.deepcopy(lazy_loadable_dataset)
    ds.datasets[0].raw.annotations.crop(tmin, tmax)

    crop_ds = copy.deepcopy(lazy_loadable_dataset)
    crop_transform = Preprocessor('crop', tmin=tmin, tmax=tmax)
    preprocess(crop_ds, [crop_transform])

    # Extract windows
    windows1 = create_windows_from_events(concat_ds=ds,
                                          trial_start_offset_samples=0,
                                          trial_stop_offset_samples=0,
                                          window_size_samples=100,
                                          window_stride_samples=100,
                                          drop_last_window=False)
    windows2 = create_windows_from_events(concat_ds=crop_ds,
                                          trial_start_offset_samples=0,
                                          trial_stop_offset_samples=0,
                                          window_size_samples=100,
                                          window_stride_samples=100,
                                          drop_last_window=False)
    assert (windows1[0][0] == windows2[0][0]).all()

    # Make sure events that fall outside of recording will trigger an error
    with pytest.raises(ValueError,
                       match='"trial_stop_offset_samples" too large'):
        create_windows_from_events(concat_ds=ds,
                                   trial_start_offset_samples=0,
                                   trial_stop_offset_samples=10000,
                                   window_size_samples=100,
                                   window_stride_samples=100,
                                   drop_last_window=False)
    with pytest.raises(ValueError,
                       match='"trial_stop_offset_samples" too large'):
        create_windows_from_events(concat_ds=crop_ds,
                                   trial_start_offset_samples=0,
                                   trial_stop_offset_samples=2001,
                                   window_size_samples=100,
                                   window_stride_samples=100,
                                   drop_last_window=False)
Example 15
def test_epochs_kwargs(lazy_loadable_dataset):
    picks = ['ch0']
    on_missing = 'warning'
    flat = {'eeg': 3e-6}
    reject = {'eeg': 43e-6}

    windows = create_windows_from_events(concat_ds=lazy_loadable_dataset,
                                         trial_start_offset_samples=0,
                                         trial_stop_offset_samples=0,
                                         window_size_samples=100,
                                         window_stride_samples=100,
                                         drop_last_window=False,
                                         picks=picks,
                                         on_missing=on_missing,
                                         flat=flat,
                                         reject=reject)

    epochs = windows.datasets[0].windows
    assert epochs.ch_names == picks
    assert epochs.reject == reject
    assert epochs.flat == flat

    windows = create_fixed_length_windows(concat_ds=lazy_loadable_dataset,
                                          start_offset_samples=0,
                                          stop_offset_samples=None,
                                          window_size_samples=100,
                                          window_stride_samples=100,
                                          drop_last_window=False,
                                          picks=picks,
                                          on_missing=on_missing,
                                          flat=flat,
                                          reject=reject)

    epochs = windows.datasets[0].windows
    assert epochs.ch_names == picks
    assert epochs.reject == reject
    assert epochs.flat == flat
Example 16
]
print(dataset.datasets[0].raw.info["sfreq"])
preprocess(dataset, preprocessors)
print(dataset.datasets[0].raw.info["sfreq"])

###############################################################################
# We can easily split the dataset based on a criterion applied to its
# description DataFrame:
subsets = dataset.split("session")
print({subset_name: len(subset) for subset_name, subset in subsets.items()})

###############################################################################
# Next, we use a windower to extract windows from the dataset based on its
# events:
windows_dataset = create_windows_from_events(dataset,
                                             trial_start_offset_samples=0,
                                             trial_stop_offset_samples=100,
                                             window_size_samples=400,
                                             window_stride_samples=100,
                                             drop_last_window=False)

###############################################################################
# We can iterate through windows_dataset, which yields a window x,
# a target y, and window_ind (containing ``i_window_in_trial``,
# ``i_start_in_trial``, and ``i_stop_in_trial``, which are required for
# combining window predictions in the scorer).
for x, y, window_ind in windows_dataset:
    print(x.shape, y, window_ind)
    break

###############################################################################
# We visually inspect the windows:
max_i = 2
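
###############################################################################
# The plotting code itself was truncated from this excerpt; below is a minimal
# sketch of one way to do the inspection. matplotlib and the figure layout are
# assumptions, not part of the source.
import matplotlib.pyplot as plt

fig, ax_arr = plt.subplots(1, max_i + 1, figsize=((max_i + 1) * 7, 5),
                           sharex=True, sharey=True)
for i, (x, y, window_ind) in enumerate(windows_dataset):
    ax_arr[i].plot(x.T)  # one line per channel
    ax_arr[i].set_title(f"window {i}, target={y}")
    if i == max_i:
        break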
Example 17
trial_start_offset_seconds = -0.5
# Extract sampling frequency, check that it is the same in all datasets
sfreq = dataset.datasets[0].raw.info['sfreq']
assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])

# Calculate the trial start offset in samples.
trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)

# Create windows using the braindecode function for this. It needs parameters
# that define how trials should be used.
windows_dataset = create_windows_from_events(
    dataset,
    trial_start_offset_samples=trial_start_offset_samples,
    trial_stop_offset_samples=0,
    window_size_samples=input_window_samples,
    window_stride_samples=n_preds_per_input,
    drop_last_window=False,
    preload=True
)


######################################################################
# Split the dataset
# -----------------
#
# This code is the same as in trialwise decoding.
#

splitted = windows_dataset.split('session')
train_set = splitted['session_T']
Example 18
def test_window_sizes_from_events(concat_ds_targets):
    # no fixed window size, no offsets
    expected_n_samples = 1000
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(concat_ds=concat_ds,
                                         trial_start_offset_samples=0,
                                         trial_stop_offset_samples=0,
                                         drop_last_window=False)
    x, y, ind = windows[0]
    assert x.shape[-1] == ind[-1] - ind[-2]
    assert x.shape[-1] == expected_n_samples

    # no fixed window size, positive trial start offset
    expected_n_samples = 999
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(concat_ds=concat_ds,
                                         trial_start_offset_samples=1,
                                         trial_stop_offset_samples=0,
                                         drop_last_window=False)
    x, y, ind = windows[0]
    assert x.shape[-1] == ind[-1] - ind[-2]
    assert x.shape[-1] == expected_n_samples

    # no fixed window size, negative trial start offset
    expected_n_samples = 1001
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(concat_ds=concat_ds,
                                         trial_start_offset_samples=-1,
                                         trial_stop_offset_samples=0,
                                         drop_last_window=False)
    x, y, ind = windows[0]
    assert x.shape[-1] == ind[-1] - ind[-2]
    assert x.shape[-1] == expected_n_samples

    # no fixed window size, positive trial stop offset
    expected_n_samples = 1001
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(concat_ds=concat_ds,
                                         trial_start_offset_samples=0,
                                         trial_stop_offset_samples=1,
                                         drop_last_window=False)
    x, y, ind = windows[0]
    assert x.shape[-1] == ind[-1] - ind[-2]
    assert x.shape[-1] == expected_n_samples

    # no fixed window size, negative trial stop offset
    expected_n_samples = 999
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(concat_ds=concat_ds,
                                         trial_start_offset_samples=0,
                                         trial_stop_offset_samples=-1,
                                         drop_last_window=False)
    x, y, ind = windows[0]
    assert x.shape[-1] == ind[-1] - ind[-2]
    assert x.shape[-1] == expected_n_samples

    # fixed window size, trial offsets should not change window size
    expected_n_samples = 250
    concat_ds, targets = concat_ds_targets
    windows = create_windows_from_events(concat_ds=concat_ds,
                                         trial_start_offset_samples=3,
                                         trial_stop_offset_samples=8,
                                         window_size_samples=250,
                                         window_stride_samples=250,
                                         drop_last_window=False)
    x, y, ind = windows[0]
    assert x.shape[-1] == ind[-1] - ind[-2]
    assert x.shape[-1] == expected_n_samples
Example 19
def test_predict_trials():
    ds = MOABBDataset('BNCI2014001', subject_ids=1)
    ds1 = ds.split([0])['0']

    # determine original trial size
    windows_ds1 = create_windows_from_events(
        ds1,
    )
    trial_size = windows_ds1[0][0].shape[1]

    # create two windows per trial, where windows maximally overlap
    window_size_samples = trial_size - 1
    window_stride_samples = 5
    windows_ds1 = create_windows_from_events(
        ds1,
        window_size_samples=window_size_samples,
        window_stride_samples=window_stride_samples,
        drop_last_window=False,
    )

    in_chans = windows_ds1[0][0].shape[0]
    n_classes = len(windows_ds1.get_metadata()['target'].unique())
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
    )
    to_dense_prediction_model(model)

    output_shape = get_output_shape(model, in_chans, window_size_samples)
    # the number of samples required to get 1 output
    receptive_field_size = window_size_samples - output_shape[-1] + 1

    preds, targets = predict_trials(model, windows_ds1)

    # some model, cropped data
    assert preds.shape[-1] + receptive_field_size - 1 == trial_size
    assert preds.shape[1] == n_classes
    assert preds.shape[0] == targets.shape[0]
    metadata = windows_ds1.get_metadata()
    expected_targets = metadata[metadata['i_window_in_trial'] == 0][
        'target'].values
    np.testing.assert_array_equal(expected_targets, targets)

    # some model, trialwise data
    windows_ds2 = create_windows_from_events(ds1)
    with pytest.warns(UserWarning, match='This function was designed to predict'
                                         ' trials from cropped datasets.'):
        predict_trials(model, windows_ds2)

    # cropped EEGClassifier, cropped data
    clf = EEGClassifier(
        model,
        criterion=torch.nn.NLLLoss,
        optimizer=optim.AdamW,
        train_split=None,
        optimizer__lr=0.0625 * 0.01,
        optimizer__weight_decay=0,
        batch_size=64,
    )
    clf.initialize()
    clf.predict_trials(windows_ds1, return_targets=True)

    # cropped EEGClassifier, trialwise data
    with pytest.warns(UserWarning, match="This method was designed to predict "
                                         "trials in cropped mode. Calling it "
                                         "when cropped is False will give the "
                                         "same result as '.predict'."):
        clf.predict_trials(windows_ds2)
Example 20
from braindecode.preprocessing import create_windows_from_events

trial_start_offset_seconds = -0.5
# Extract sampling frequency, check that it is the same in all datasets
sfreq = dataset.datasets[0].raw.info['sfreq']
assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])
# Calculate the trial start offset in samples.
trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)

# Create windows using the braindecode function for this. It needs parameters
# that define how trials should be used.
windows_dataset = create_windows_from_events(
    dataset,
    trial_start_offset_samples=trial_start_offset_samples,
    trial_stop_offset_samples=0,
    preload=True,
)

######################################################################
# Split dataset into train and valid
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#

splitted = windows_dataset.split('session')
train_set = splitted['session_T']
valid_set = splitted['session_E']

Example 21
######################################################################
# Defining a Transform
# --------------------
mapping = {  # We merge stages 3 and 4 following AASM standards.
    'Sleep stage W': 0,
    'Sleep stage 1': 1,
    'Sleep stage 2': 2,
    'Sleep stage 3': 3,
    'Sleep stage 4': 3,
    'Sleep stage R': 4
}

window_size_s = 30
sfreq = 100
window_size_samples = window_size_s * sfreq

windows_dataset = create_windows_from_events(
    dataset, trial_start_offset_samples=0, trial_stop_offset_samples=0,
    window_size_samples=window_size_samples,
    window_stride_samples=window_size_samples, preload=True, mapping=mapping)


######################################################################
# Window preprocessing
# ~~~~~~~~~~~~~~~~~~~~
#


######################################################################
# We also preprocess the windows by applying channel-wise z-score normalization
# in each window.
#

from braindecode.preprocessing.preprocess import zscore
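
######################################################################
# A minimal sketch of the corresponding call, mirroring the
# Preprocessor/preprocess usage shown elsewhere in this document; the
# exact call was truncated from this excerpt, so treat it as an
# assumption rather than the original code.
from braindecode.preprocessing import Preprocessor, preprocess

preprocess(windows_dataset, [Preprocessor(zscore)])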
Example 22
def setup_concat_windows_dataset(setup_concat_raw_dataset):
    moabb_dataset = setup_concat_raw_dataset
    return create_windows_from_events(concat_ds=moabb_dataset,
                                      trial_start_offset_samples=0,
                                      trial_stop_offset_samples=0)
Example 23
###############################################################################
# If you want to split based on lists of indices and also want to specify
# the keys of the output dictionary, you can pass a dict:
splits = dataset.split({
    "train": [0, 1, 5],
    "valid": [2, 3, 4],
    "test": [6, 7, 8, 9, 10, 11]
})
print(splits)
splits["test"].description

###############################################################################
# Similarly, we can split datasets after creating windows
windows = create_windows_from_events(dataset,
                                     trial_start_offset_samples=0,
                                     trial_stop_offset_samples=0)
splits = windows.split("run")
splits

###############################################################################
splits = windows.split([4, 8])
splits

###############################################################################
splits = windows.split([[4, 8], [5, 9, 11]])
splits

###############################################################################
splits = windows.split(dict(train=[4, 8], test=[5, 9, 11]))
splits
Example 24
mapping = {  # We merge stages 3 and 4 following AASM standards.
    'Sleep stage W': 0,
    'Sleep stage 1': 1,
    'Sleep stage 2': 2,
    'Sleep stage 3': 3,
    'Sleep stage 4': 3,
    'Sleep stage R': 4
}

window_size_s = 30
sfreq = 100
window_size_samples = window_size_s * sfreq

windows_dataset = create_windows_from_events(
    dataset,
    trial_start_offset_samples=0,
    trial_stop_offset_samples=0,
    window_size_samples=window_size_samples,
    window_stride_samples=window_size_samples,
    picks="Fpz-Cz",  # the other option is Pz-Oz,
    preload=True,
    mapping=mapping)

######################################################################
# Window preprocessing
# ~~~~~~~~~~~~~~~~~~~~
#
# We also preprocess the windows by applying channel-wise z-score normalization
# in each window.

from sklearn.preprocessing import scale as standard_scale

preprocess(windows_dataset, [Preprocessor(standard_scale, channel_wise=True)])
Example 25
# It is possible to change the target_name upon loading, if the dataset
# supports it (TUHAbnormal for example supports 'pathological', 'age', and
# 'gender'. If you stored a preprocessed version with target 'pathological'
# it is possible to change the target upon loading).
dataset_loaded = load_concat_dataset(
    path=tmpdir,
    preload=True,
    ids_to_load=[1, 3],
    target_name=None,
)

##############################################################################
# The serialization utility also supports WindowsDatasets, so we compute
# windows next.
windows_dataset = create_windows_from_events(
    concat_ds=dataset_loaded,
    trial_start_offset_samples=0,
    trial_stop_offset_samples=0,
)

windows_dataset.description

##############################################################################
# Again, we save the dataset to an existing directory. It will create a
# '-epo.fif' file for every dataset in the concat dataset. Additionally, it
# will create a JSON file holding the description of the dataset. If you
# want to store to the same directory several times, for example when
# trying different windowing parameters, you can choose to overwrite the
# existing files.
windows_dataset.save(
    path=tmpdir,
    overwrite=True,
)
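
##############################################################################
# As a sanity check, the saved windows can be read back with the same
# load_concat_dataset utility used above; a minimal round-trip sketch:
windows_loaded = load_concat_dataset(path=tmpdir, preload=False)
print(windows_loaded.description)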