Example #1
def test_bdf_stim_channel():
    """Test BDF stim channel."""
    # test if last channel is detected as STIM by default
    raw_py = _test_raw_reader(read_raw_edf, input_fname=bdf_path,
                              stim_channel='auto')
    assert channel_type(raw_py.info, raw_py.info["nchan"] - 1) == 'stim'

    # test BDF file with wrong scaling info in header - this should be ignored
    # for BDF stim channels
    events = [[242, 0, 4],
              [310, 0, 2],
              [952, 0, 1],
              [1606, 0, 1],
              [2249, 0, 1],
              [2900, 0, 1],
              [3537, 0, 1],
              [4162, 0, 1],
              [4790, 0, 1]]
    with pytest.deprecated_call(match='stim_channel'):
        raw = read_raw_edf(bdf_stim_channel_path, preload=True)
    bdf_events = find_events(raw)
    assert_array_equal(events, bdf_events)
    raw = read_raw_edf(bdf_stim_channel_path, preload=False,
                       stim_channel='auto')
    bdf_events = find_events(raw)
    assert_array_equal(events, bdf_events)
Example #2
def test_edf_data():
    """Test edf files"""
    _test_raw_reader(read_raw_edf, input_fname=edf_path, stim_channel=None)
    raw_py = read_raw_edf(edf_path, preload=True)
    # Test saving and loading when annotations were parsed.
    tempdir = _TempDir()
    raw_file = op.join(tempdir, 'test-raw.fif')
    raw_py.save(raw_file, overwrite=True, buffer_size_sec=1)
    Raw(raw_file, preload=True)

    edf_events = find_events(raw_py, output='step', shortest_event=0,
                             stim_channel='STI 014')

    # onset, duration, id
    events = [[0.1344, 0.2560, 2],
              [0.3904, 1.0000, 2],
              [2.0000, 0.0000, 3],
              [2.5000, 2.5000, 2]]
    events = np.array(events)
    events[:, :2] *= 512  # convert time to samples
    events = np.array(events, dtype=int)
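    # durations become offset samples (each event is kept at least one sample long)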
    events[:, 1] -= 1
    events[events[:, 1] <= 0, 1] = 1
    events[:, 1] += events[:, 0]

    onsets = events[:, [0, 2]]
    offsets = events[:, [1, 2]]

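    # interleave onset and offset rows to match find_events(output='step')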
    events = np.zeros((2 * events.shape[0], 3), dtype=int)
    events[0::2, [0, 2]] = onsets
    events[1::2, [0, 1]] = offsets

    assert_array_equal(edf_events, events)
Example #3
def test_edf_data():
    """Test edf files."""
    raw = _test_raw_reader(read_raw_edf, input_fname=edf_path,
                           stim_channel=None, exclude=['Ergo-Left', 'H10'])
    raw_py = read_raw_edf(edf_path, preload=True)
    assert_equal(len(raw.ch_names) + 2, len(raw_py.ch_names))
    # Test saving and loading when annotations were parsed.
    edf_events = find_events(raw_py, output='step', shortest_event=0,
                             stim_channel='STI 014')

    # onset, duration, id
    events = [[0.1344, 0.2560, 2],
              [0.3904, 1.0000, 2],
              [2.0000, 0.0000, 3],
              [2.5000, 2.5000, 2]]
    events = np.array(events)
    events[:, :2] *= 512  # convert time to samples
    events = np.array(events, dtype=int)
    events[:, 1] -= 1
    events[events[:, 1] <= 0, 1] = 1
    events[:, 1] += events[:, 0]

    onsets = events[:, [0, 2]]
    offsets = events[:, [1, 2]]

    events = np.zeros((2 * events.shape[0], 3), dtype=int)
    events[0::2, [0, 2]] = onsets
    events[1::2, [0, 1]] = offsets

    assert_array_equal(edf_events, events)
Example #4
def test_edf_annotations():
    """Test if events are detected correctly in a typical MNE workflow."""

    # test an actual file
    raw = read_raw_edf(edf_path, preload=True)
    edf_events = find_events(raw, output='step', shortest_event=0,
                             stim_channel='STI 014')

    # onset, duration, id
    events = [[0.1344, 0.2560, 2],
              [0.3904, 1.0000, 2],
              [2.0000, 0.0000, 3],
              [2.5000, 2.5000, 2]]
    events = np.array(events)
    events[:, :2] *= 512  # convert time to samples
    events = np.array(events, dtype=int)
    events[:, 1] -= 1
    events[events[:, 1] <= 0, 1] = 1
    events[:, 1] += events[:, 0]

    onsets = events[:, [0, 2]]
    offsets = events[:, [1, 2]]

    events = np.zeros((2 * events.shape[0], 3), dtype=int)
    events[0::2, [0, 2]] = onsets
    events[1::2, [0, 1]] = offsets

    assert_array_equal(edf_events, events)
Example #5
def test_stim_channel():
    """Test reading raw edf files with stim channel."""
    raw_py = read_raw_edf(edf_path,
                          misc=range(-4, 0),
                          stim_channel=139,
                          preload=True)

    picks = pick_types(raw_py.info,
                       meg=False,
                       eeg=True,
                       exclude=['EDF Annotations'])
    data_py, _ = raw_py[picks]

    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr

    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]

    assert_array_almost_equal(data_py, data_eeglab, 10)
    events = find_edf_events(raw_py)
    assert_true(len(events) - 1 == len(find_events(raw_py)))  # start not found

    # Test uneven sampling
    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
    data_py, _ = raw_py[0]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
    raw_eeglab = raw_eeglab['data']
    data_eeglab = raw_eeglab[0]

    # match upsampling
    upsample = len(data_eeglab) / len(raw_py)
    data_py = np.repeat(data_py, repeats=upsample)
    assert_array_equal(data_py, data_eeglab)

    assert_raises(RuntimeError,
                  read_raw_edf,
                  edf_path,
                  preload=False,
                  stim_channel=-1)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_edf(edf_stim_resamp_path, verbose=True, stim_channel=-1)
    assert_equal(len(w), 1)
    assert_true('Events may jitter' in str(w[0].message))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw[:]
    assert_equal(len(w), 0)

    events = raw_py.find_edf_events()
    assert_true(len(events) == 0)
Example #6
def test_edf_data():
    """Test edf files."""
    raw = _test_raw_reader(read_raw_edf,
                           input_fname=edf_path,
                           stim_channel=None,
                           exclude=['Ergo-Left', 'H10'],
                           verbose='error')
    raw_py = read_raw_edf(edf_path, stim_channel='auto', preload=True)

    assert_equal(len(raw.ch_names) + 2, len(raw_py.ch_names))
    # Test saving and loading when annotations were parsed.
    edf_events = find_events(raw_py,
                             output='step',
                             shortest_event=0,
                             stim_channel='STI 014')

    # onset, duration, id
    events = [[0.1344, 0.2560, 2], [0.3904, 1.0000, 2], [2.0000, 0.0000, 3],
              [2.5000, 2.5000, 2]]
    events = np.array(events)
    events[:, :2] *= 512  # convert time to samples
    events = np.array(events, dtype=int)
    events[:, 1] -= 1
    events[events[:, 1] <= 0, 1] = 1
    events[:, 1] += events[:, 0]

    onsets = events[:, [0, 2]]
    offsets = events[:, [1, 2]]

    events = np.zeros((2 * events.shape[0], 3), dtype=int)
    events[0::2, [0, 2]] = onsets
    events[1::2, [0, 1]] = offsets

    assert_array_equal(edf_events, events)

    # Test with number of records not in header (-1).
    tempdir = _TempDir()
    broken_fname = op.join(tempdir, 'broken.edf')
    with open(edf_path, 'rb') as fid_in:
        fid_in.seek(0, 2)
        n_bytes = fid_in.tell()
        fid_in.seek(0, 0)
        rbytes = fid_in.read(int(n_bytes * 0.4))
    with open(broken_fname, 'wb') as fid_out:
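        # EDF header bytes 236-243 hold the number of data records;
        # overwrite them with '-1' to simulate a file without a record count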
        fid_out.write(rbytes[:236])
        fid_out.write(bytes('-1      '.encode()))
        fid_out.write(rbytes[244:])
    with pytest.warns(RuntimeWarning,
                      match='records .* not match the file size'):
        raw = read_raw_edf(broken_fname, preload=True, stim_channel='auto')
        read_raw_edf(broken_fname,
                     exclude=raw.ch_names[:132],
                     preload=True,
                     stim_channel='auto')
Example #7
def create_mdm(raw, event_id):
    tmin, tmax = -1., 4.
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    baseline=None, preload=True, verbose=False)
    labels = epochs.events[:, -1]
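    # drop the last (stim) channel before computing covariances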
    epochs_data_train = epochs.get_data()[:, :-1]
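    # pyRiemann Covariances: one spatial covariance matrix per epoch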
    cov_data_train = Covariances().transform(epochs_data_train)
    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
    mdm.fit(cov_data_train, labels)
    return mdm
Example #8
def test_bdf_stim_channel():
    """Test BDF stim channel."""
    # test if last channel is detected as STIM by default
    raw_py = _test_raw_reader(read_raw_edf,
                              input_fname=bdf_path,
                              stim_channel='auto')
    assert channel_type(raw_py.info, raw_py.info["nchan"] - 1) == 'stim'

    # test BDF file with wrong scaling info in header - this should be ignored
    # for BDF stim channels
    events = [[242, 0, 4], [310, 0, 2], [952, 0, 1], [1606, 0, 1],
              [2249, 0, 1], [2900, 0, 1], [3537, 0, 1], [4162, 0, 1],
              [4790, 0, 1]]
    raw = read_raw_edf(bdf_stim_channel_path, preload=True)
    bdf_events = find_events(raw)
    assert_array_equal(events, bdf_events)
    raw = read_raw_edf(bdf_stim_channel_path,
                       preload=False,
                       stim_channel='auto')
    bdf_events = find_events(raw)
    assert_array_equal(events, bdf_events)
Example #9
def test_stim_channel():
    """Test reading raw edf files with stim channel."""
    raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
                          preload=True)

    picks = pick_types(raw_py.info, meg=False, eeg=True,
                       exclude=['EDF Annotations'])
    data_py, _ = raw_py[picks]

    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr

    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]

    assert_array_almost_equal(data_py, data_eeglab, 10)
    events = find_edf_events(raw_py)
    assert_true(len(events) - 1 == len(find_events(raw_py)))  # start not found

    # Test uneven sampling
    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
    data_py, _ = raw_py[0]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
    raw_eeglab = raw_eeglab['data']
    data_eeglab = raw_eeglab[0]

    # match upsampling
    upsample = len(data_eeglab) / len(raw_py)
    data_py = np.repeat(data_py, repeats=upsample)
    assert_array_equal(data_py, data_eeglab)

    assert_raises(RuntimeError, read_raw_edf, edf_path, preload=False,
                  stim_channel=-1)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_edf(edf_stim_resamp_path, verbose=True, stim_channel=-1)
    assert_equal(len(w), 2)
    assert_true(any('Events may jitter' in str(ww.message) for ww in w))
    assert_true(any('truncated' in str(ww.message) for ww in w))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw[:]
    assert_equal(len(w), 0)

    events = raw_py.find_edf_events()
    assert_true(len(events) == 0)
Example #10
def test_edf_data():
    """Test edf files."""
    raw = _test_raw_reader(read_raw_edf, input_fname=edf_path,
                           stim_channel=None, exclude=['Ergo-Left', 'H10'],
                           verbose='error')
    raw_py = read_raw_edf(edf_path, stim_channel='auto', preload=True)

    assert_equal(len(raw.ch_names) + 2, len(raw_py.ch_names))
    # Test saving and loading when annotations were parsed.
    edf_events = find_events(raw_py, output='step', shortest_event=0,
                             stim_channel='STI 014')

    # onset, duration, id
    events = [[0.1344, 0.2560, 2],
              [0.3904, 1.0000, 2],
              [2.0000, 0.0000, 3],
              [2.5000, 2.5000, 2]]
    events = np.array(events)
    events[:, :2] *= 512  # convert time to samples
    events = np.array(events, dtype=int)
    events[:, 1] -= 1
    events[events[:, 1] <= 0, 1] = 1
    events[:, 1] += events[:, 0]

    onsets = events[:, [0, 2]]
    offsets = events[:, [1, 2]]

    events = np.zeros((2 * events.shape[0], 3), dtype=int)
    events[0::2, [0, 2]] = onsets
    events[1::2, [0, 1]] = offsets

    assert_array_equal(edf_events, events)

    # Test with number of records not in header (-1).
    tempdir = _TempDir()
    broken_fname = op.join(tempdir, 'broken.edf')
    with open(edf_path, 'rb') as fid_in:
        fid_in.seek(0, 2)
        n_bytes = fid_in.tell()
        fid_in.seek(0, 0)
        rbytes = fid_in.read(int(n_bytes * 0.4))
    with open(broken_fname, 'wb') as fid_out:
        fid_out.write(rbytes[:236])
        fid_out.write(b'-1      ')
        fid_out.write(rbytes[244:])
    with pytest.warns(RuntimeWarning,
                      match='records .* not match the file size'):
        raw = read_raw_edf(broken_fname, preload=True, stim_channel='auto')
        read_raw_edf(broken_fname, exclude=raw.ch_names[:132], preload=True,
                     stim_channel='auto')
Example #11
def test_stim_channel():
    """Test reading raw edf files with stim channel."""
    raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
                          preload=True)

    picks = pick_types(raw_py.info, meg=False, eeg=True,
                       exclude=['EDF Annotations'])
    data_py, _ = raw_py[picks]

    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr

    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]

    assert_array_almost_equal(data_py, data_eeglab, 10)
    events = find_edf_events(raw_py)
    assert (len(events) - 1 == len(find_events(raw_py)))  # start not found

    # Test uneven sampling
    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
    data_py, _ = raw_py[0]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
    raw_eeglab = raw_eeglab['data']
    data_eeglab = raw_eeglab[0]

    # match upsampling
    upsample = len(data_eeglab) / len(raw_py)
    data_py = np.repeat(data_py, repeats=upsample)
    assert_array_equal(data_py, data_eeglab)

    pytest.raises(RuntimeError, read_raw_edf, edf_path, preload=False,
                  stim_channel=-1)

    with pytest.warns(RuntimeWarning,
                      match='Interpolating stim .* Events may jitter'):
        raw = read_raw_edf(edf_stim_resamp_path, verbose=True, stim_channel=-1)
    with pytest.warns(None) as w:
        raw[:]
    assert len(w) == 0

    events = raw_py.find_edf_events()
    assert (len(events) == 0)
Example #12
def test_find_events_and_events_from_annot_are_the_same():
    """Test that old behaviour and new produce the same events."""
    EXPECTED_EVENTS = [[68, 0, 2], [199, 0, 2], [1024, 0, 3], [1280, 0, 2]]
    raw = read_raw_edf(edf_path, preload=True)
    raw_shell = _get_empty_raw_with_valid_annot(edf_path)
    assert raw_shell.info['meas_date'] == raw.info['meas_date']
    assert raw_shell.info['sfreq'] == raw.info['sfreq']
    assert raw_shell.first_samp == raw.first_samp

    events_from_find_events = find_events(raw)
    assert_array_equal(events_from_find_events, EXPECTED_EVENTS)

    annot = read_annotations_edf(edf_path)
    raw_shell.set_annotations(annot)
    event_id = _get_edf_default_event_id(annot.description)
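    # drop the 'start' annotation so the recording-start marker is not turned into an event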
    event_id.pop('start')
    events_from_EFA, _ = events_from_annotations(raw_shell,
                                                 event_id=event_id,
                                                 use_rounding=False)

    assert_array_equal(events_from_EFA, events_from_find_events)
Example #13
def test_find_events_and_events_from_annot_are_the_same():
    """Test that old behaviour and new produce the same events."""
    EXPECTED_EVENTS = [[68, 0, 2],
                       [199, 0, 2],
                       [1024, 0, 3],
                       [1280, 0, 2]]
    raw = read_raw_edf(edf_path, preload=True, stim_channel='auto')
    raw_shell = _get_empty_raw_with_valid_annot(edf_path)
    assert raw_shell.info['meas_date'] == raw.info['meas_date']
    assert raw_shell.info['sfreq'] == raw.info['sfreq']
    assert raw_shell.first_samp == raw.first_samp

    events_from_find_events = find_events(raw)
    assert_array_equal(events_from_find_events, EXPECTED_EVENTS)

    annot = read_annotations(edf_path)
    raw_shell.set_annotations(annot)
    event_id = _get_edf_default_event_id(annot.description)
    event_id.pop('start')
    events_from_EFA, _ = events_from_annotations(raw_shell, event_id=event_id,
                                                 use_rounding=False)

    assert_array_equal(events_from_EFA, events_from_find_events)
Example #14
def get_score(subject=7, runs=[6, 10, 14], event_id=dict(hands=2, feet=3)):
    tmin, tmax = -1., 4.

    raw = get_raw(subject, runs)
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')

    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    baseline=None, preload=True, verbose=False)
    labels = epochs.events[:, -1]

    # cv = KFold(len(labels), 10, shuffle=True, random_state=42)
    epochs_data_train = epochs.get_data()[:, :-1]
    cov_data_train = Covariances().transform(epochs_data_train)

    ###############################################################################
    # Classification with Minimum distance to mean
    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
    pl = Pipeline([("mdm", mdm)])
    params = {"mdm__metric": [dict(mean='riemann', distance='riemann')]}
    clf = GridSearchCV(pl, params, n_jobs=-1, cv=5, return_train_score=True)
    clf.fit(cov_data_train, labels)
    df = pd.DataFrame(clf.cv_results_)
    return df
Example #15
# cue onset.
tmin, tmax = -2., 6.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14]  # motor imagery: hands vs feet

raw_files = [
    read_raw_edf(f, preload=True, verbose=False)
    for f in eegbci.load_data(subject, runs)
]
raw = concatenate_raws(raw_files)

# strip channel names
raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]

events = find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = pick_types(raw.info,
                   meg=False,
                   eeg=True,
                   stim=False,
                   eog=False,
                   exclude='bads')

raw.filter(7., 35., method='iir', picks=picks)

epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                proj=True,
Example #16
def get_score(subject=7, runs=[6, 10, 14], event_id=dict(hands=2, feet=3)):
    if subject in EXCLUDE_SUBJECTS:
        return

    tmin, tmax = -1., 4.
    weights = np.arange(0.1, 1.0, 0.1)

    for weight in weights:
        first_sub = 2 if subject == 1 else 1
        raw = get_raw(subject, runs)
        scores = []
        for i in range(first_sub, TRANS_SUBJECT_COUNT):
            print(i)
            if i == subject or (i in EXCLUDE_SUBJECTS):
                continue
            raw.append(get_raw(i, runs))

            events = find_events(raw, shortest_event=0, stim_channel='STI 014')
            epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                            baseline=None, preload=True, verbose=False)
            labels = epochs.events[:, -1]
            epochs_data_train = 1e6*epochs.get_data()[:, :-1]
            cov_data_train = Covariances().transform(epochs_data_train)

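            # weight the target subject's epochs by `weight` and the transfer epochs by (1 - weight)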
            target_sample_weight_base = np.ones(EPOCH_COUNT)*weight
            others_sample_weight_base = np.ones(
                len(epochs)-EPOCH_COUNT)*(1.-weight)
            sample_weight = np.hstack(
                (target_sample_weight_base, others_sample_weight_base))

            others_size = others_sample_weight_base.size
            others_index = np.arange(others_size)

            mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
            cv = KFold(n_splits=5, shuffle=True, random_state=42)

            train_scores = []
            test_scores = []
            dummy_array = np.ones(EPOCH_COUNT)

            for train_index, test_index in cv.split(dummy_array):
                train_index = np.hstack(
                    (others_index, train_index+others_size))
                x = cov_data_train[train_index]
                y = labels[train_index]
                mdm.fit(x, y, sample_weight=sample_weight[train_index])

                score = (mdm.predict(x) == y).sum()/len(train_index)
                train_scores.append(score)

                test_index = test_index + others_size
                y = mdm.predict(cov_data_train[test_index])
                score = (y == labels[test_index]).sum()/len(test_index)
                test_scores.append(score)

            train_score = np.mean(train_scores)
            test_score = np.mean(test_scores)
            scores.append([subject, i, train_score, test_score])
        df = pd.DataFrame(
            scores, columns=["subject", "transfer_count", "train_score", "test_score"])
        df.to_excel("data/riemann/gradually/test_subject_%d_weight_%e.xlsx" %
                    (subject, weight), index=False)
Example #17
def get_score(subject=7):
    ###############################################################################
    # Set parameters and read data

    # avoid classification of evoked responses by using epochs that start 1s after
    # cue onset.
    tmin, tmax = 1., 2.
    event_id = dict(hands=2, feet=3)

    runs = [6, 10, 14]  # motor imagery: hands vs feet

    raw_files = [
        read_raw_edf(f, preload=True) for f in eegbci.load_data(subject, runs)
    ]
    raw = concatenate_raws(raw_files)

    picks = pick_types(raw.info,
                       meg=False,
                       eeg=True,
                       stim=False,
                       eog=False,
                       exclude='bads')
    # subsample elecs
    picks = picks[::2]

    # Apply band-pass filter
    raw.filter(7., 35., method='iir', picks=picks)

    events = find_events(raw, shortest_event=0, stim_channel='STI 014')

    # Read epochs (train will be done only between 1 and 2s)
    # Testing will be done with a running classifier
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    picks=picks,
                    baseline=None,
                    preload=True,
                    verbose=False)
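    # map the event ids (hands=2, feet=3) to labels 0 and 1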
    labels = epochs.events[:, -1] - 2

    # cross validation
    cv = KFold(len(labels), 10, shuffle=True, random_state=42)
    # get epochs
    epochs_data_train = 1e6 * epochs.get_data()

    # compute covariance matrices
    cov_data_train = Covariances().transform(epochs_data_train)

    ###############################################################################
    # Classification with Minimum distance to mean
    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))

    # Use scikit-learn cross_val_score to evaluate the MDM classifier
    # (mdm_score is needed for the return value below)
    scores = cross_val_score(mdm, cov_data_train, labels, cv=cv, n_jobs=1)
    mdm_score = np.mean(scores)

    ###############################################################################
    # Classification with Tangent Space Logistic Regression
    clf = TSclassifier()
    # Use scikit-learn Pipeline with cross_val_score function
    scores = cross_val_score(clf, cov_data_train, labels, cv=cv, n_jobs=1)

    # Printing the results
    class_balance = np.mean(labels == labels[0])
    class_balance = max(class_balance, 1. - class_balance)
    ts_score = np.mean(scores)
    print("Tangent space Classification accuracy: %f / Chance level: %f" %
          (ts_score, class_balance))

    ###############################################################################

    return [subject, mdm_score, ts_score]
Example #18
    def edf_loader(self, single_file):
        raw = read_raw_edf(single_file, preload=True)
        # print "raw: "
        # print raw
        # print "number of channels: %d, type: %s, raw.ch_names: " % (len(raw.ch_names), type(raw.ch_names))
        # print raw.ch_names
        start, stop = raw.time_as_index([0, 150])
        ch_start = 2
        ch_end = 16
        data, times = raw[ch_start:ch_end, start:stop]
        # print data.shape
        # print type(data)
        # print raw.ch_names[2:16]
        # print "TTTTTTTTTTTTTTTTTTTTTTT"
        # print type(times)
        # print np.shape(times)
        # print times
        # print "TTTTTTTTTTTTTTTTTTTTTTT"
        cha_names = []
        for n in raw.ch_names:
            str_n = str(n)
            # str_n = str(unicodedata.normalize('NFKD', n).encode('ascii', 'ignore'))
            cha_names.append(str_n)
        # print type(raw.ch_names[0])
        # print "cha_names: "
        # print cha_names
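        # map channels 2..15 to their data rows (wrapped in 1-tuples, unwrapped into d_dict below)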
        data_dict = {i[0]: i[1:] for i in zip(cha_names[2:16], data)}
        # print "data: "
        # print data
        # print "data_dict['AF3']: "
        # print len(data_dict['AF3'][0])
        d_dict = {}
        for k, v in data_dict.iteritems():
            d_dict[k] = v[0]
        # print "d_dict['AF3']:"
        # print len(d_dict['AF3'])
        raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]
        raw.filter(2., 30., method='iir')
        events = find_events(raw, shortest_event=0, stim_channel='STI 014')
        print "events: "
        print events
        picks = pick_types(raw.info,
                           meg=False,
                           eeg=True,
                           stim=False,
                           exclude='bads')
        """
		epochs = Epochs(raw, events, self.event_id, self.tmin, self.tmax, proj=True, picks=picks, baseline=None, preload=True, add_eeg_ref=False)
		epoch_train = epochs.crop(tmin=1., tmax=2., copy=True)
		labels = epochs.event[:, -1] - 2
		"""
        # raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]

        # epochs = Epochs()
        # print "picks: "
        # print picks
        # print raw[picks]
        # print type(raw)
        # print raw.info
        # print d_dict
        # print d_dict.keys()
        return d_dict
Example #19
def get_score(subject=7, runs=[6, 10, 14], event_id=dict(hands=2, feet=3)):
    tmin, tmax = -1., 4.

    # learn from all subjects except the target subject ####################
    first_sub = 2 if subject == 1 else 1
    raw = get_raw(first_sub, runs)
    for i in range(first_sub + 1, 3):
        if i != subject and not (i in [88, 89, 92, 100]):
            # print(i)
            raw.append(get_raw(i, runs))
    raw.append(get_raw(subject, runs))

    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    baseline=None,
                    preload=True,
                    verbose=False)

    labels = epochs.events[:, -1]
    epochs_data_train = 1e6 * epochs.get_data()[:, :-1]
    cov_data_train = Covariances().transform(epochs_data_train)

    weights = np.arange(0.1, 1.0, 0.1)
    scores = []
    for weight in weights:
        mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
        others_sample_weight_base = np.ones(len(epochs) -
                                            EPOCH_COUNT) * (1. - weight)
        target_sample_weight_base = np.ones(EPOCH_COUNT) * weight
        sample_weight = np.hstack(
            (others_sample_weight_base, target_sample_weight_base))

        others_size = others_sample_weight_base.size
        others_index = np.arange(others_size)

        cv = KFold(n_splits=5, shuffle=True, random_state=42)
        train_scores = []
        test_scores = []
        dummy_array = np.ones(EPOCH_COUNT)
        for train_index, test_index in cv.split(dummy_array):
            train_index = np.hstack((others_index, train_index + others_size))
            x = cov_data_train[train_index]
            y = labels[train_index]
            mdm.fit(x, y, sample_weight=sample_weight[train_index])
            score = (mdm.predict(x) == y).sum() / len(train_index)
            train_scores.append(score)

            test_index = test_index + others_size
            y = mdm.predict(cov_data_train[test_index])
            score = (y == labels[test_index]).sum() / len(test_index)
            test_scores.append(score)

        train_score = np.mean(train_scores)
        test_score = np.mean(test_scores)
        scores.append([subject, weight, train_score, test_score])
        # print("train:%s test:%s" % (train_score, test_score))
    return scores
Example #20
for s, subject in enumerate(subjects):
    
    print('Processing Subject %s' % subject)
    raw_files = [read_raw_edf(f, preload=True, verbose=False)
                 for f in eegbci.load_data(subject, runs)]
    raw = concatenate_raws(raw_files)

    picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                       exclude='bads')
    # subsample elecs
    picks = picks[::2]
    
    # Apply band-pass filter
    raw.filter(7., 35., method='iir', picks=picks)
    
    events = find_events(raw, shortest_event=0, stim_channel='STI 014',
                         verbose=False)

    # Read epochs (train will be done only between 1 and 2s)
    # Testing will be done with a running classifier
    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=None, preload=True, add_eeg_ref=False,
                    verbose=False)

    labels = epochs.events[:, -1] - 2
    X = epochs.get_data()
    
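    # 10-fold cross-validated accuracy for each candidate classifier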
    for i, c in enumerate(classifiers):
        r = cross_val_score(classifiers[c], X, labels, cv=10, n_jobs=-1)
        results[s, i] = np.mean(r)
Example #21
    def get_event_data(self,
                       event_id,
                       tmin,
                       tmax,
                       picks=None,
                       stim_channel=None,
                       min_duration=0):
        """Simulate the data for a particular event-id.

        The epochs corresponding to a particular event-id are returned. The
        method remembers the epoch that was returned in the previous call and
        returns the next epoch in sequence. Once all epochs corresponding to
        an event-id have been exhausted, the method returns None.

        Parameters
        ----------
        event_id : int
            The id of the event to consider.
        tmin : float
            Start time before event.
        tmax : float
            End time after event.
        %(picks_all)s
        stim_channel : None | string | list of string
            Name of the stim channel or all the stim channels
            affected by the trigger. If None, the config variables
            'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
            etc. are read. If these are not found, it will default to
            'STI 014'.
        min_duration : float
            The minimum duration of a change in the events channel required
            to consider it as an event (in seconds).

        Returns
        -------
        data : 2D array, shape (n_channels, n_times)
            The data for the next simulated epoch of this event type.
        """
        # Get the list of all events
        picks = _picks_to_idx(self.info, picks, 'all', exclude=())
        events = find_events(self.raw,
                             stim_channel=stim_channel,
                             verbose=False,
                             output='onset',
                             consecutive='increasing',
                             min_duration=min_duration)

        # Get the list of only the specified event
        idx = np.where(events[:, -1] == event_id)[0]
        event_samp = events[idx, 0]

        # Only do this the first time for each event type
        if event_id not in self._current:

            # Initialize pointer for the event to 0
            self._current[event_id] = 0
            self._last[event_id] = len(event_samp)

        # relative start and stop positions in samples
        tmin_samp = int(round(self.info['sfreq'] * tmin))
        tmax_samp = int(round(self.info['sfreq'] * tmax)) + 1

        if self._current[event_id] < self._last[event_id]:

            # Select the current event from the events list
            ev_samp = event_samp[self._current[event_id]]

            # absolute start and stop positions in samples
            start = ev_samp + tmin_samp - self.raw.first_samp
            stop = ev_samp + tmax_samp - self.raw.first_samp

            self._current[event_id] += 1  # increment pointer

            data, _ = self.raw[picks, start:stop]

            return data

        else:
            return None
Example #22
# cue onset.
tmin, tmax = 1., 3.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14]  # motor imagery: hands vs feet

raw_files = [read_raw_edf(f, preload=True, verbose=False)
             for f in eegbci.load_data(subject, runs)]
raw = concatenate_raws(raw_files)

# strip channel names
raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]

# Apply band-pass filter
raw.filter(7., 35., method='iir')

events = find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, add_eeg_ref=False,
                verbose=False)
labels = epochs.events[:, -1] - 2

# get epochs
epochs_data = epochs.get_data()

# compute covariance matrices
covmats = Covariances().fit_transform(epochs_data) 
Example #23
    raw = concatenate_raws(raw_files)

    picks = pick_types(raw.info,
                       meg=False,
                       eeg=True,
                       stim=False,
                       eog=False,
                       exclude='bads')
    # subsample elecs
    picks = picks[::2]

    # Apply band-pass filter
    raw.filter(7., 35., method='iir', picks=picks)

    events = find_events(raw,
                         shortest_event=0,
                         stim_channel='STI 014',
                         verbose=False)

    # Read epochs (train will be done only between 1 and 2s)
    # Testing will be done with a running classifier
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    picks=picks,
                    baseline=None,
                    preload=True,
                    add_eeg_ref=False,
                    verbose=False)