コード例 #1
0
def test_filter_picks():
    """Test filtering default channel picks"""
    # One channel of each type under test.
    all_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim']
    info = create_info(ch_names=all_types, ch_types=all_types, sfreq=256)
    raw = RawArray(data=np.zeros((len(all_types), 1000)), info=info)

    # pick_types() exposes mag/grad through a single 'meg' keyword, so the
    # keyword set differs from the channel-type list above.
    kwarg_names = ('misc', 'stim', 'meg', 'eeg', 'seeg')

    # Filtering any single data-channel type should succeed and emit exactly
    # one warning (attenuation).
    for wanted in ('mag', 'grad', 'eeg', 'seeg'):
        kwargs = {name: name == wanted for name in kwarg_names}
        kwargs['meg'] = wanted if wanted in ('mag', 'grad') else False
        picked = raw.pick_types(copy=True, **kwargs)
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            picked.filter(10, 30)
            assert_true(len(caught) == 1)

    # With no data channels selected, filtering must raise.
    for wanted in ('misc', 'stim'):
        kwargs = {name: name == wanted for name in kwarg_names}
        picked = raw.pick_types(copy=True, **kwargs)
        assert_raises(RuntimeError, picked.filter, 10, 30)
コード例 #2
0
def test_filter_picks():
    """Test filtering default channel picks"""
    ch_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim']
    info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256)
    raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info)

    # pick_types() covers mag/grad via a single 'meg' keyword.
    keys = ('misc', 'stim', 'meg', 'eeg', 'seeg')

    def _select(kind):
        # Build pick_types kwargs selecting only `kind`.
        return {key: key == kind for key in keys}

    # Each data channel type filters fine, with one attenuation warning.
    for kind in ('mag', 'grad', 'eeg', 'seeg'):
        selection = _select(kind)
        selection['meg'] = kind if kind in ('mag', 'grad') else False
        sub_raw = raw.pick_types(copy=True, **selection)
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            sub_raw.filter(10, 30)
            assert_true(len(caught) == 1)

    # No data channels at all -> filtering is an error.
    for kind in ('misc', 'stim'):
        sub_raw = raw.pick_types(copy=True, **_select(kind))
        assert_raises(RuntimeError, sub_raw.filter, 10, 30)
コード例 #3
0
ファイル: PortalNode.py プロジェクト: gwthompson/PyPortal-2
 def _update(self):
     """Route one chunk of input through the BCI worker.

     In 'train' mode: wrap the upstream chunk in a RawArray, keep only good
     EEG channels, optionally run 'adafreq' component auto-selection, then
     call ``self.PortalWorker.train``.  In 'test' mode: pass the chunk to
     ``processChunk`` and store the result on ``self.output``.
     """
     # The mne Info object is looked up from an upstream node of the graph.
     mne_info = self.traverse_back_and_find('mne_info')
     input_array = self.input_node.output
     if self.mode == 'train':
         raw_array = RawArray(input_array, mne_info, verbose='ERROR')
         # Train only on good EEG channels; drop MEG/stim and bads.
         raw_array.pick_types(eeg=True,
                              meg=False,
                              stim=False,
                              exclude='bads')
         if (self.PortalWorker.params.preproc == 'adafreq'):
             # Adaptive-frequency preprocessing: temporarily use a 2 s
             # window with 1 s step while auto-selecting components and
             # frequency bands from the training triggers/onsets.
             self.PortalWorker.params.setWinSizeAndStep(winSize=2, step=1)
             [comps, diaps] = autoSelectComps(self.PortalWorker.params,
                                              list([raw_array]),
                                              self.trainTriggersAllClasses,
                                              self.trainOnsetsAllClasses,
                                              self.trainFinOnsetsAllClasses,
                                              compThresh=0.65,
                                              minCompsNum=5)
             # Restore window settings from self.PortalParams.
             # NOTE(review): the other fields live on self.PortalWorker.params
             # while these defaults come from self.PortalParams — confirm the
             # asymmetry is intentional.
             self.PortalWorker.params.winSize = self.PortalParams.winSize
             self.PortalWorker.params.step = self.PortalParams.step
             self.PortalWorker.params.channelSelect = comps
             self.PortalWorker.params.fDiaps = diaps
         self.PortalWorker.train(list([raw_array]),
                                 self.trainTriggers,
                                 self.trainOnsets,
                                 self.finTrainOnsets,
                                 doBalanceLabels=True)
     if (self.mode == 'test'):
         self.output = self.PortalWorker.processChunk(input_array)
コード例 #4
0
def test_picks_by_channels():
    """Test creating pick_lists."""
    rng = np.random.RandomState(909)

    samples = rng.random_sample((4, 2000))
    names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
    kinds = ['grad', 'mag', 'mag', 'eeg']
    info = create_info(ch_names=names, sfreq=250.0, ch_types=kinds)
    _assert_channel_types(info)
    raw = RawArray(samples, info)

    # Without combining MEG, mag/grad/eeg give three separate entries,
    # with 'mag' first.
    by_type = _picks_by_type(raw.info)
    assert_equal(len(by_type), 3)
    assert_equal(by_type[0][0], 'mag')
    by_type2 = _picks_by_type(raw.info, meg_combined=False)
    assert_equal(len(by_type), len(by_type2))
    assert_equal(by_type2[0][0], 'mag')

    # Combining merges mag+grad into a single 'meg' entry.
    by_type2 = _picks_by_type(raw.info, meg_combined=True)
    assert_equal(len(by_type), len(by_type2) + 1)
    assert_equal(by_type2[0][0], 'meg')

    # Rebuild with four magnetometers only.
    samples = rng.random_sample((4, 2000))
    names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
    info = create_info(ch_names=names, sfreq=250.0, ch_types=['mag'] * 4)
    raw = RawArray(samples, info)
    # This acts as a set, not an order
    assert_array_equal(pick_channels(info['ch_names'], ['MEG 002', 'MEG 001']),
                       [0, 1])

    # Make sure checks for list input work.
    pytest.raises(ValueError, pick_channels, names, 'MEG 001')
    pytest.raises(ValueError, pick_channels, names, ['MEG 001'], 'hi')

    # All-mag info collapses to a single 'mag' entry either way.
    by_type = _picks_by_type(raw.info)
    assert_equal(len(by_type), 1)
    assert_equal(by_type[0][0], 'mag')
    by_type2 = _picks_by_type(raw.info, meg_combined=True)
    assert_equal(len(by_type), len(by_type2))
    assert_equal(by_type2[0][0], 'mag')

    # pick_types type check
    with pytest.raises(ValueError, match='must be of type'):
        raw.pick_types(eeg='string')

    # duplicate check
    dupes = ['MEG 002', 'MEG 002']
    assert len(pick_channels(raw.info['ch_names'], dupes)) == 1
    assert len(raw.copy().pick_channels(dupes)[0][0]) == 1
コード例 #5
0
ファイル: test_event.py プロジェクト: uge-lescot/mne-python
def test_find_events():
    """Test find events in raw file.

    Exercises: MNE_STIM_CHANNEL env-var defaulting, digital masking
    ('and' / 'not_and'), empty/short/consecutive events, onset vs. offset
    vs. step output, min_duration filtering, find_stim_steps merging,
    multiple stim channels, initial_event, and the error paths when no
    stim channel exists.
    """
    events = read_events(fname)
    raw = read_raw_fif(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3, mask_type='not_and')
    with pytest.warns(RuntimeWarning, match='events masked'):
        events22 = read_events(fname, mask=3, mask_type='not_and')
    assert_array_equal(events11, events22)

    # Reset some data for ease of comparison
    # (first_samp -> 0 and sfreq -> 1000 make the sample indices asserted
    # below exact)
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000

    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])

    # test digital masking
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'

    pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
    pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
    # testing mask_type. default = 'not_and'
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=1, mask_type='not_and'),
        [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=2, mask_type='not_and'),
        [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=3, mask_type='not_and'),
        [[4, 0, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=4, mask_type='not_and'),
        [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    # testing with mask_type = 'and'
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=1, mask_type='and'),
        [[1, 0, 1], [3, 0, 1]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=2, mask_type='and'),
        [[2, 0, 2]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=3, mask_type='and'),
        [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=4, mask_type='and'),
        [[4, 0, 4]])

    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    # an event starting at sample 0 is ignored by default
    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])

    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6

    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(
        find_events(raw, consecutive=True),
        [[10, 0, 5], [20, 5, 6], [30, 6, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5], [20, 5, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(
        find_events(raw, output='offset', consecutive=True),
        [[19, 6, 5], [29, 5, 6], [31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    # output='step' requires consecutive='step' semantics with shortest_event
    pytest.raises(ValueError,
                  find_events,
                  raw,
                  output='step',
                  consecutive=True)
    assert_array_equal(
        find_events(raw, output='step', consecutive=True, shortest_event=1),
        [[10, 0, 5], [20, 5, 6], [30, 6, 5], [32, 5, 0], [40, 0, 6],
         [41, 6, 0], [14399, 0, 9], [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5], [31, 0, 6], [40, 0, 6], [14399, 0, 9]])
    # min_duration is in seconds (0.002 s == 2 samples at sfreq=1000)
    assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5]])
    assert_array_equal(
        find_events(raw,
                    output='offset',
                    consecutive=False,
                    min_duration=0.002), [[31, 0, 5]])
    assert_array_equal(
        find_events(raw, output='offset', consecutive=True,
                    min_duration=0.002), [[19, 6, 5], [29, 5, 6], [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
                       [[10, 0, 5], [20, 5, 6]])

    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(
        find_stim_steps(raw, pad_start=0, merge=0, stim_channel=stim_channel),
        [[0, 0, 1], [1, 1, 0], [10, 0, 4], [11, 4, 5], [20, 5, 0]])
    assert_array_equal(
        find_stim_steps(raw, merge=-1, stim_channel=stim_channel),
        [[1, 1, 0], [10, 0, 5], [20, 5, 0]])
    assert_array_equal(
        find_stim_steps(raw, merge=1, stim_channel=stim_channel),
        [[1, 1, 0], [11, 0, 5], [20, 5, 0]])

    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o

    # Test with list of stim channels
    raw._data[stim_channel_idx, 1:101] = np.zeros(100)
    raw._data[stim_channel_idx, 10:11] = 1
    raw._data[stim_channel_idx, 30:31] = 3
    stim_channel2 = 'STI 015'
    stim_channel2_idx = pick_channels(raw.info['ch_names'],
                                      include=[stim_channel2])
    # second stim channel is a copy of the first, shifted 5 samples earlier,
    # so its events interleave at even positions below
    raw._data[stim_channel2_idx, :] = 0
    raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
    events1 = find_events(raw, stim_channel='STI 014')
    events2 = events1.copy()
    events2[:, 0] -= 5
    events = find_events(raw, stim_channel=['STI 014', stim_channel2])
    assert_array_equal(events[::2], events2)
    assert_array_equal(events[1::2], events1)

    # test initial_event argument
    info = create_info(['MYSTI'], 1000, 'stim')
    data = np.zeros((1, 1000))
    raw = RawArray(data, info)
    data[0, :10] = 100
    data[0, 30:40] = 200
    assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
    assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
                       [[0, 0, 100], [30, 0, 200]])

    # test error message for raw without stim channels
    raw = read_raw_fif(raw_fname, preload=True)
    raw.pick_types(meg=True, stim=False)
    # raw does not have annotations
    with pytest.raises(ValueError, match="'stim_channel'"):
        find_events(raw)
    # if raw has annotations, we show a different error message
    raw.set_annotations(Annotations(0, 2, "test"))
    with pytest.raises(ValueError, match="mne.events_from_annotations"):
        find_events(raw)
コード例 #6
0
def main():
    print "Using MNE", mne.__version__

    opts = parse_args()
    verbose = opts.debug

    # constants
    sfreq = 125.0
    class_labels = {'left': 2, 'right': 3}

    # files
    train_fname = "data/custom/trials/motor-imagery-subject-A-train-1.csv"
    test_fname = "data/custom/trials/motor-imagery-subject-A-test-1.csv"
    #train_fname = "data/custom/trials/motor-imagery-trial-subject-A-10-26-2016_01-54-59.csv"
    #train_fname = "data/custom/bci4/train/ds1g.txt"
    #test_fname = "data/custom/bci4/test/ds1g.txt"
    #train_fname = "data/custom/bci4/active_train/ds1b.txt"
    #test_fname = "data/custom/bci4/active_test/ds1b.txt"

    #################
    # LOAD DATA

    eval_start = time.clock()
    # load train data from training file
    [train_nparray, train_info] = file_to_nparray(train_fname,
                                                  sfreq=sfreq,
                                                  verbose=verbose)
    end = time.clock()
    print "train dataset", train_fname, "loaded in ", str(
        end - eval_start), "seconds"

    eval_start = time.clock()
    # load test data from test file
    [test_nparray, test_info] = file_to_nparray(test_fname,
                                                sfreq=sfreq,
                                                verbose=verbose)
    end = time.clock()
    print "test dataset", test_fname, "loaded in ", str(end -
                                                        eval_start), "seconds"

    total_start = time.clock()

    ##################
    # CLASSIFY DATA

    # pick a subset of total electrodes, or else just get all of the channels of type 'eeg'
    picks = getPicks('openbci16') or pick_types(train_info, eeg=True)

    # hyperparam 1
    bandpass_filters = get_bandpass_ranges()

    # hyperparam 2
    epoch_bounds = get_window_ranges()

    # extract X,y from train data
    train_raw = RawArray(train_nparray, train_info, verbose=verbose)
    train_events = mne.find_events(train_raw,
                                   shortest_event=0,
                                   consecutive=True,
                                   verbose=verbose)
    train_epochs = Epochs(raw=train_raw,
                          events=train_events,
                          event_id=class_labels,
                          tmin=-0.5,
                          tmax=3.5,
                          proj=False,
                          picks=picks,
                          baseline=None,
                          preload=True,
                          add_eeg_ref=False,
                          verbose=verbose)
    train_X = train_epochs.get_data()
    train_y = train_epochs.events[:, -1] - 2  # convert classes [2,3] to [0,1]

    # extract X,y from test data
    test_raw = RawArray(test_nparray, test_info, verbose=verbose)
    test_events = mne.find_events(test_raw,
                                  shortest_event=0,
                                  consecutive=True,
                                  verbose=verbose)
    test_epochs = Epochs(raw=test_raw,
                         events=test_events,
                         event_id=class_labels,
                         tmin=-0.5,
                         tmax=3.5,
                         proj=False,
                         picks=picks,
                         baseline=None,
                         preload=True,
                         add_eeg_ref=False,
                         verbose=verbose)
    test_X = test_epochs.get_data()
    test_y = test_epochs.events[:, -1] - 2  # convert classes [2,3] to [0,1]

    # custom grid search
    estimator1 = CSPEstimator(bandpass_filters=bandpass_filters,
                              epoch_bounds=epoch_bounds,
                              num_spatial_filters=6,
                              class_labels=class_labels,
                              sfreq=sfreq,
                              picks=picks,
                              num_votes=6,
                              consecutive=True)
    estimator1.fit(train_X, train_y)

    #
    exit()

    print
    # print

    print "-------------------------------------------"
    print "-------------------------------------------"
    print "-------------------------------------------"
    print "-------------------------------------------"
    print
    time.sleep(10)

    # custom grid search
    estimator2 = CSPEstimator(bandpass_filters=bandpass_filters,
                              epoch_bounds=epoch_bounds,
                              num_spatial_filters=6,
                              class_labels=class_labels,
                              sfreq=sfreq,
                              picks=picks,
                              num_votes=6,
                              consecutive=True)
    estimator2.fit(train_X, train_y, type="lr")

    #
    print "-------------------------------------------"
    print "LDA"
    score = estimator1.score(test_X, test_y)
    print "average estimator score", score
    print "-------------------------------------------"
    print "LOGISTIC REGRESSION"
    score = estimator2.score(test_X, test_y)
    print "average estimator score", score

    print "training run time", round(time.clock() - total_start, 1), "sec"
    exit()

    # just a pause here to allow visual inspection of top classifiers picked by grid search
    time.sleep(15)

    # now we go into predict mode, in which we are going over the test data using sliding windows
    # this is a simulation of what would happen if we were in "online" mode with live data
    # for each window, a prediction is given by the ensemble of top classifiers
    # next to this, we see the actual labels from the real data (i.e. the y vector)
    print "-------------------------------------------"
    print "PREDICT"
    print

    ####################################################
    # looping over test data in windows
    online_data = test_raw._data[picks]
    online_labels = test_raw.pick_types(stim=True)._data
    print "test_X", test_X.shape
    print "test RAW data", online_data.shape
    print "test RAW labels", online_labels.shape
    window_size = 150  # 50 sample = 0.5 s
    window_overlap = 150  #

    np.set_printoptions(suppress=True)
    for i in xrange(0, online_data.shape[1] - window_size, window_overlap):
        start = i
        end = i + window_size
        window = online_data[:, start:end]
        class_labels = online_labels[:, start:end]
        #print window.shape
        #print class_labels
        estimator.predict(window, class_labels)
        #print i,":",i+window_size
    exit()

    estimator.predict(test_X[0:10], test_y[0:10])
    print
    print "total run time", round(time.clock() - total_start, 1), "sec"
    exit()
コード例 #7
0
def main():
    """Train and evaluate a CSP-based motor-imagery classifier (Python 2).

    Loads BCI Competition IV train/test recordings, extracts epochs around
    the class triggers (left=2, right=3), grid-searches a CSPEstimator,
    prints the score, and (after the unreachable ``exit()``) would simulate
    online prediction over sliding windows of the raw test data.
    """
    print "Using MNE", mne.__version__

    opts = parse_args()
    verbose = opts.debug

    # constants
    sfreq = 100.0
    class_labels = {'left': 2, 'right': 3}

    # files
    train_fname = "data/custom/bci4/train/ds1g.txt"
    test_fname = "data/custom/bci4/test/ds1g.txt"
    #train_fname = "data/custom/bci4/active_train/ds1g.txt"
    #test_fname = "data/custom/bci4/active_test/ds1g.txt"

    #################
    # LOAD DATA

    eval_start = time.clock()
    # load train data from training file
    [train_nparray, train_info] = file_to_nparray(train_fname,
                                                  sfreq=sfreq,
                                                  verbose=verbose)
    end = time.clock()
    print "train dataset", train_fname, "loaded in ", str(
        end - eval_start), "seconds"

    eval_start = time.clock()
    # load test data from test file
    [test_nparray, test_info] = file_to_nparray(test_fname,
                                                sfreq=sfreq,
                                                verbose=verbose)
    end = time.clock()
    print "test dataset", test_fname, "loaded in ", str(end -
                                                        eval_start), "seconds"

    total_start = time.clock()

    ##################
    # CLASSIFY DATA

    # pick a subset of total electrodes, or else just get all of the channels of type 'eeg'
    picks = getPicks('motor16') or pick_types(train_info, eeg=True)

    # hyperparam 1
    bandpass_filters = get_bandpass_ranges()

    # hyperparam 2
    epoch_bounds = get_window_ranges()

    # extract X,y from train data
    train_raw = RawArray(train_nparray, train_info, verbose=verbose)
    train_events = mne.find_events(train_raw,
                                   shortest_event=0,
                                   consecutive=True,
                                   verbose=verbose)
    train_epochs = Epochs(raw=train_raw,
                          events=train_events,
                          event_id=class_labels,
                          tmin=-0.5,
                          tmax=3.5,
                          proj=False,
                          picks=picks,
                          baseline=None,
                          preload=True,
                          add_eeg_ref=False,
                          verbose=verbose)
    train_X = train_epochs.get_data()
    train_y = train_epochs.events[:, -1] - 2  # convert classes [2,3] to [0,1]

    # extract X,y from test data
    test_raw = RawArray(test_nparray, test_info, verbose=verbose)
    test_events = mne.find_events(test_raw,
                                  shortest_event=0,
                                  consecutive=True,
                                  verbose=verbose)
    test_epochs = Epochs(raw=test_raw,
                         events=test_events,
                         event_id=class_labels,
                         tmin=-0.5,
                         tmax=3.5,
                         proj=False,
                         picks=picks,
                         baseline=None,
                         preload=True,
                         add_eeg_ref=False,
                         verbose=verbose)
    test_X = test_epochs.get_data()
    test_y = test_epochs.events[:, -1] - 2  # convert classes [2,3] to [0,1]

    # custom grid search
    estimator = CSPEstimator(bandpass_filters=bandpass_filters,
                             epoch_bounds=epoch_bounds,
                             num_spatial_filters=6,
                             class_labels=class_labels,
                             sfreq=sfreq,
                             picks=picks,
                             num_votes=6,
                             consecutive=True)
    estimator.fit(train_X, train_y)

    #
    print "-------------------------------------------"
    score = estimator.score(test_X, test_y)
    print "-------------------------------------------"
    print "average estimator score", score
    print
    # print

    print "-------------------------------------------"
    print
    print "training run time", round(time.clock() - total_start, 1), "sec"
    # NOTE(review): this exit() makes everything below unreachable.
    exit()

    # just a pause here to allow visual inspection of top classifiers picked by grid search
    time.sleep(15)

    # """
    # Riemannian estimations
    # """
    # # preprocess
    #
    # # trim vars
    # epoch_trim_start = 1.0
    # epoch_trim_end = 2.0
    # # bandpass filter coefficients
    # b, a = butter(5, np.array([7.0,35.0])/(100*0.5), 'bandpass')
    # # cross validation
    # cv = KFold(len(train_y), 10, shuffle=True, random_state=42)
    #
    # # filter and crop TRAINING SET
    # train_X = preprocess_X(train_X, b, a, epoch_trim_start, epoch_trim_end, sfreq)
    #
    # # filter and crop TEST SET
    # test_X = preprocess_X(test_X, b, a, epoch_trim_start, epoch_trim_end, sfreq)
    #
    #
    # """
    # Covariance computation
    # """
    # # compute covariance matrices
    # cov_data_train = covariances(X=train_X)
    # cov_data_test = covariances(X=test_X)
    #
    # """
    # Classification with Minimum distance to mean (MDM)
    # """
    # mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
    # # Use scikit-learn Pipeline with cross_val_score function
    # scores = cross_val_score(mdm, cov_data_train, train_y, cv=cv, n_jobs=1)
    # # Printing the results
    # print "MDM Classification accuracy:",np.mean(scores)
    # print scores
    #
    # """
    # Classification with Tangent Space Logistic Regression
    # """
    # clf = TSclassifier()
    # # Use scikit-learn Pipeline with cross_val_score function
    # scores = cross_val_score(clf, cov_data_train, train_y, cv=cv, n_jobs=1)
    # # Printing the results
    # print "Tangent space Classification accuracy:",np.mean(scores)
    # print scores
    #
    # print "--------------------------------------"
    # print
    #
    # clf1 = MDM(metric=dict(mean='riemann', distance='riemann'))
    # clf1.fit(cov_data_train, train_y)
    # score1 = clf1.score(cov_data_test, test_y)
    # print "MDM prediction score:",score1
    #
    # clf2 = TSclassifier()
    # clf2.fit(cov_data_train, train_y)
    # score2 = clf2.score(cov_data_test, test_y)
    # print "Tangent space w/LR score:", score2
    #
    # clf3 = Potato()
    # clf3.fit(cov_data_train[np.where(train_y==0)])
    # score3 = clf3.score(cov_data_test, test_y)
    # print "Potato score:", score3
    #
    #
    # exit()

    # now we go into predict mode, in which we are going over the test data using sliding windows
    # this is a simulation of what would happen if we were in "online" mode with live data
    # for each window, a prediction is given by the ensemble of top classifiers
    # next to this, we see the actual labels from the real data (i.e. the y vector)
    print "-------------------------------------------"
    print "PREDICT"
    print

    ####################################################
    # looping over test data in windows
    online_data = test_raw._data[picks]
    online_labels = test_raw.pick_types(stim=True)._data
    print "test_X", test_X.shape
    print "test RAW data", online_data.shape
    print "test RAW labels", online_labels.shape
    window_size = 150  # 50 sample = 0.5 s
    window_overlap = 50  #

    np.set_printoptions(suppress=True)
    for i in xrange(0, online_data.shape[1] - window_size, window_overlap):
        start = i
        end = i + window_size
        window = online_data[:, start:end]
        class_labels = online_labels[:, start:end]
        #print window.shape
        #print class_labels
        estimator.predict(window, class_labels)
        #print i,":",i+window_size

    exit()

    estimator.predict(test_X[0:10], test_y[0:10])
    print
    print "total run time", round(time.clock() - total_start, 1), "sec"
    exit()
コード例 #8
0
def main():
	"""Train and evaluate a CSP-based motor-imagery classifier (Python 2).

	Loads BCI Competition IV train/test recordings, extracts epochs around
	the class triggers (left=2, right=3), grid-searches a CSPEstimator,
	prints the score, then simulates online prediction over sliding windows
	of the raw test data.
	"""
	print "Using MNE", mne.__version__

	opts = parse_args()
	verbose = opts.debug

	# constants
	sfreq = 100.0
	class_labels = {'left':2, 'right':3}

	# files
	train_fname = "data/custom/bci4/train/ds1g.txt"
	test_fname = "data/custom/bci4/test/ds1g.txt"
	#train_fname = "data/custom/bci4/active_train/ds1b.txt"
	#test_fname = "data/custom/bci4/active_test/ds1b.txt"

	#################
	# LOAD DATA

	eval_start = time.clock()
	# load train data from training file
	[train_nparray, train_info] = file_to_nparray(train_fname, sfreq=sfreq, verbose=verbose)
	end = time.clock()
	print "train dataset", train_fname, "loaded in ", str(end - eval_start),"seconds"

	eval_start = time.clock()
	# load test data from test file
	[test_nparray, test_info] = file_to_nparray(test_fname, sfreq=sfreq, verbose=verbose)
	end = time.clock()
	print "test dataset", test_fname, "loaded in ", str(end - eval_start),"seconds"

	total_start = time.clock()

	##################
	# CLASSIFY DATA

	# pick a subset of total electrodes, or else just get all of the channels of type 'eeg'
	picks = getPicks('motor16') or pick_types(train_info, eeg=True)

	# hyperparam 1
	bandpass_filters = get_bandpass_ranges()

	# hyperparam 2
	epoch_bounds = get_window_ranges()

	# extract X,y from train data
	train_raw = RawArray(train_nparray, train_info, verbose=verbose)
	train_events = mne.find_events(train_raw, shortest_event=0, consecutive=True, verbose=verbose)
	train_epochs = Epochs(raw=train_raw, events=train_events, event_id=class_labels,
	                      tmin=-0.5, tmax=3.5, proj=False, picks=picks, baseline=None,
	                      preload=True, add_eeg_ref=False, verbose=verbose)
	train_X = train_epochs.get_data()
	train_y = train_epochs.events[:, -1] - 2    # convert classes [2,3] to [0,1]

	# extract X,y from test data
	test_raw = RawArray(test_nparray, test_info, verbose=verbose)
	test_events = mne.find_events(test_raw, shortest_event=0, consecutive=True, verbose=verbose)
	test_epochs = Epochs(raw=test_raw, events=test_events, event_id=class_labels,
	                     tmin=-0.5, tmax=3.5, proj=False, picks=picks, baseline=None,
	                     preload=True, add_eeg_ref=False, verbose=verbose)
	test_X = test_epochs.get_data()
	test_y = test_epochs.events[:, -1] - 2      # convert classes [2,3] to [0,1]



	# custom grid search
	estimator = CSPEstimator(bandpass_filters=bandpass_filters,
               epoch_bounds=epoch_bounds,
               num_spatial_filters=6,
               class_labels=class_labels,
               sfreq=sfreq,
               picks=picks,
               num_votes=6,
               consecutive=True)
	estimator.fit(train_X,train_y)

	#
	print "-------------------------------------------"
	score = estimator.score(test_X,test_y)
	print "-------------------------------------------"
	print "average estimator score",score
	print
	# print

	print "-------------------------------------------"
	print
	print "training run time", round(time.clock() - total_start,1),"sec"
	#exit()

	# just a pause here to allow visual inspection of top classifiers picked by grid search
	time.sleep(15)


	# now we go into predict mode, in which we are going over the test data using sliding windows
	# this is a simulation of what would happen if we were in "online" mode with live data
	# for each window, a prediction is given by the ensemble of top classifiers
	# next to this, we see the actual labels from the real data (i.e. the y vector)
	print "-------------------------------------------"
	print "PREDICT"
	print


	####################################################
	# looping over test data in windows
	online_data = test_raw._data[picks]
	online_labels = test_raw.pick_types(stim=True)._data
	print "test_X", test_X.shape
	print "test RAW data",online_data.shape
	print "test RAW labels",online_labels.shape
	window_size = 150 # 50 sample = 0.5 s
	window_overlap = 50 #

	np.set_printoptions(suppress=True)
	for i in xrange(0, online_data.shape[1]-window_size, window_overlap):
		start = i
		end = i + window_size
		window = online_data[:,start:end]
		class_labels = online_labels[:,start:end]
		#print window.shape
		#print class_labels
		estimator.predict(window, class_labels)
		#print i,":",i+window_size
	# NOTE(review): this exit() makes the remaining lines unreachable.
	exit()




	estimator.predict(test_X[0:10], test_y[0:10])
	print
	print "total run time", round(time.clock() - total_start,1),"sec"
	exit()