def test_select_classes(self):
    """Selecting Epochs."""
    # normal case
    dat = select_classes(self.dat, [0])
    self.assertEqual(dat.data.shape[0], 1)
    np.testing.assert_array_equal(dat.data, self.dat.data[[0]])
    np.testing.assert_array_equal(dat.axes[0], self.dat.axes[0][0])
    # normal case 2
    dat = select_classes(self.dat, [0, 1])
    self.assertEqual(dat.data.shape[0], 3)
    np.testing.assert_array_equal(dat.data[0], self.dat.data[0])
    np.testing.assert_array_equal(dat.data[1], self.dat.data[1])
    np.testing.assert_array_equal(dat.data[2], self.dat.data[3])
    np.testing.assert_array_equal(dat.axes[0], self.dat.axes[0][[0, 1, 3]])
    # remove one
    dat = select_classes(self.dat, [0], invert=True)
    self.assertEqual(dat.data.shape[0], 3)
    np.testing.assert_array_equal(dat.data, self.dat.data[1:])
    np.testing.assert_array_equal(dat.axes[0], self.dat.axes[0][1:])
    # remove every second
    dat = select_classes(self.dat, [0, 2], invert=True)
    self.assertEqual(dat.data.shape[0], 2)
    np.testing.assert_array_equal(dat.data, self.dat.data[[1, 3]])
    np.testing.assert_array_equal(dat.axes[0], self.dat.axes[0][[1, 3]])
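The tests above exercise the core select_classes API: pass a list of class indices to keep the matching epochs, or pass invert=True to drop them instead. Below is a minimal, self-contained sketch of that usage; it is not taken from the test suite, and the Data constructor arguments, class labels, and class names are illustrative assumptions following wyrm's conventions.

import numpy as np

from wyrm.types import Data
from wyrm.processing import select_classes

# Illustrative epoched data: 4 epochs x 10 samples x 5 channels.
data = np.random.randn(4, 10, 5)
classes = np.array([0, 1, 2, 1])                        # class index per epoch
time = np.arange(10)
channels = np.array(['ch%d' % i for i in range(5)])
dat = Data(data, [classes, time, channels],
           ['class', 'time', 'channel'], ['#', 'ms', '#'])
dat.class_names = ['left', 'right', 'foot']

left_only = select_classes(dat, [0])                    # keep epochs of class 0
without_left = select_classes(dat, [0], invert=True)    # drop epochs of class 0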
Example #2
import numpy as np
from matplotlib import pyplot as plt

from wyrm import processing as proc


def plot_channels(dat, figsize=(12, 12), ncols=8, chanaxis=-1, otheraxis=-2):
    """Plot all channels of a continuous or epoched Data object.

    In case of an epoched Data object, the classwise average is
    calculated, and for each channel the respective classes are plotted.

    Parameters
    ----------
    dat : Data
        continuous or epoched Data object
    figsize : (float, float), optional
        width and height of the resulting figure in inches
    ncols : int, optional
        the number of columns in the grid. The number of rows is
        calculated depending on ``ncols`` and the number of channels

    """
    # test if epo
    is_epo = False
    if dat.data.ndim == 3:
        is_epo = True
        dat = proc.calculate_classwise_average(dat)
    ax = []
    n_channels = dat.data.shape[chanaxis]
    nrows = int(np.ceil(n_channels / ncols))
    f, ax = plt.subplots(nrows=nrows,
                         ncols=ncols,
                         squeeze=False,
                         sharex=True,
                         sharey=True,
                         figsize=figsize)
    for i, chan in enumerate(dat.axes[chanaxis]):
        a = ax[i // ncols, i % ncols]
        if is_epo:
            for j, name in enumerate(dat.class_names):
                cnt = proc.select_classes(dat, [j])
                a.plot(cnt.axes[otheraxis],
                       cnt.data.take([i], chanaxis).squeeze(),
                       label=name)
        else:
            a.plot(dat.axes[otheraxis], dat.data.take([i], chanaxis).squeeze())
        a.set_title(chan)
        a.axvline(x=0, color='black')
        a.axhline(y=0, color='black')
    plt.legend()
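A short usage sketch for plot_channels as defined above. The epoched Data object is synthetic; the class names, channel labels, and shapes are illustrative assumptions, not part of the original example.

import numpy as np
from matplotlib import pyplot as plt

from wyrm.types import Data

# Synthetic epoched data: 8 epochs x 100 samples x 4 channels (illustrative).
data = np.random.randn(8, 100, 4)
axes = [np.array([0, 1, 0, 1, 0, 1, 0, 1]),    # class index of each epoch
        np.linspace(0, 990, 100),              # time in ms
        np.array(['C3', 'Cz', 'C4', 'Pz'])]    # channel names
epo = Data(data, axes, ['class', 'time', 'channel'], ['#', 'ms', '#'])
epo.class_names = ['left', 'right']

plot_channels(epo, ncols=2)    # plot_channels as defined above
plt.show()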
Example #3
File: plot.py  Project: jetW/wyrm
import numpy as np
from matplotlib import pyplot as plt

from wyrm import processing as proc


def plot_channels(dat, ncols=8, chanaxis=-1, otheraxis=-2):
    """Plot all channels of a continuous or epoched Data object.

    In case of an epoched Data object, the classwise average is
    calculated, and for each channel the respective classes are plotted.

    Parameters
    ----------
    dat : Data
        continuous or epoched Data object
    ncols : int, optional
        the number of columns in the grid. The number of rows is
        calculated depending on ``ncols`` and the number of channels

    """
    # test if epo
    is_epo = False
    if dat.data.ndim == 3:
        is_epo = True
        dat = proc.calculate_classwise_average(dat)
    ax = []
    n_channels = dat.data.shape[chanaxis]
    nrows = int(np.ceil(n_channels / ncols))
    f, ax = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True)
    for i, chan in enumerate(dat.axes[chanaxis]):
        a = ax[i // ncols, i % ncols]
        if is_epo:
            for j, name in enumerate(dat.class_names):
                cnt = proc.select_classes(dat, [j])
                a.plot(cnt.axes[otheraxis], cnt.data.take([i], chanaxis).squeeze(), label=name)
        else:
            a.plot(dat.axes[otheraxis], dat.data.take([i], chanaxis).squeeze())
        a.set_title(chan)
        a.axvline(x=0, color='black')
        a.axhline(y=0, color='black')
    plt.legend()
def test_select_classes_copy(self):
    """Select classes must not modify argument."""
    cpy = self.dat.copy()
    select_classes(self.dat, [0, 1])
    self.assertEqual(self.dat, cpy)

def test_select_classes_swapaxes(self):
    """Select classes must work with nonstandard classaxis."""
    dat = select_classes(swapaxes(self.dat, 0, 2), [0], classaxis=2)
    dat = swapaxes(dat, 0, 2)
    dat2 = select_classes(self.dat, [0])
    self.assertEqual(dat, dat2)

def test_select_classes_with_cnt(self):
    """Select epochs must raise an exception if called with a cnt argument."""
    del self.dat.class_names
    with self.assertRaises(AssertionError):
        select_classes(self.dat, [0, 1])
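The swapaxes test above relies on the classaxis keyword of select_classes: when the class labels do not live on axis 0, the axis can be named explicitly. A small illustrative sketch of the equivalence the test checks (synthetic data, assumed wyrm conventions):

import numpy as np

from wyrm.types import Data
from wyrm.processing import select_classes, swapaxes

data = np.random.randn(4, 10, 5)               # epochs x samples x channels
dat = Data(data,
           [np.array([0, 1, 2, 1]), np.arange(10), np.arange(5)],
           ['class', 'time', 'channel'], ['#', 'ms', '#'])
dat.class_names = ['left', 'right', 'foot']

# Selecting on the default classaxis=0 ...
a = select_classes(dat, [0])
# ... gives the same result as moving the class axis elsewhere and
# pointing select_classes at it explicitly.
b = swapaxes(select_classes(swapaxes(dat, 0, 2), [0], classaxis=2), 0, 2)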
Example #7
    def run_pair(self, epo_train, epo_test, bp_nr, fold_nr, pair_nr):
        class_pair = self.class_pairs[pair_nr]
        self.print_class_pair(class_pair)

        ### Run Training
        epo_train_pair = select_classes(epo_train, class_pair)
        epo_test_pair = select_classes(epo_test, class_pair)

        if self.ival_optimizer is not None:
            best_segment_ival = self.ival_optimizer.optimize(epo_train_pair)
            log.info("Ival {:.0f}ms - {:.0f}ms".format(*best_segment_ival))
            epo_train_pair = select_ival(epo_train_pair, best_segment_ival)
            epo_test_pair = select_ival(epo_test_pair, best_segment_ival)
            epo_train = select_ival(epo_train, best_segment_ival)
            epo_test = select_ival(epo_test, best_segment_ival)

        self.train_labels[fold_nr][pair_nr] = epo_train_pair.axes[0]
        self.test_labels[fold_nr][pair_nr] = epo_test_pair.axes[0]

        ## Calculate CSP
        filters, patterns, variances = calculate_csp(epo_train_pair)
        ## Apply csp, calculate features
        if self.n_filters is not None:
            # take topmost and bottommost filters, e.g.
            # for n_filters=3 0,1,2,-3,-2,-1
            columns = list(range(0, self.n_filters)) + \
                list(range(-self.n_filters, 0))
        else:  # take all possible filters
            columns = list(range(len(filters)))
        train_feature = apply_csp_var_log(epo_train_pair, filters, columns)

        ## Calculate LDA
        clf = lda_train_scaled(train_feature, shrink=True)
        assert not np.any(np.isnan(clf[0]))
        assert not np.isnan(clf[1])
        ## Apply LDA to train
        train_out = lda_apply(train_feature, clf)
        correct_train = train_feature.axes[0] == class_pair[1]
        predicted_train = train_out >= 0
        train_accuracy = (sum(correct_train == predicted_train) /
                          float(len(predicted_train)))

        ### Feature Computation and LDA Application for test
        test_feature = apply_csp_var_log(epo_test_pair, filters, columns)
        test_out = lda_apply(test_feature, clf)
        correct_test = test_feature.axes[0] == class_pair[1]
        predicted_test = test_out >= 0
        test_accuracy = (sum(correct_test == predicted_test) /
                         float(len(predicted_test)))

        ### Feature Computations for full fold (for later multiclass)
        train_feature_full_fold = apply_csp_var_log(epo_train, filters,
                                                    columns)
        test_feature_full_fold = apply_csp_var_log(epo_test, filters, columns)
        ### Store results
        # only store used patterns filters variances
        # to save memory space on disk
        self.store_results(bp_nr, fold_nr, pair_nr, filters[:, columns],
                           patterns[:, columns], variances[columns],
                           train_feature, test_feature,
                           train_feature_full_fold, test_feature_full_fold,
                           clf, train_accuracy, test_accuracy)
        if self.ival_optimizer is not None:
            self.best_ival[bp_nr, fold_nr, pair_nr] = best_segment_ival

        self.print_results(bp_nr, fold_nr, pair_nr)
Example #8
    def run_pair(self, epo_train, epo_test, bp_nr, fold_nr, pair_nr):
        class_pair = self.class_pairs[pair_nr]
        self.print_class_pair(class_pair)

        ### Run Training
        epo_train_pair = select_classes(epo_train, class_pair)
        epo_test_pair = select_classes(epo_test, class_pair)
        if self.ival_optimizer is not None:
            best_segment_ival = self.ival_optimizer.optimize(epo_train_pair)
            log.info("Ival {:.0f}ms - {:.0f}ms".format(*best_segment_ival))
            epo_train_pair = select_ival(epo_train_pair, best_segment_ival)
            epo_test_pair = select_ival(epo_test_pair, best_segment_ival)
            epo_train = select_ival(epo_train, best_segment_ival)
            epo_test = select_ival(epo_test, best_segment_ival)

        self.train_labels[fold_nr][pair_nr] = epo_train_pair.axes[0]
        self.test_labels[fold_nr][pair_nr] = epo_test_pair.axes[0]

        ## Calculate CSP
        filters, patterns, variances = calculate_csp(epo_train_pair)
        ## Apply csp, calculate features
        if self.n_filters is not None:
            # take topmost and bottommost filters, e.g.
            # for n_filters=3 0,1,2,-3,-2,-1
            columns = list(range(0, self.n_filters)) + list(range(-self.n_filters, 0))
        else:  # take all possible filters
            columns = list(range(len(filters)))
        train_feature = apply_csp_var_log(epo_train_pair, filters, columns)

        ## Calculate LDA
        clf = lda_train_scaled(train_feature, shrink=True)
        assert not np.any(np.isnan(clf[0]))
        assert not np.isnan(clf[1])
        ## Apply LDA to train
        train_out = lda_apply(train_feature, clf)
        correct_train = train_feature.axes[0] == class_pair[1]
        predicted_train = train_out >= 0
        train_accuracy = sum(correct_train == predicted_train) / float(len(predicted_train))

        ### Feature Computation and LDA Application for test
        test_feature = apply_csp_var_log(epo_test_pair, filters, columns)
        test_out = lda_apply(test_feature, clf)
        correct_test = test_feature.axes[0] == class_pair[1]
        predicted_test = test_out >= 0
        test_accuracy = sum(correct_test == predicted_test) / float(len(predicted_test))

        ### Feature Computations for full fold (for later multiclass)
        train_feature_full_fold = apply_csp_var_log(epo_train, filters, columns)
        test_feature_full_fold = apply_csp_var_log(epo_test, filters, columns)
        ### Store results
        # only store used patterns filters variances
        # to save memory space on disk
        self.store_results(
            bp_nr,
            fold_nr,
            pair_nr,
            filters[:, columns],
            patterns[:, columns],
            variances[columns],
            train_feature,
            test_feature,
            train_feature_full_fold,
            test_feature_full_fold,
            clf,
            train_accuracy,
            test_accuracy,
        )
        if self.ival_optimizer is not None:
            self.best_ival[bp_nr, fold_nr, pair_nr] = best_segment_ival

        self.print_results(bp_nr, fold_nr, pair_nr)
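For orientation, apply_csp_var_log in the two examples above is assumed to compute log-variance features of CSP-filtered epochs. The rough, hypothetical sketch below shows that computation on a bare array; the real function returns a Data object whose axes[0] still carries the epoch labels (they are compared against class_pair[1] in the accuracy computation above).

import numpy as np

def csp_var_log(epo_data, filters, columns):
    # epo_data: (epochs, samples, channels); filters: (channels, n_csp).
    # Project each epoch onto the selected CSP filters, take the variance
    # over time within the epoch, and return its logarithm as the feature.
    projected = np.dot(epo_data, filters[:, columns])    # (epochs, samples, n_selected)
    return np.log(np.var(projected, axis=1))             # (epochs, n_selected)

# Illustrative call: 20 epochs, 100 samples, 8 channels, keep 2 filters per end.
features = csp_var_log(np.random.randn(20, 100, 8),
                       np.random.randn(8, 8),
                       [0, 1, -2, -1])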