Example no. 1
 def test_select_ival(self):
     """Selecting Intervals."""
     # normal case
     dat = select_ival(self.dat, [-500, 0])
     self.assertEqual(dat.axes[1][0], -500)
     self.assertEqual(dat.axes[1][-1], -100)
     # the full dat interval
     dat = select_ival(self.dat, [self.dat.axes[1][0], self.dat.axes[1][-1] + 1])
     self.assertEqual(dat.axes[1][0], self.dat.axes[1][0])
     self.assertEqual(dat.axes[1][-1], self.dat.axes[1][-1])
     np.testing.assert_array_equal(dat.data, self.dat.data)
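As these two cases show, select_ival treats the interval as half-open: [-500, 0] keeps samples only up to -100, and reproducing the full data requires passing the last time value plus one as the upper bound. A minimal, self-contained sketch of that behaviour on plain NumPy arrays (illustrative only; select_ival_sketch and its signature are assumptions, not the library's implementation):

import numpy as np

def select_ival_sketch(data, times, ival, timeaxis=0):
    """Keep samples whose time value t satisfies ival[0] <= t < ival[1]."""
    mask = (times >= ival[0]) & (times < ival[1])
    return data.compress(mask, axis=timeaxis), times[mask]

times = np.arange(-1000, 0, 100)        # time axis in ms: -1000 .. -100
data = np.random.randn(len(times), 3)   # (time, channels)
cut, times_cut = select_ival_sketch(data, times, [-500, 0])
# times_cut == array([-500, -400, -300, -200, -100])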
Example no. 2
 def test_select_ival_with_markers(self):
     """Selecting Intervals with markers."""
     # normal case
     good_markers = [[-499.99, 'x'], [-500, 'x'], [-0.0001, 'x']]
     bad_markers = [[501, 'y'], [0, 'y'], [1, 'y']]
     self.dat.markers = good_markers[:]
     self.dat.markers.extend(bad_markers)
     dat = select_ival(self.dat, [-500, 0])
     self.assertEqual(dat.markers, good_markers)
Example no. 3
 def test_ival_checks(self):
     """Test for malformed ival parameter."""
     with self.assertRaises(AssertionError):
         select_ival(self.dat, [0, -1])
     with self.assertRaises(AssertionError):
         select_ival(self.dat, [self.dat.axes[1][0] - 1, 0])
     with self.assertRaises(AssertionError):
         select_ival(self.dat, [0, self.dat.axes[1][-1] + 1])
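These checks document that select_ival signals a malformed ival (reversed bounds, or bounds outside the data's time range) with an AssertionError. A caller that prefers a recoverable error could wrap the call; try_select_ival below is a hypothetical helper, not part of the library:

def try_select_ival(dat, ival, **kwargs):
    # Convert the AssertionError documented above into a ValueError
    # that calling code can handle explicitly.
    try:
        return select_ival(dat, ival, **kwargs)
    except AssertionError:
        raise ValueError("malformed ival: {!r}".format(ival))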
Example no. 4
def select_ival_with_markers(cnt, segment_ival):
    """Select the ival of the data that has markers inside.
    Respect segment ival.
    Keeps data from 2 sec before first marker + segment_ival[0] to
    2 sec after last marker + segment_ival[1]"""
    ms_first_marker = cnt.markers[0][0]

    # allow 2 sec before first marker and after last marker
    start = max(0, ms_first_marker + segment_ival[0] - 2000)
    ms_last_marker = cnt.markers[-1][0]
    stop = ms_last_marker + segment_ival[1] + 2000

    cnt = select_ival(cnt, [start, stop])
    # possibly subtract first element of timeaxis so timeaxis starts at 0 again?
    return cnt
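To make the 2-second padding concrete, a worked example with made-up marker times (illustrative only):

# first marker at 5000 ms, last marker at 60000 ms, segment_ival = [0, 4000]
#   start = max(0, 5000 + 0 - 2000)  = 3000
#   stop  = 60000 + 4000 + 2000      = 66000
# so select_ival_with_markers(cnt, [0, 4000]) would keep roughly [3000, 66000] ms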
Example no. 5
 def test_select_ival_swapaxes(self):
     """select_ival must work with nonstandard timeaxis."""
     dat = select_ival(swapaxes(self.dat, 0, 1), [-500, 0], timeaxis=0)
     dat = swapaxes(dat, 0, 1)
     dat2 = select_ival(self.dat, [-500, 0])
     self.assertEqual(dat, dat2)
Example no. 6
 def test_select_ival_copy(self):
     """Select_ival must not modify the argument."""
     cpy = self.dat.copy()
     select_ival(cpy, [-500, 0])
     self.assertEqual(cpy, self.dat)
Example no. 7
    def run_pair(self, epo_train, epo_test, bp_nr, fold_nr, pair_nr):
        class_pair = self.class_pairs[pair_nr]
        self.print_class_pair(class_pair)

        ### Run Training
        epo_train_pair = select_classes(epo_train, class_pair)
        epo_test_pair = select_classes(epo_test, class_pair)

        if self.ival_optimizer is not None:
            best_segment_ival = self.ival_optimizer.optimize(epo_train_pair)
            log.info("Ival {:.0f}ms - {:.0f}ms".format(*best_segment_ival))
            epo_train_pair = select_ival(epo_train_pair, best_segment_ival)
            epo_test_pair = select_ival(epo_test_pair, best_segment_ival)
            epo_train = select_ival(epo_train, best_segment_ival)
            epo_test = select_ival(epo_test, best_segment_ival)

        self.train_labels[fold_nr][pair_nr] = epo_train_pair.axes[0]
        self.test_labels[fold_nr][pair_nr] = epo_test_pair.axes[0]

        ## Calculate CSP
        filters, patterns, variances = calculate_csp(epo_train_pair)
        ## Apply csp, calculate features
        if self.n_filters is not None:
            # take topmost and bottommost filters, e.g.
            # for n_filters=3 0,1,2,-3,-2,-1
            columns = list(range(0, self.n_filters)) + \
                list(range(-self.n_filters, 0))
        else:  # take all possible filters
            columns = list(range(len(filters)))
        train_feature = apply_csp_var_log(epo_train_pair, filters, columns)

        ## Calculate LDA
        clf = lda_train_scaled(train_feature, shrink=True)
        assert not np.any(np.isnan(clf[0]))
        assert not np.isnan(clf[1])
        ## Apply LDA to train
        train_out = lda_apply(train_feature, clf)
        correct_train = train_feature.axes[0] == class_pair[1]
        predicted_train = train_out >= 0
        train_accuracy = (sum(correct_train == predicted_train) /
                          float(len(predicted_train)))

        ### Feature Computation and LDA Application for test
        test_feature = apply_csp_var_log(epo_test_pair, filters, columns)
        test_out = lda_apply(test_feature, clf)
        correct_test = test_feature.axes[0] == class_pair[1]
        predicted_test = test_out >= 0
        test_accuracy = (sum(correct_test == predicted_test) /
                         float(len(predicted_test)))

        ### Feature Computations for full fold (for later multiclass)
        train_feature_full_fold = apply_csp_var_log(epo_train, filters,
                                                    columns)
        test_feature_full_fold = apply_csp_var_log(epo_test, filters, columns)
        ### Store results
        # only store used patterns filters variances
        # to save memory space on disk
        self.store_results(bp_nr, fold_nr, pair_nr, filters[:, columns],
                           patterns[:, columns], variances[columns],
                           train_feature, test_feature,
                           train_feature_full_fold, test_feature_full_fold,
                           clf, train_accuracy, test_accuracy)
        if self.ival_optimizer is not None:
            self.best_ival[bp_nr, fold_nr, pair_nr] = best_segment_ival

        self.print_results(bp_nr, fold_nr, pair_nr)
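The feature step relies on apply_csp_var_log, which presumably follows the standard CSP recipe: project each epoch through the selected spatial filters, take the variance over time, then the log. A minimal NumPy sketch under that assumption (csp_var_log_sketch, its shapes and signature are illustrative, not the project's actual implementation):

import numpy as np

def csp_var_log_sketch(epochs, filters, columns):
    # epochs: (n_epochs, n_samples, n_channels); filters: (n_channels, n_filters)
    # Standard CSP features: project, take variance over time, then log.
    projected = np.dot(epochs, filters[:, columns])   # (n_epochs, n_samples, len(columns))
    return np.log(np.var(projected, axis=1))          # (n_epochs, len(columns))

With n_filters = 3 the pipeline above would call this with columns = [0, 1, 2, -3, -2, -1], i.e. the three topmost and three bottommost CSP filters.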