Example #1
0
 def train_classifiers(self):
     """Fit one shrinkage-LDA classifier per (fold, class-pair) cell.

     Fills ``self.clf`` — an object array indexed by
     ``[fold, class_pair]`` — from the precomputed CSP features in
     ``self.train_feature``.
     """
     shape = (len(self.binary_csp.folds), len(self.binary_csp.class_pairs))
     self.clf = np.empty(shape, dtype=object)
     for fold_i, pair_i in np.ndindex(*shape):
         # lda_train_scaled returns the fitted classifier (weights, bias)
         self.clf[fold_i, pair_i] = lda_train_scaled(
             self.train_feature[fold_i, pair_i], shrink=True)
Example #2
0
 def train_classifiers(self):
     """Train a shrinkage LDA for every cross-validation fold and class pair.

     Result is stored in ``self.clf`` as a 2-D object array of shape
     ``(n_folds, n_class_pairs)``.
     """
     n_folds = len(self.binary_csp.folds)
     n_pairs = len(self.binary_csp.class_pairs)
     self.clf = np.empty((n_folds, n_pairs), dtype=object)
     for f in range(n_folds):
         for p in range(n_pairs):
             feature = self.train_feature[f, p]
             self.clf[f, p] = lda_train_scaled(feature, shrink=True)
def test_lda_train_scaled():
    """Regression test for shrinkage-LDA training.

    Expected weights/bias were cross-checked against a matlab
    implementation (weights agreed within 0.01, bias within 0.1).
    """
    # SimpleNamespace is the idiomatic stdlib attribute bag; the original
    # used `lambda: None` as a hack to get an object with settable attrs.
    from types import SimpleNamespace
    featurevector = SimpleNamespace(
        data=np.array([[8.2, 3.5, 5.6], [9.1, 1.2, 2.4],
                       [8.8, 3.5, 5.6], [7.1, 1.5, 2.9]]),
        # axes[0] holds the per-sample class labels, axes[1] is unused here
        axes=[[1, 0, 1, 0], []],
    )
    w, b = lda_train_scaled(featurevector, shrink=True)
    assert np.allclose([0.13243638, 0.31727594, 0.42877362], w)
    assert np.allclose(-3.6373072863307785, b)
def test_lda_train_scaled():
    """Check shrinkage-LDA weights and bias against matlab-verified values.

    The matlab reference agreed within 0.01 on the weights and within
    0.1 on the bias.
    """
    # cheap mutable attribute container (only .data/.axes are read)
    fv = lambda: None
    fv.data = np.array(
        [[8.2, 3.5, 5.6],
         [9.1, 1.2, 2.4],
         [8.8, 3.5, 5.6],
         [7.1, 1.5, 2.9]])
    fv.axes = [[1, 0, 1, 0], []]
    weights, bias = lda_train_scaled(fv, shrink=True)
    assert np.allclose([0.13243638, 0.31727594, 0.42877362], weights)
    assert np.allclose(-3.6373072863307785, bias)
Example #5
0
 def cross_validate_lda(features):
     """Estimate shrinkage-LDA accuracy by 5-fold cross-validation.

     Folds are taken in order (no shuffling). ``features.axes[0]`` is
     treated as the label vector; a sample is counted as the "second"
     class when its label equals the maximum label in the test fold,
     and the LDA output is thresholded at 0.
     """
     accuracies = []
     splitter = KFold(features.data.shape[0], n_folds=5, shuffle=False)
     for train_inds, test_inds in splitter:
         train_set = features.copy(data=features.data[train_inds],
                                   axes=[features.axes[0][train_inds]])
         test_set = features.copy(data=features.data[test_inds],
                                  axes=[features.axes[0][test_inds]])
         model = lda_train_scaled(train_set, shrink=True)
         outputs = lda_apply(test_set, model)
         is_second_class = test_set.axes[0] == np.max(test_set.axes[0])
         predicted_second = outputs >= 0
         accuracies.append(sum(is_second_class == predicted_second) /
                           float(len(predicted_second)))
     return np.mean(accuracies)
Example #6
0
 def cross_validate_lda(features):
     """Return the mean 5-fold (unshuffled) test accuracy of shrinkage LDA.

     Labels come from ``features.axes[0]``; "second class" membership is
     defined as label == max label of the test fold, and predictions are
     ``lda_apply`` outputs thresholded at zero.
     """
     n_samples = features.data.shape[0]
     fold_accuracies = []
     for train_idx, test_idx in KFold(n_samples, n_folds=5, shuffle=False):
         train_feats = features.copy(
             data=features.data[train_idx],
             axes=[features.axes[0][train_idx]])
         test_feats = features.copy(
             data=features.data[test_idx],
             axes=[features.axes[0][test_idx]])
         classifier = lda_train_scaled(train_feats, shrink=True)
         scores = lda_apply(test_feats, classifier)
         truth = test_feats.axes[0] == np.max(test_feats.axes[0])
         prediction = scores >= 0
         n_correct = sum(truth == prediction)
         fold_accuracies.append(n_correct / float(len(prediction)))
     return np.mean(fold_accuracies)
Example #7
0
    def run_pair(self, epo_train, epo_test, bp_nr, fold_nr, pair_nr):
        """Train and evaluate CSP + shrinkage-LDA for one class pair of one fold.

        Computes CSP filters on the pair's training epochs, extracts
        log-variance features, fits an LDA, evaluates train/test accuracy,
        also computes features for the *full* fold (for later multiclass
        combination), and stores everything via ``store_results``.
        """
        class_pair = self.class_pairs[pair_nr]
        self.print_class_pair(class_pair)

        ### Run Training
        epo_train_pair = select_classes(epo_train, class_pair)
        epo_test_pair = select_classes(epo_test, class_pair)

        if self.ival_optimizer is not None:
            best_segment_ival = self.ival_optimizer.optimize(epo_train_pair)
            log.info("Ival {:.0f}ms - {:.0f}ms".format(*best_segment_ival))
            epo_train_pair = select_ival(epo_train_pair, best_segment_ival)
            epo_test_pair = select_ival(epo_test_pair, best_segment_ival)
            # restrict the full-fold epochs too, so the full-fold features
            # below are computed on the same interval
            epo_train = select_ival(epo_train, best_segment_ival)
            epo_test = select_ival(epo_test, best_segment_ival)

        self.train_labels[fold_nr][pair_nr] = epo_train_pair.axes[0]
        self.test_labels[fold_nr][pair_nr] = epo_test_pair.axes[0]

        ## Calculate CSP
        filters, patterns, variances = calculate_csp(epo_train_pair)
        ## Apply csp, calculate features
        if self.n_filters is not None:
            # take topmost and bottommost filters, e.g.
            # for n_filters=3 0,1,2,-3,-2,-1
            # BUGFIX: materialize as lists — `range + range` raises
            # TypeError on Python 3 (concatenation was only valid for the
            # Python 2 list-returning range)
            columns = list(range(0, self.n_filters)) + \
                list(range(-self.n_filters, 0))
        else:  # take all possible filters
            columns = list(range(len(filters)))
        train_feature = apply_csp_var_log(epo_train_pair, filters, columns)

        ## Calculate LDA
        clf = lda_train_scaled(train_feature, shrink=True)
        assert not np.any(np.isnan(clf[0]))
        assert not np.isnan(clf[1])
        ## Apply LDA to train
        # label of the pair's second class is the "positive" LDA side
        train_out = lda_apply(train_feature, clf)
        correct_train = train_feature.axes[0] == class_pair[1]
        predicted_train = train_out >= 0
        train_accuracy = (sum(correct_train == predicted_train) /
                          float(len(predicted_train)))

        ### Feature Computation and LDA Application for test
        test_feature = apply_csp_var_log(epo_test_pair, filters, columns)
        test_out = lda_apply(test_feature, clf)
        correct_test = test_feature.axes[0] == class_pair[1]
        predicted_test = test_out >= 0
        test_accuracy = (sum(correct_test == predicted_test) /
                         float(len(predicted_test)))

        ### Feature Computations for full fold (for later multiclass)
        train_feature_full_fold = apply_csp_var_log(epo_train, filters,
                                                    columns)
        test_feature_full_fold = apply_csp_var_log(epo_test, filters, columns)
        ### Store results
        # only store used patterns filters variances
        # to save memory space on disk
        self.store_results(bp_nr, fold_nr, pair_nr, filters[:, columns],
                           patterns[:, columns], variances[columns],
                           train_feature, test_feature,
                           train_feature_full_fold, test_feature_full_fold,
                           clf, train_accuracy, test_accuracy)
        if self.ival_optimizer is not None:
            self.best_ival[bp_nr, fold_nr, pair_nr] = best_segment_ival

        self.print_results(bp_nr, fold_nr, pair_nr)
Example #8
0
    def run_pair(self, epo_train, epo_test, bp_nr, fold_nr, pair_nr):
        """Run CSP + shrinkage-LDA training/evaluation for one class pair.

        For the given fold and band-pass index, this computes CSP filters,
        log-variance features and an LDA on the pair's training epochs,
        evaluates train and test accuracy, computes full-fold features for
        later multiclass use, and stores all results.
        """
        class_pair = self.class_pairs[pair_nr]
        self.print_class_pair(class_pair)

        ### Run Training
        epo_train_pair = select_classes(epo_train, class_pair)
        epo_test_pair = select_classes(epo_test, class_pair)
        if self.ival_optimizer is not None:
            best_segment_ival = self.ival_optimizer.optimize(epo_train_pair)
            log.info("Ival {:.0f}ms - {:.0f}ms".format(*best_segment_ival))
            epo_train_pair = select_ival(epo_train_pair, best_segment_ival)
            epo_test_pair = select_ival(epo_test_pair, best_segment_ival)
            # restrict the full-fold epochs to the same optimized interval
            epo_train = select_ival(epo_train, best_segment_ival)
            epo_test = select_ival(epo_test, best_segment_ival)

        self.train_labels[fold_nr][pair_nr] = epo_train_pair.axes[0]
        self.test_labels[fold_nr][pair_nr] = epo_test_pair.axes[0]

        ## Calculate CSP
        filters, patterns, variances = calculate_csp(epo_train_pair)
        ## Apply csp, calculate features
        if self.n_filters is not None:
            # take topmost and bottommost filters, e.g.
            # for n_filters=3 0,1,2,-3,-2,-1
            # BUGFIX: wrap in list() — Python 3 range objects cannot be
            # concatenated with `+` (Python 2 leftover)
            columns = list(range(0, self.n_filters)) + list(
                range(-self.n_filters, 0))
        else:  # take all possible filters
            columns = list(range(len(filters)))
        train_feature = apply_csp_var_log(epo_train_pair, filters, columns)

        ## Calculate LDA
        clf = lda_train_scaled(train_feature, shrink=True)
        assert not np.any(np.isnan(clf[0]))
        assert not np.isnan(clf[1])
        ## Apply LDA to train
        # second class of the pair corresponds to nonnegative LDA output
        train_out = lda_apply(train_feature, clf)
        correct_train = train_feature.axes[0] == class_pair[1]
        predicted_train = train_out >= 0
        train_accuracy = sum(correct_train == predicted_train) / float(len(predicted_train))

        ### Feature Computation and LDA Application for test
        test_feature = apply_csp_var_log(epo_test_pair, filters, columns)
        test_out = lda_apply(test_feature, clf)
        correct_test = test_feature.axes[0] == class_pair[1]
        predicted_test = test_out >= 0
        test_accuracy = sum(correct_test == predicted_test) / float(len(predicted_test))

        ### Feature Computations for full fold (for later multiclass)
        train_feature_full_fold = apply_csp_var_log(epo_train, filters, columns)
        test_feature_full_fold = apply_csp_var_log(epo_test, filters, columns)
        ### Store results
        # only store used patterns filters variances
        # to save memory space on disk
        self.store_results(
            bp_nr,
            fold_nr,
            pair_nr,
            filters[:, columns],
            patterns[:, columns],
            variances[columns],
            train_feature,
            test_feature,
            train_feature_full_fold,
            test_feature_full_fold,
            clf,
            train_accuracy,
            test_accuracy,
        )
        if self.ival_optimizer is not None:
            self.best_ival[bp_nr, fold_nr, pair_nr] = best_segment_ival

        self.print_results(bp_nr, fold_nr, pair_nr)