Example #1
    def _score_combination(self, scores):
        """Combine univariate detector scores into a single score vector.

        Dispatches on ``self.method`` ('average', 'maximization', or
        'median'). An unrecognized method falls through and implicitly
        returns ``None``, matching the original behaviour.
        """
        dispatch = {
            # Weighted mean across estimators, weights from the model.
            'average': lambda s: average(s, estimator_weights=self.weights),
            'maximization': maximization,
            'median': median,
        }
        combine = dispatch.get(self.method)
        if combine is not None:
            return combine(scores)
Example #2
                X_test,
                n_estimators,
                # rp_flags[starts[i]:starts[i + 1]],
                jl_transformers,
                approx_flags[starts[i]:starts[i + 1]],
                verbose=True) for i in range(n_jobs))

    # NOTE(review): this fragment begins inside a parallel fan-out call
    # (opened above this view) that scores X_test chunk-wise per job.
    # Timing for the original (non-accelerated) decision_function pass.
    print('Orig decision_function time:', time.time() - start)
    print()

    # unfold and generate the label matrix
    # Stitch per-job score chunks back into one (n_samples, n_estimators)
    # matrix; job i owns the column block starts[i]:starts[i + 1].
    predicted_scores_orig = np.zeros([X_test.shape[0], n_estimators])
    for i in range(n_jobs):
        predicted_scores_orig[:, starts[i]:starts[i + 1]] = np.asarray(
            all_results_scores[i]).T
    ##########################################################################
    # Standardize both score matrices so the combination rules below
    # compare scores on the same scale.
    # NOTE(review): predicted_scores is produced before this fragment —
    # confirm against the unseen lines above.
    predicted_scores = standardizer(predicted_scores)
    predicted_scores_orig = standardizer(predicted_scores_orig)

    # Compare original vs. new scores under four combination strategies:
    # average, maximization, AOM, and MOA.
    evaluate_print('orig', y_test, average(predicted_scores_orig))
    evaluate_print('new', y_test, average(predicted_scores))

    evaluate_print('orig max', y_test, maximization(predicted_scores_orig))
    evaluate_print('new max', y_test, maximization(predicted_scores))

    evaluate_print('orig aom', y_test, aom(predicted_scores_orig))
    evaluate_print('new aom', y_test, aom(predicted_scores))

    evaluate_print('orig moa', y_test, moa(predicted_scores_orig))
    evaluate_print('new moa', y_test, moa(predicted_scores))
Example #3
        train_test_split(X, y, test_size=0.4, random_state=42)

    # NOTE(review): the line above is the continuation of a tuple-unpack
    # assignment (X_train, X_test, y_train, y_test = ...) that starts
    # before this fragment — confirm.
    # Fraction of positives in the labels, used as the contamination
    # level for every base detector.
    contamination = y.sum() / len(y)
    base_estimators = get_estimators_small(contamination)

    # SUOD ensemble over 6 workers with balanced parallel scheduling
    # (bps_flag) and global model approximation enabled.
    model = SUOD(base_estimators=base_estimators,
                 n_jobs=6,
                 bps_flag=True,
                 contamination=contamination,
                 approx_flag_global=True)

    model.fit(X_train)  # fit all models with X_train
    model.approximate(X_train)  # conduct model approximation if it is enabled
    predicted_labels = model.predict(X_test)  # predict binary labels
    predicted_scores = model.decision_function(X_test)  # predict raw scores
    predicted_probs = model.predict_proba(X_test)  # predict outlier probabilities

    ###########################################################################
    # Evaluate the ensemble under several combination strategies.
    evaluate_print('majority vote', y_test, majority_vote(predicted_labels))
    evaluate_print('average', y_test, average(predicted_scores))
    evaluate_print('maximization', y_test, maximization(predicted_scores))

    # Single-model baselines for comparison.
    clf = LOF()
    clf.fit(X_train)
    evaluate_print('LOF', y_test, clf.decision_function(X_test))

    clf = IForest()
    clf.fit(X_train)
    evaluate_print('IForest', y_test, clf.decision_function(X_test))
Example #4
 def test_weighted_average(self):
     """Weighted averaging of the fixture scores matches the hand-computed vector."""
     expected = np.array([1.75, 3.75, 5.75])
     combined = average(self.scores, self.weights)
     assert_allclose(combined, expected)
Example #5
 def test_average(self):
     """Unweighted averaging of the fixture scores matches the hand-computed vector."""
     expected = np.array([1.5, 3.5, 5.5])
     combined = average(self.scores)
     assert_allclose(combined, expected)