Example #1
def test_label_encoder_integration_sklearn_ensembles():
    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers(
        encode_labels=['no', 'yes'])

    knorau = KNORAU(pool_classifiers)
    knorau.fit(X_dsel, y_dsel)
    assert np.isclose(knorau.score(X_test, y_test), 0.9787234042553191)
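
This test relies on a `setup_classifiers` helper defined elsewhere in the test module, together with module-level imports of `np` and `KNORAU`. Below is a minimal sketch of what such a helper could look like, assuming the breast cancer dataset, a bagging pool of shallow decision trees, and a 50/25/25 train/DSEL/test split; the real fixture in the test suite may differ.

import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier


def setup_classifiers(encode_labels=None):
    # Hypothetical helper: build a binary dataset, optionally rename the 0/1
    # targets with the given string labels, split it into train/DSEL/test,
    # and train a pool of classifiers on the training portion.
    rng = np.random.RandomState(123456)
    X, y = load_breast_cancer(return_X_y=True)
    if encode_labels is not None:
        y = np.take(encode_labels, y)  # e.g. ['no', 'yes'] -> string labels
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=rng)
    X_dsel, X_test, y_dsel, y_test = train_test_split(
        X_test, y_test, test_size=0.5, random_state=rng)
    pool_classifiers = BaggingClassifier(DecisionTreeClassifier(max_depth=3),
                                         n_estimators=10, random_state=rng)
    pool_classifiers.fit(X_train, y_train)
    return pool_classifiers, X_dsel, y_dsel, X_test, y_test
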
Example #2
def test_label_encoder_integration_sklearn_ensembles_not_encoding():

    rng = np.random.RandomState(123456)
    X_dsel, X_test, X_train, y_dsel, y_test, y_train = load_dataset(
        ['yes', 'no'], rng)

    # Train a pool using AdaBoost, which is prone to label encoding problems.
    pool_classifiers = AdaBoostClassifier(n_estimators=10, random_state=rng)
    pool_classifiers.fit(X_train, y_train)

    knorau = KNORAU(pool_classifiers)
    knorau.fit(X_dsel, y_dsel)
    assert np.isclose(knorau.score(X_test, y_test), 0.9627659574468085)
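
`load_dataset` is another test helper that is not shown here. A possible sketch, under similar assumptions as above (breast cancer data, optional string labels, a 50/25/25 train/DSEL/test split), returning the six arrays in the order unpacked above:

import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split


def load_dataset(encode_labels, rng):
    # Hypothetical helper: load a binary dataset, optionally map the 0/1
    # targets to the given string labels, and return train/DSEL/test splits.
    X, y = load_breast_cancer(return_X_y=True)
    if encode_labels is not None:
        y = np.take(encode_labels, y)  # e.g. ['yes', 'no'] -> string labels
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=rng)
    X_dsel, X_test, y_dsel, y_test = train_test_split(
        X_test, y_test, test_size=0.5, random_state=rng)
    return X_dsel, X_test, X_train, y_dsel, y_test, y_train
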
Example #3
def test_label_encoder_integration_list_classifiers():
    rng = np.random.RandomState(123456)
    X_dsel, X_test, X_train, y_dsel, y_test, y_train = load_dataset(
        encode_labels=['no', 'yes'], rng=rng)

    pool_classifiers = [LogisticRegression(), SVC(probability=True)]
    for clf in pool_classifiers:
        clf.fit(X_train, y_train)

    knorau = KNORAU(pool_classifiers)
    knorau.fit(X_dsel, y_dsel)

    this_score = knorau.score(X_test, y_test)
    assert np.isclose(this_score, 0.9787234042553191)
Example #4
def test_knorau(knn_methods):
    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers()

    knorau = KNORAU(pool_classifiers, knn_classifier=knn_methods)
    knorau.fit(X_dsel, y_dsel)
    assert np.isclose(knorau.score(X_test, y_test), 0.9787234042553191)
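
`knn_methods` is a pytest fixture that parametrizes the `knn_classifier` argument of the DS classes (which accepts values such as 'knn' or 'faiss'). A hypothetical version of such a fixture, skipping the FAISS backend when the library is unavailable:

import pytest


@pytest.fixture(params=['knn', 'faiss'])
def knn_methods(request):
    # Hypothetical fixture: run the test once per region-of-competence
    # backend accepted by the `knn_classifier` argument.
    if request.param == 'faiss':
        pytest.importorskip('faiss')  # skip when FAISS is not installed
    return request.param
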
Example #5
# Stacking models can also be found in the static module. In this experiment
# we consider two types of stacking: one using Logistic Regression as the
# meta-classifier (default configuration) and the other using a Decision Tree.
stacked_lr = StackedClassifier(pool_classifiers, random_state=rng)
stacked_dt = StackedClassifier(pool_classifiers,
                               random_state=rng,
                               meta_classifier=DecisionTreeClassifier())
# Fitting the DS techniques
knorau.fit(X_dsel, y_dsel)
kne.fit(X_dsel, y_dsel)
desp.fit(X_dsel, y_dsel)
metades.fit(X_dsel, y_dsel)
ola.fit(X_dsel, y_dsel)
mcb.fit(X_dsel, y_dsel)

# Fitting the stacking models
stacked_lr.fit(X_dsel, y_dsel)
stacked_dt.fit(X_dsel, y_dsel)

# Calculate classification accuracy of each technique
print('Evaluating DS techniques:')
print('Classification accuracy of majority voting over the pool: ',
      model_voting.score(X_test, y_test))
print('Classification accuracy of KNORA-U: ', knorau.score(X_test, y_test))
print('Classification accuracy of KNORA-E: ', kne.score(X_test, y_test))
print('Classification accuracy of DESP: ', desp.score(X_test, y_test))
print('Classification accuracy of META-DES: ', metades.score(X_test, y_test))
print('Classification accuracy of OLA: ', ola.score(X_test, y_test))
print('Classification accuracy of Stacking LR: ',
      stacked_lr.score(X_test, y_test))
print('Classification accuracy of Stacking DT: ',
      stacked_dt.score(X_test, y_test))
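
The snippet above uses objects (`rng`, `pool_classifiers`, `model_voting`, `knorau`, `kne`, `desp`, `metades`, `ola`, `mcb`) that are created earlier in the full example script. A minimal sketch of how they could be set up, with an illustrative dataset and pool (the original script's choices may differ):

import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

from deslib.dcs import MCB, OLA
from deslib.des import DESP, KNORAE, KNORAU, METADES
from deslib.static import StackedClassifier  # used in the snippet above

rng = np.random.RandomState(42)
X, y = load_breast_cancer(return_X_y=True)

# 50% train, 25% DSEL, 25% test (illustrative split)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=rng)
X_dsel, X_test, y_dsel, y_test = train_test_split(
    X_test, y_test, test_size=0.5, random_state=rng)

# Pool of weak learners; its aggregated prediction doubles as the
# `model_voting` baseline in this sketch.
pool_classifiers = BaggingClassifier(DecisionTreeClassifier(max_depth=3),
                                     n_estimators=10, random_state=rng)
pool_classifiers.fit(X_train, y_train)
model_voting = pool_classifiers

# Dynamic selection techniques built on the same fitted pool.
knorau = KNORAU(pool_classifiers, random_state=rng)
kne = KNORAE(pool_classifiers, random_state=rng)
desp = DESP(pool_classifiers, random_state=rng)
metades = METADES(pool_classifiers, random_state=rng)
ola = OLA(pool_classifiers, random_state=rng)
mcb = MCB(pool_classifiers, random_state=rng)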