Ejemplo n.º 1
0
def test_logitboost_hastie_fitting():
    """Fit LogitBoost with depth-1 regression stumps on Hastie_10_2.

    Checks that every boosting round's estimator error is below chance
    (0.5) and that the final 0-1 training loss is under 20%.
    """
    clf = LogitBoostClassifier(base_estimator=DecisionTreeRegressor(max_depth=1),
                               n_estimators=30,
                               learning_rate=1.0)
    dataset = Hastie_10_2()
    labels = np.sign(dataset.labels)
    clf.fit(dataset.data, labels)
    assert_array_less(clf.estimator_errors_, 0.5)
    assert zero_one_loss(labels, clf.predict(dataset.data)) < 0.2
Ejemplo n.º 2
0
def test_logitboost_hastie_fitting():
    """LogitBoost smoke test on the Hastie_10_2 dataset.

    All 30 boosting stages must beat random guessing (error < 0.5),
    and the resubstitution 0-1 loss must come out below 0.2.
    """
    booster = LogitBoostClassifier(
        base_estimator=DecisionTreeRegressor(max_depth=1),
        n_estimators=30,
        learning_rate=1.0,
    )
    ds = Hastie_10_2()
    y = np.sign(ds.labels)
    booster.fit(ds.data, y)
    assert_array_less(booster.estimator_errors_, 0.5)
    assert zero_one_loss(y, booster.predict(ds.data)) < 0.2
Ejemplo n.º 3
0
# Example: LogitBoost on the Hastie_10_2 synthetic dataset, tracking the
# per-stage 0-1 error on independent train/test draws.
import numpy as np
from sklearn.metrics import zero_one_loss
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import AdaBoostClassifier

from skboost.logitboost import LogitBoostClassifier
from skboost.datasets import MUSK1, MUSK2, Hastie_10_2
from skboost.milboost.softmax import *

# Alternative MUSK datasets, kept for reference:
#ds_train = MUSK2()
#print(ds_train)
#ds_test = MUSK1()
#print(ds_test)
ds_train = Hastie_10_2()
# Different random seed so the test draw is independent of the training draw.
ds_test = Hastie_10_2(random_state=43143)

classifier = LogitBoostClassifier(
    base_estimator=DecisionTreeRegressor(max_depth=3),
    n_estimators=30,
    learning_rate=1.0)
# Labels are collapsed to {-1, +1} via sign before fitting.
classifier.fit(ds_train.data, np.sign(ds_train.labels))
print(classifier)
classifier.predict_proba(ds_train.data)

# Record the 0-1 training error after each boosting stage.
# (Renamed from milboost_* — the model here is LogitBoost, not MILBoost.)
logitboost_train_error = np.zeros((len(classifier.estimators_), ))
for i, y_pred in enumerate(classifier.staged_predict(ds_train.data)):
    logitboost_train_error[i] = zero_one_loss(np.sign(ds_train.labels), y_pred)

# Record the 0-1 test error after each boosting stage.
logitboost_test_error = np.zeros((len(classifier.estimators_), ))
for i, y_pred in enumerate(classifier.staged_predict(ds_test.data)):
    logitboost_test_error[i] = zero_one_loss(np.sign(ds_test.labels), y_pred)
Ejemplo n.º 4
0
# Example: LogitBoost on the Hastie_10_2 dataset with per-stage error tracking.
# Fix: DecisionTreeRegressor was used below but never imported in this
# example (NameError at runtime); np and zero_one_loss are now imported
# explicitly instead of relying on the softmax wildcard import.
import numpy as np
from sklearn.metrics import zero_one_loss
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostClassifier

from skboost.logitboost import LogitBoostClassifier
from skboost.datasets import MUSK1, MUSK2, Hastie_10_2
from skboost.milboost.softmax import *

# Alternative MUSK datasets, kept for reference:
#ds_train = MUSK2()
#print(ds_train)
#ds_test = MUSK1()
#print(ds_test)
ds_train = Hastie_10_2()
# Different random seed so the test draw is independent of the training draw.
ds_test = Hastie_10_2(random_state=43143)


classifier = LogitBoostClassifier(
    base_estimator=DecisionTreeRegressor(max_depth=3),
    n_estimators=30,
    learning_rate=1.0)
# Labels are collapsed to {-1, +1} via sign before fitting.
classifier.fit(ds_train.data, np.sign(ds_train.labels))
print(classifier)
classifier.predict_proba(ds_train.data)

# Record the 0-1 training error after each boosting stage.
# (Renamed from milboost_* — the model here is LogitBoost, not MILBoost.)
logitboost_train_error = np.zeros((len(classifier.estimators_),))
for i, y_pred in enumerate(classifier.staged_predict(ds_train.data)):
    logitboost_train_error[i] = zero_one_loss(np.sign(ds_train.labels), y_pred)

# Record the 0-1 test error after each boosting stage.
logitboost_test_error = np.zeros((len(classifier.estimators_),))
for i, y_pred in enumerate(classifier.staged_predict(ds_test.data)):
    logitboost_test_error[i] = zero_one_loss(np.sign(ds_test.labels), y_pred)