Example #1
import numpy as np
from numpy.testing import assert_array_less
from sklearn.metrics import zero_one_loss
from sklearn.tree import DecisionTreeRegressor
from skboost.gentleboost import GentleBoostClassifier
from skboost.datasets import Hastie_10_2


def test_gentleboost_hastie_fitting():
    # Boost 30 regression stumps on the synthetic Hastie 10.2 problem.
    c = GentleBoostClassifier(
        base_estimator=DecisionTreeRegressor(max_depth=1),
        n_estimators=30,
        learning_rate=1.0
    )
    data = Hastie_10_2()
    c.fit(data.data, np.sign(data.labels))
    # Each weak learner should beat chance; the ensemble should stay below 20% training error.
    assert_array_less(c.estimator_errors_, 0.5)
    assert zero_one_loss(np.sign(data.labels), c.predict(data.data)) < 0.2
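The skboost dataset wrapper above is assumed to expose the standard Hastie et al. 10.2 synthetic problem (10 Gaussian features, labels in {-1, +1}). Without skboost installed, a rough stand-in can be sketched with scikit-learn alone; AdaBoost with its default depth-1 stumps is used here only as a placeholder for GentleBoostClassifier, not as an equivalent:

# Sketch only: make_hastie_10_2 and AdaBoostClassifier are scikit-learn APIs,
# standing in for skboost's Hastie_10_2 / GentleBoostClassifier pair.
from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import zero_one_loss

X, y = make_hastie_10_2(n_samples=12000, random_state=0)  # y is already in {-1, +1}
stub = AdaBoostClassifier(n_estimators=30, learning_rate=1.0)  # default base learner is a depth-1 stump
stub.fit(X, y)
print(zero_one_loss(y, stub.predict(X)))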
Example #2
import numpy as np

from sklearn.metrics import zero_one_loss
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor

from skboost.gentleboost import GentleBoostClassifier
from skboost.datasets import MUSK1, MUSK2, Hastie_10_2
from skboost.milboost.softmax import *

ds_train = MUSK2()
print(ds_train)
ds_test = MUSK1()
print(ds_test)
#ds_train = Hastie_10_2()
#ds_test = Hastie_10_2(random_state=1)

classifier = GentleBoostClassifier(
    base_estimator=DecisionTreeRegressor(max_depth=1),
    n_estimators=20,
    learning_rate=1.0)
classifier.fit(ds_train.data, np.sign(ds_train.labels))
print(classifier)
print(classifier.predict_proba(ds_test.data))

# Record the training-set error after each boosting stage.
milboost_train_error = np.zeros((len(classifier.estimators_), ))
for i, y_pred in enumerate(classifier.staged_predict(ds_train.data)):
    milboost_train_error[i] = zero_one_loss(np.sign(ds_train.labels), y_pred)

# Record the test-set error after each boosting stage.
milboost_test_error = np.zeros((len(classifier.estimators_), ))
for i, y_pred in enumerate(classifier.staged_predict(ds_test.data)):
    milboost_test_error[i] = zero_one_loss(np.sign(ds_test.labels), y_pred)
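The loops above collect staged errors but never visualize them. A minimal follow-up sketch (matplotlib assumed to be available; the array and classifier names are taken from the snippet above):

import matplotlib.pyplot as plt

# Plot train vs. test zero-one loss as boosting stages are added.
stages = np.arange(1, len(classifier.estimators_) + 1)
plt.plot(stages, milboost_train_error, label='train error (MUSK2)')
plt.plot(stages, milboost_test_error, label='test error (MUSK1)')
plt.xlabel('Boosting stage')
plt.ylabel('Zero-one loss')
plt.legend()
plt.show()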