示例#1
0
def test_binomial_deviance():
    """Check BinomialDeviance loss against the alternative ESLII definitions."""
    bd = BinomialDeviance(2)

    # A zero prediction has the same deviance for y in {0, 1}.
    assert_equal(bd(np.array([0.0]), np.array([0.0])),
                 bd(np.array([1.0]), np.array([0.0])))

    # Confident correct predictions drive the deviance to ~0.
    assert_almost_equal(
        bd(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])), 0.0)
    assert_almost_equal(
        bd(np.array([1.0, 0.0, 0.0]), np.array([100.0, -100.0, -100.0])), 0.0)

    # Check for the same results as the alternative definition of the
    # deviance (from ESLII).  Plain defs instead of lambda assignment (E731).
    def alt_dev(y, pred):
        return np.mean(np.logaddexp(0.0, -2.0 * (2.0 * y - 1) * pred))

    test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
                 (np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
                 (np.array([0.0, 0.0, 0.0]),
                  np.array([-100.0, -100.0, -100.0])),
                 (np.array([1.0, 1.0, 1.0]),
                  np.array([-100.0, -100.0, -100.0]))]

    for datum in test_data:
        assert_almost_equal(bd(*datum), alt_dev(*datum))

    # Check the negative gradient against the alternative definition as well.
    def alt_ng(y, pred):
        return (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))

    for datum in test_data:
        assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_binomial_deviance():
    # Verify the binomial deviance loss and its negative gradient agree
    # with the alternative formulations given in ESLII.
    loss = BinomialDeviance(2)

    # For a zero prediction the deviance is identical for y = 0 and y = 1.
    zero_pred = np.array([0.0])
    assert_equal(loss(np.array([0.0]), zero_pred),
                 loss(np.array([1.0]), zero_pred))

    # Strongly correct predictions give (almost) zero deviance.
    ones = np.array([1.0, 1.0, 1.0])
    big = np.array([100.0, 100.0, 100.0])
    assert_almost_equal(loss(ones, big), 0.0)
    assert_almost_equal(
        loss(np.array([1.0, 0.0, 0.0]), np.array([100.0, -100.0, -100.0])), 0)

    # Alternative definition of the deviance (from ESLII).
    alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
                                                   (2.0 * y - 1) * pred))
    zeros = np.array([0.0, 0.0, 0.0])
    neg_big = np.array([-100.0, -100.0, -100.0])
    cases = [(ones, big), (zeros, big), (zeros, neg_big), (ones, neg_big)]

    for y_true, y_pred in cases:
        assert_almost_equal(loss(y_true, y_pred), alt_dev(y_true, y_pred))

    # check the gradient against the
    alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
    for y_true, y_pred in cases:
        assert_almost_equal(loss.negative_gradient(y_true, y_pred),
                            alt_ng(y_true, y_pred))
示例#3
0
def test_gradient_boosting_estimator_with_binomial_deviance_loss():
    """Booster with an Earth base learner fits a binary problem well.

    Checks training accuracy above 90% and that every predicted
    probability lies in [0, 1].
    """
    np.random.seed(0)
    X, y = make_classification(n_classes=2)
    loss_function = BinomialDeviance(2)
    model = Booster(Earth(max_degree=2, use_fast=True, max_terms=10),
                    loss_function)
    model.fit(X, y)
    # np.mean of the boolean array is the accuracy; clearer than
    # np.sum(...) / float(n).
    assert_greater(np.mean(model.predict(X) == y), .90)
    # Compute the probabilities once and bound-check both sides.
    proba = model.predict_proba(X)
    assert_true(np.all(0 <= proba))
    assert_true(np.all(proba <= 1))
示例#4
0
def test_sklearn2code_export():
    # Round-trip a fitted booster through sklearn2code and confirm the
    # generated module reproduces the model's exported methods.
    np.random.seed(0)
    X, y = make_classification(n_classes=2)
    X = DataFrame(X, columns=['x%d' % i for i in range(X.shape[1])])
    loss = BinomialDeviance(2)
    booster = Booster(Earth(max_degree=2, use_fast=True, max_terms=10), loss)
    booster.fit(X, y)
    methods = ['predict', 'predict_proba', 'transform']
    generated = sklearn2code(booster, methods, numpy_flat)
    compiled = exec_module('test_module', generated)
    assert_correct_exported_module(booster, compiled, methods, dict(X=X), X)
 def __init__(self):
     """Construct with the number of classes fixed at 2 (binary deviance)."""
     BinomialDeviance.__init__(self, 2)