Example #1
def test_ogdlr_learns():
    # test that this model learns something
    y_pred = ogdlr_before.predict_proba(X)
    ll_before = logloss(y, y_pred)

    y_pred = ogdlr_after.predict_proba(X)
    ll_after = logloss(y, y_pred)
    assert ll_before > ll_after
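
This test compares the log loss of an untrained model (ogdlr_before) with a trained one (ogdlr_after) on the same data. Below is a minimal sketch of the logloss helper the test relies on, assuming the standard binary cross-entropy definition; the fixture names and how they are constructed are assumptions, not part of the package's documented API.

import numpy as np

def logloss(y_true, y_prob, eps=1e-15):
    # standard binary cross-entropy; probabilities are clipped to avoid log(0)
    y_true = np.asarray(y_true, dtype=float)
    y_prob = np.clip(np.asarray(y_prob, dtype=float), eps, 1 - eps)
    return -np.mean(y_true * np.log(y_prob) + (1 - y_true) * np.log(1 - y_prob))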
Example #2
def test_models_same_predictions():
    # for lambda1, lambda2 = 0, OGDLR and FTRLprox should generate the
    # same result. The same goes if hashing is used (except for the
    # rare case of hash collisions). A neural net does not necessarily
    # predict exactly the same outcome.
    y_f = ftrl_after.predict_proba(X)
    y_o = ogdlr_after.predict_proba(X)
    y_h = hash_after.predict_proba(X)
    assert np.allclose(y_f, y_o, atol=1e-15)
    assert np.allclose(y_f, y_h, atol=1e-15)
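
The equivalence asserted here can be checked independently of the package: with lambda1 = lambda2 = 0, the FTRL-proximal closed-form weight collapses to per-coordinate online gradient descent, so both models trace the same weight sequence. The sketch below demonstrates this on a single coordinate with an arbitrary gradient sequence; it follows the textbook FTRL-proximal recursion and is not taken from the OGDLR/FTRLprox source.

import numpy as np

rng = np.random.default_rng(0)
alpha, beta = 0.1, 1.0
grads = rng.normal(size=100)  # stand-in gradient sequence for one coordinate

# online gradient descent with per-coordinate learning rate alpha / (beta + sqrt(n))
w_ogd, n = 0.0, 0.0
ws_ogd = []
for g in grads:
    n += g ** 2
    w_ogd -= alpha / (beta + np.sqrt(n)) * g
    ws_ogd.append(w_ogd)

# FTRL-proximal with lambda1 = lambda2 = 0: w = -z * alpha / (beta + sqrt(n))
z, n, w_ftrl = 0.0, 0.0, 0.0
ws_ftrl = []
for g in grads:
    n_new = n + g ** 2
    sigma = (np.sqrt(n_new) - np.sqrt(n)) / alpha
    z += g - sigma * w_ftrl
    n = n_new
    w_ftrl = -z * alpha / (beta + np.sqrt(n))
    ws_ftrl.append(w_ftrl)

# without regularization the two recursions produce identical weights
assert np.allclose(ws_ogd, ws_ftrl)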
Example #3
def test_predict_proba(n_samples):
    y_prob = ogdlr_after.predict_proba(X[:n_samples])
    assert len(y_prob) == n_samples
    assert all([isinstance(pr, float) for pr in y_prob])
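
Since test_predict_proba takes n_samples as an argument, it is presumably parametrized (or supplied by a fixture). A typical pytest driver might look like the following; the sample counts are illustrative and not taken from the original suite.

import pytest

@pytest.mark.parametrize('n_samples', [1, 10, 100])
def test_predict_proba(n_samples):
    # slice the shared data, predict, and check length and element types
    y_prob = ogdlr_after.predict_proba(X[:n_samples])
    assert len(y_prob) == n_samples
    assert all(isinstance(pr, float) for pr in y_prob)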