Example #1
0
def test_effect_of_beta():
    """Increasing beta dampens learning, so mean |weight| must shrink."""
    def mean_abs_weight(beta):
        # Fit a fresh model on the first 100 samples and summarize its weights.
        model = OGDLR(beta=beta)
        model.fit(X[:100], y[:100])
        return np.abs(model.weights()).mean()

    magnitudes = [mean_abs_weight(10 ** power) for power in range(7)]
    # At the largest beta, learning is essentially frozen: weights ~ 0.
    assert np.allclose(magnitudes[-1], 0, atol=1e-6)
    # Each step up in beta must strictly reduce the mean weight magnitude.
    assert all(np.diff(magnitudes) < 0)
Example #2
0
def test_effect_of_beta():
    """Increasing beta dampens learning, so mean |weight| must shrink."""
    def mean_abs_weight(beta):
        # Fit a fresh model on the first 100 samples and summarize its weights.
        model = OGDLR(beta=beta)
        model.fit(X[:100], y[:100])
        return np.abs(model.weights()).mean()

    magnitudes = [mean_abs_weight(10 ** power) for power in range(7)]
    # At the largest beta, learning is essentially frozen: weights ~ 0.
    assert np.allclose(magnitudes[-1], 0, atol=1e-6)
    # Each step up in beta must strictly reduce the mean weight magnitude.
    assert all(np.diff(magnitudes) < 0)
Example #3
0
def test_effect_of_alpha():
    """Increasing alpha speeds learning, so mean |weight| must grow."""
    alphas = [0, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1]

    def mean_abs_weight(alpha):
        # Fit a fresh model on the first 100 samples and summarize its weights.
        model = OGDLR(alpha=alpha)
        model.fit(X[:100], y[:100])
        return np.abs(model.weights()).mean()

    magnitudes = [mean_abs_weight(a) for a in alphas]
    # alpha == 0 means no updates at all, so weights stay exactly zero.
    assert magnitudes[0] == 0.
    # Each step up in alpha must strictly increase the mean weight magnitude.
    assert all(np.diff(magnitudes) > 0)
Example #4
0
def test_effect_of_alpha():
    """Increasing alpha speeds learning, so mean |weight| must grow."""
    alphas = [0, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1]

    def mean_abs_weight(alpha):
        # Fit a fresh model on the first 100 samples and summarize its weights.
        model = OGDLR(alpha=alpha)
        model.fit(X[:100], y[:100])
        return np.abs(model.weights()).mean()

    magnitudes = [mean_abs_weight(a) for a in alphas]
    # alpha == 0 means no updates at all, so weights stay exactly zero.
    assert magnitudes[0] == 0.
    # Each step up in alpha must strictly increase the mean weight magnitude.
    assert all(np.diff(magnitudes) > 0)