Example #1
def test_not_robust_regression(loss, weighting):
    # `loss` and `weighting` come from pytest parametrization; rng, X_r and
    # y_r are module-level fixtures of the test file (see the sketch below).
    # With k=0, a very large c and no burn-in, the robust weighting is
    # effectively switched off, so the estimator should track a plain
    # SGDRegressor on clean data.
    reg = RobustWeightedRegressor(
        loss=loss,
        max_iter=100,
        weighting=weighting,
        k=0,
        c=1e7,
        burn_in=0,
        random_state=rng,
    )
    reg_not_rob = SGDRegressor(loss=loss, random_state=rng)
    reg.fit(X_r, y_r)
    reg_not_rob.fit(X_r, y_r)
    pred1 = reg.predict(X_r)
    pred2 = reg_not_rob.predict(X_r)
    # Mean pointwise distance between the two prediction vectors must stay small.
    difference = [
        np.linalg.norm(pred1[i] - pred2[i]) for i in range(len(pred1))
    ]
    assert np.mean(difference) < 1
    # score() must agree with r2_score computed on the same predictions.
    assert_almost_equal(reg.score(X_r, y_r), r2_score(y_r, reg.predict(X_r)))
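Neither the arguments nor the data are defined inside the snippet: loss and weighting are injected by pytest parametrization, while rng, X_r and y_r are module-level fixtures of the test file. A minimal sketch of what that surrounding setup could look like; all concrete values (dataset size, noise level, parameter lists) are illustrative assumptions, not the original module's code:

import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from sklearn.datasets import make_regression
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import median_absolute_error, r2_score
from sklearn_extra.robust import RobustWeightedRegressor

# Shared random state and a small, clean regression problem (assumed setup).
rng = np.random.RandomState(42)
X_r, y_r = make_regression(n_samples=100, n_features=1, noise=0.1, random_state=rng)

# The tests would then be parametrized roughly like this ('huber' is a loss
# accepted by both RobustWeightedRegressor and SGDRegressor):
# @pytest.mark.parametrize("loss", ["huber"])
# @pytest.mark.parametrize("weighting", ["huber", "mom"])
# def test_not_robust_regression(loss, weighting): ...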
def test_corrupted_regression(loss, weighting, k, c):
    # X_rc, y_rc is a regression dataset containing corrupted samples (see the
    # sketch below); despite the corruption, the robust estimator should keep
    # a small median absolute error on it.
    reg = RobustWeightedRegressor(
        loss=loss,
        max_iter=50,
        weighting=weighting,
        k=k,
        c=c,
        random_state=rng,
    )
    reg.fit(X_rc, y_rc)
    score = median_absolute_error(reg.predict(X_rc), y_rc)
    assert score < 0.2
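test_corrupted_regression also depends on a fixture defined elsewhere: X_rc, y_rc is a regression dataset with deliberately corrupted samples. One plausible, purely hypothetical way to build such a fixture is to take a small-scale linear target and replace a few values with gross outliers:

# Hypothetical corrupted fixture; the names match the test above, the values
# do not come from the original module.
X_rc = rng.uniform(-1, 1, size=(100, 1))
y_rc = X_rc.ravel() + 0.1 * rng.normal(size=100)    # small-scale clean target
outlier_idx = rng.choice(100, size=10, replace=False)
y_rc[outlier_idx] = 10.0                            # gross corruption to resist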
Example #2
def test_not_robust_regression(loss, weighting):
    # Same non-robust check as in Example #1, here with a tighter tolerance
    # on the mean prediction difference and no score/r2_score comparison.
    clf = RobustWeightedRegressor(
        loss=loss,
        max_iter=100,
        weighting=weighting,
        k=0,
        c=1e7,
        burn_in=0,
        random_state=rng,
    )
    clf_not_rob = SGDRegressor(loss=loss, random_state=rng)
    clf.fit(X_r, y_r)
    clf_not_rob.fit(X_r, y_r)
    pred1 = clf.predict(X_r)
    pred2 = clf_not_rob.predict(X_r)
    difference = [
        np.linalg.norm(pred1[i] - pred2[i]) for i in range(len(pred1))
    ]
    assert np.mean(difference) < 1e-1
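Outside the test harness, the estimator follows the usual scikit-learn fit/predict API. A short usage sketch comparing it to a plain SGDRegressor on data with injected outliers; 'huber' as the weighting scheme and every numeric value here are assumptions made for illustration:

# Illustrative comparison on outlier-contaminated data (not from the tests).
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import median_absolute_error
from sklearn_extra.robust import RobustWeightedRegressor

rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, size=(200, 1))
y = X.ravel() + 0.1 * rng.normal(size=200)
y[rng.choice(200, size=20, replace=False)] = 10.0   # inject gross outliers

robust = RobustWeightedRegressor(weighting="huber", max_iter=100, random_state=0).fit(X, y)
plain = SGDRegressor(random_state=0).fit(X, y)

# The robust fit should show a noticeably smaller median absolute error.
print("robust:", median_absolute_error(y, robust.predict(X)))
print("plain: ", median_absolute_error(y, plain.predict(X)))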