# Example #1
def test_initialise2():
    """Test ``initialise_candidates2``.

    For each data configuration the returned candidate must have ``beta > 0``
    and a smooth loss no worse than the all-zero coefficient vector.  Covers
    plain data, a wider feature matrix, data with an explicit intercept
    column, and weighted samples.
    """
    # BUGFIX: the warnings recorder was previously bound to `w` and then
    # clobbered by the weight vector below; distinct names avoid the shadowing.
    with catch_warnings(record=True) as caught:
        print("Testing initialisation2")
        X, Y = data_create(20, 5)
        zero = np.zeros(5)
        alpha, beta = initialise_candidates2(X, Y, 0.1)
        assert beta > 0
        assert loss_smooth(alpha, X, Y, 0.1, beta=beta) <= loss_smooth(
            zero, X, Y, 0.1, beta=beta)
        X, Y = data_create(20, 12)
        zero = np.zeros(12)
        alpha, beta = initialise_candidates2(X, Y, 0.1)
        assert beta > 0
        assert loss_smooth(alpha, X, Y, 0.1, beta=beta) <= loss_smooth(
            zero, X, Y, 0.1, beta=beta)
        X, Y = data_create(20, 11)
        X = add_intercept_column(X)
        zero = np.zeros(12)
        alpha, beta = initialise_candidates2(X, Y, 0.1)
        assert beta > 0
        assert loss_smooth(alpha, X, Y, 0.1, beta=beta) <= loss_smooth(
            zero, X, Y, 0.1, beta=beta)
        # Weighted variant: the same invariant must hold under sample weights.
        X, Y = data_create(20, 8)
        weight = np.random.uniform(size=20)
        zero = np.zeros(8)
        alpha, beta = initialise_candidates2(X, Y, 0.1, weight)
        assert beta > 0
        assert loss_smooth(alpha, X, Y, 0.1, beta=beta,
                           weight=weight) <= loss_smooth(
                               zero, X, Y, 0.1, beta=beta, weight=weight)
# Example #2
def test_loss():
    """Cross-check the smooth, sharp and numba loss implementations.

    Asserts that: the losses are non-positive on this data and strictly
    negative for a large epsilon; the smooth loss with a very large beta
    approximates the sharp loss; the numba loss matches the smooth loss at
    the same beta.  Everything is checked both unweighted and with random
    sample weights.
    """
    print("Testing loss functions")
    X, Y = data_create(20, 5)
    wts = np.random.uniform(size=20)
    coef = np.random.normal(size=5)
    big = 1000000  # beta large enough that loss_smooth converges to loss_sharp
    # --- Unweighted checks ---
    assert loss_smooth(coef, X, Y, 0.1) <= 0
    assert loss_sharp(coef, X, Y, 0.1) <= 0
    assert loss_numba(coef, X, Y, 0.1, lambda2=0, beta=0)[0] <= 0
    assert loss_smooth(coef, X, Y, 10) < 0
    assert loss_sharp(coef, X, Y, 10) < 0
    assert loss_numba(coef, X, Y, 10, lambda2=0, beta=0)[0] < 0
    assert np.allclose(
        loss_smooth(coef, X, Y, 0.1, beta=big), loss_sharp(coef, X, Y, 0.1)
    )
    assert np.allclose(
        loss_smooth(coef, X, Y, 0.1, lambda1=0.5, beta=big),
        loss_sharp(coef, X, Y, 0.1, lambda1=0.5),
    )
    assert np.allclose(
        loss_smooth(coef, X, Y, 0.1, lambda2=0.5, beta=big),
        loss_sharp(coef, X, Y, 0.1, lambda2=0.5),
    )
    assert loss_smooth(coef, X, Y, 0.1, beta=20, lambda1=0.0, lambda2=0.0) == approx(
        loss_numba(coef, X, Y, 0.1, lambda2=0.0, beta=20)[0], 1e-8
    )
    assert loss_smooth(coef, X, Y, 0.1, beta=20, lambda1=0.0, lambda2=0.5) == approx(
        loss_numba(coef, X, Y, 0.1, lambda2=0.5, beta=20)[0], 1e-8
    )
    # --- Same battery of checks, with sample weights ---
    assert loss_smooth(coef, X, Y, 0.1, weight=wts) <= 0
    assert loss_sharp(coef, X, Y, 0.1, weight=wts) <= 0
    assert loss_numba(coef, X, Y, 0.1, lambda2=0, beta=0, weight=wts)[0] <= 0
    assert loss_smooth(coef, X, Y, 10, weight=wts) < 0
    assert loss_sharp(coef, X, Y, 10, weight=wts) < 0
    assert loss_numba(coef, X, Y, 10, lambda2=0, beta=0, weight=wts)[0] < 0
    assert np.allclose(
        loss_smooth(coef, X, Y, 0.1, beta=big, weight=wts),
        loss_sharp(coef, X, Y, 0.1, weight=wts),
    )
    assert np.allclose(
        loss_smooth(coef, X, Y, 0.1, lambda1=0.5, beta=big, weight=wts),
        loss_sharp(coef, X, Y, 0.1, lambda1=0.5, weight=wts),
    )
    assert np.allclose(
        loss_smooth(coef, X, Y, 0.1, lambda2=0.5, beta=big, weight=wts),
        loss_sharp(coef, X, Y, 0.1, lambda2=0.5, weight=wts),
    )
    assert loss_smooth(coef, X, Y, 0.1, beta=20, weight=wts, lambda2=0.0) == approx(
        loss_numba(coef, X, Y, 0.1, lambda2=0.0, weight=wts, beta=20)[0], 1e-8
    )
    assert loss_smooth(coef, X, Y, 0.1, beta=20, weight=wts, lambda2=0.5) == approx(
        loss_numba(coef, X, Y, 0.1, lambda2=0.5, weight=wts, beta=20)[0], 1e-8
    )
# Example #3
def test_gradopt():
    """Graduated optimisation must never increase the smooth loss.

    Runs the optimiser from a random start with no regularisation, with
    lambda1, and with lambda2 — first unweighted, then with random sample
    weights — and checks the optimised loss is no worse than the start.
    """
    print("Testing graduated optimisation")
    X, Y = data_create(20, 5)
    start = np.random.normal(size=5)
    for extra in ({}, {"lambda1": 0.5}, {"lambda2": 0.5}):
        optimised = graduated_optimisation(start, X, Y, 0.1, beta=100, **extra)
        assert loss_smooth(start, X, Y, 0.1, beta=100, **extra) >= loss_smooth(
            optimised, X, Y, 0.1, beta=100, **extra
        )
    # Repeat the three configurations with sample weights.
    wts = np.random.uniform(size=20)
    for extra in ({}, {"lambda1": 0.5}, {"lambda2": 0.5}):
        optimised = graduated_optimisation(
            start, X, Y, 0.1, beta=100, weight=wts, **extra
        )
        assert loss_smooth(
            start, X, Y, 0.1, beta=100, weight=wts, **extra
        ) >= loss_smooth(optimised, X, Y, 0.1, beta=100, weight=wts, **extra)
# Example #4
def test_weights():
    """Check the sample-weight semantics across the public functions.

    Two invariants: ``weight=None`` behaves like an all-ones weight vector,
    and duplicating every sample is equivalent to doubling the weights.
    """
    X, Y, mod = data_create2(20, 5)
    no_w = None
    ones_w = np.ones(20)
    twos_w = ones_w * 2
    X2 = np.concatenate((X, X), 0)
    Y2 = np.concatenate((Y, Y), 0)
    coef = np.random.normal(size=5)
    # Squared residuals, precomputed once for the epsilon/beta helpers below.
    res = (X @ coef - Y) ** 2
    res2 = (X2 @ coef - Y2) ** 2
    assert np.allclose(
        loss_sharp(coef, X, Y, 0.1, weight=no_w),
        loss_sharp(coef, X, Y, 0.1, weight=ones_w),
    )
    assert np.allclose(
        loss_sharp(coef, X2, Y2, 0.1, weight=no_w),
        loss_sharp(coef, X, Y, 0.1, weight=twos_w),
    )
    assert np.allclose(
        loss_smooth(coef, X, Y, 0.1, 10, weight=no_w),
        loss_smooth(coef, X, Y, 0.1, 10, weight=ones_w),
    )
    assert np.allclose(
        loss_smooth(coef, X2, Y2, 0.1, 10, weight=no_w),
        loss_smooth(coef, X, Y, 0.1, 10, weight=twos_w),
    )
    assert np.allclose(
        graduated_optimisation(coef, X, Y, 0.1, beta=100, lambda2=0.5, weight=no_w),
        graduated_optimisation(coef, X, Y, 0.1, beta=100, lambda2=0.5, weight=ones_w),
    )
    assert np.allclose(
        graduated_optimisation(coef, X2, Y2, 0.1, beta=100, lambda2=0.5, weight=no_w),
        graduated_optimisation(coef, X, Y, 0.1, beta=100, lambda2=0.5, weight=twos_w),
    )
    assert np.allclose(
        matching_epsilon(res, 0.01, 10, no_w),
        matching_epsilon(res, 0.01, 10, ones_w),
    )
    assert np.allclose(
        matching_epsilon(res2, 0.01, 100, no_w),
        matching_epsilon(res, 0.01, 100, twos_w),
    )
    assert np.allclose(
        next_beta(res, 0.01, 100, no_w),
        next_beta(res, 0.01, 100, ones_w),
    )
    assert np.allclose(
        next_beta(res2, 0.01, 100, no_w),
        next_beta(res, 0.01, 100, twos_w),
    )
    assert np.allclose(
        log_approximation_ratio(res, 0.01, 1, 100, no_w),
        log_approximation_ratio(res, 0.01, 1, 100, ones_w),
    )
    assert np.allclose(
        log_approximation_ratio(res2, 0.01, 1, 100, no_w),
        log_approximation_ratio(res, 0.01, 1, 100, twos_w),
    )
    assert np.allclose(
        regularised_regression(X, Y, 1e-4, 1e-4, weight=no_w),
        regularised_regression(X, Y, 1e-4, 1e-4, weight=ones_w),
    )
    # Looser tolerance: the solver only matches to ~1e-4 on duplicated data.
    assert np.allclose(
        regularised_regression(X2, Y2, 1e-4, 1e-4, weight=no_w),
        regularised_regression(X, Y, 1e-4, 1e-4, weight=twos_w),
        atol=1e-4,
    )
# Example #5
def test_owlqn():
    """``optimise_loss`` (OWL-QN) must never increase the smooth loss.

    Same structure as ``test_gradopt``: three regularisation configurations,
    unweighted and then with random sample weights.
    """
    print("Testing owlqn")
    X, Y = data_create(20, 5)
    start = np.random.normal(size=5)
    for extra in ({}, {"lambda1": 0.5}, {"lambda2": 0.5}):
        optimised = optimise_loss(start, X, Y, 0.1, beta=100, **extra)
        assert loss_smooth(start, X, Y, 0.1, beta=100, **extra) >= loss_smooth(
            optimised, X, Y, 0.1, beta=100, **extra
        )
    # Repeat the three configurations with sample weights.
    wts = np.random.uniform(size=20)
    for extra in ({}, {"lambda1": 0.5}, {"lambda2": 0.5}):
        optimised = optimise_loss(start, X, Y, 0.1, beta=100, weight=wts, **extra)
        assert loss_smooth(
            start, X, Y, 0.1, beta=100, weight=wts, **extra
        ) >= loss_smooth(optimised, X, Y, 0.1, beta=100, weight=wts, **extra)