import numpy as np

# These snippets exercise the `descent` package. The import paths below assume
# its usual layout (`algorithms` and `proxops` submodules); the exact location
# of GradientDescent and Consensus may differ between versions.
from descent import algorithms, proxops
from descent import Consensus, GradientDescent
from descent.proxops import linsys, sparse


def test_projected_gradient_descent():
    """Test proximal gradient descent on a sparse regression problem"""

    A, y, x_true, xls, ls_error = generate_sparse_system()

    # helper function to test relative error
    def test_error(xhat):
        assert relative_error(xhat, x_true, ls_error) <= 0.01

    # Proximal gradient descent and Accelerated proximal gradient descent
    for algorithm in ['sgd', 'nag']:

        # objective
        def f_df(x):
            err = A.dot(x) - y
            obj = 0.5 * np.linalg.norm(err) ** 2
            grad = A.T.dot(err)
            return obj, grad

        # optimizer
        opt = getattr(algorithms, algorithm)(lr=5e-3)
        opt.add(sparse(10.0))

        # run it
        res = opt.minimize(f_df, xls, display=None, maxiter=5000)

        # test
        test_error(res.x)
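
# `generate_sparse_system` and `relative_error` are helpers defined elsewhere
# in the test suite. A minimal sketch consistent with how they are used here
# (sizes, sparsity level, and noise scale are illustrative assumptions;
# test_sparse_regression below assumes an older variant that returns only
# (A, y, x_true)):


def generate_sparse_system(n=100, m=50, p=0.1, eta=0.05, seed=1234):
    """Random sparse linear system y = A x_true + noise, plus an LS baseline."""
    rng = np.random.RandomState(seed)

    # sparse ground truth: roughly a fraction p of the entries are nonzero
    x_true = 10 * rng.randn(n) * (rng.rand(n) < p)

    # noisy linear observations
    A = rng.randn(m, n)
    y = A.dot(x_true) + eta * rng.randn(m)

    # least-squares baseline and its error, used to normalize test errors
    xls = np.linalg.lstsq(A, y, rcond=None)[0]
    ls_error = np.linalg.norm(xls - x_true, 2)

    return A, y, x_true, xls, ls_error


def relative_error(xhat, x_true, baseline_error):
    """Error of an estimate, relative to the least-squares baseline error."""
    return np.linalg.norm(xhat - x_true, 2) / baseline_error
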
def test_sparse_regression():
    """Test sparse regression via the GradientDescent interface"""

    A, y, x_true = generate_sparse_system()

    # least squares solution
    xls = np.linalg.lstsq(A, y, rcond=None)[0]

    # helper function to test relative error
    def test_error(xhat):
        test_err = np.linalg.norm(xhat - x_true, 2)
        naive_err = np.linalg.norm(xls - x_true, 2)
        err_ratio = test_err / naive_err
        assert err_ratio <= 0.01

    # Proximal gradient descent and Accelerated proximal gradient descent
    for algorithm in ['sgd', 'nag']:

        # objective
        def f_df(x):
            err = A.dot(x) - y
            obj = 0.5 * np.linalg.norm(err) ** 2
            grad = A.T.dot(err)
            return obj, grad

        # optimizer
        opt = GradientDescent(xls, f_df, algorithm, {'lr': 5e-3}, proxop=sparse(10.0), rho=1.0)
        opt.run(maxiter=5000)

        # test
        test_error(opt.theta)
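
# Both tests above solve the same l1-regularized least-squares (lasso) problem
#     minimize_x  0.5 * ||A x - y||^2  +  lam * ||x||_1,   lam = 10.0,
# where f_df supplies the smooth term and its gradient A^T (A x - y), and
# sparse(10.0) applies the l1 proximal operator after each gradient step.
# A quick finite-difference check of that gradient (illustrative, not part of
# the original suite):


def _check_f_df_gradient():
    rng = np.random.RandomState(0)
    A = rng.randn(5, 3)
    y = rng.randn(5)
    x = rng.randn(3)

    def f(z):
        return 0.5 * np.linalg.norm(A.dot(z) - y) ** 2

    grad = A.T.dot(A.dot(x) - y)

    # central differences along each coordinate should match the analytic grad
    eps = 1e-6
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = eps
        fd = (f(x + e) - f(x - e)) / (2 * eps)
        assert np.isclose(fd, grad[i], atol=1e-4)
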
def test_sparse():
    """Test the l1 (soft-thresholding) proximal operator"""
    pen = 0.1
    rho = 0.1
    v = np.linspace(-5, 5, 1000)

    # soft-thresholding shrinks v toward zero and clips |v| <= gamma to zero
    gamma = pen / rho
    x = (v - gamma * np.sign(v)) * (np.abs(v) > gamma)

    op = proxops.sparse(pen)

    assert np.allclose(op(v, rho), x)
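
# The reference vector above is the closed form of the l1 proximal operator:
# with gamma = pen / rho,
#     prox(v) = sign(v) * max(|v| - gamma, 0)
#             = (v - gamma * sign(v)) * (|v| > gamma).
# A minimal check that the two forms agree (illustrative):


def _check_soft_threshold_forms():
    v = np.linspace(-5, 5, 1000)
    gamma = 1.0
    a = np.sign(v) * np.maximum(np.abs(v) - gamma, 0)
    b = (v - gamma * np.sign(v)) * (np.abs(v) > gamma)
    assert np.allclose(a, b)
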
def test_consensus():
    """Test the consensus optimizer (ADMM)"""

    A, y, x_true, xls, ls_error = generate_sparse_system()

    # optimizer
    opt = Consensus()
    opt.add(linsys(A, y))
    opt.add(sparse(10.0))

    # run it
    res = opt.minimize(xls, display=None, maxiter=5000)

    # test
    assert relative_error(res.x, x_true, ls_error) <= 0.05
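
# Consensus ADMM gives each added proximal operator its own copy of the
# variable and drives the copies toward agreement through an averaged
# consensus variable and dual updates. A minimal sketch of one such loop,
# assuming only the `op(v, rho)` call signature used above (everything else
# here is illustrative, not descent's implementation):


def consensus_admm(prox_ops, x0, rho=1.0, maxiter=100):
    xbar = x0.copy()                                   # consensus variable
    duals = [np.zeros_like(x0) for _ in prox_ops]      # scaled dual variables

    for _ in range(maxiter):
        # each operator minimizes its term plus a quadratic pull toward xbar
        primals = [op(xbar - u, rho) for op, u in zip(prox_ops, duals)]

        # average the copies (plus duals) to form the new consensus point
        xbar = sum(x + u for x, u in zip(primals, duals)) / len(prox_ops)

        # dual ascent on each copy's disagreement with the consensus
        duals = [u + x - xbar for u, x in zip(duals, primals)]

    return xbar


# e.g. consensus_admm([linsys(A, y), sparse(10.0)], xls) mirrors the test above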