import numpy as np

# ProximalConsensus, ProximalGradientDescent, AcceleratedProximalGradient, sparse,
# nucnorm and the generate_* helpers are assumed to come from the library under test.


def test_sparse_regression():
    """
    Test sparse regression (l1-regularized least squares).
    """

    A, y, x_true = generate_sparse_system()

    # baseline: ordinary least squares solution (also used as the initial iterate)
    xls = np.linalg.lstsq(A, y, rcond=None)[0]

    # helper: assert the estimate has at most 1% of the error of the least squares solution
    def test_error(xhat):
        test_err = np.linalg.norm(xhat - x_true, 2)
        naive_err = np.linalg.norm(xls - x_true, 2)
        err_ratio = test_err / naive_err
        assert err_ratio <= 0.01

    # ProximalConsensus
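    # Consensus ADMM: split the objective 0.5 * ||A x - y||^2 + ||x||_1 into two
    # proximal operators (here 'linsys' supplies the quadratic data term and
    # 'sparse' the l1 penalty) and drive their solutions to agreement.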
    opt = ProximalConsensus(xls)
    opt.add('linsys', A=A, b=y)
    opt.add('sparse', 1.)
    opt.display = None
    opt.storage = None
    opt.run(maxiter=100)

    # test error
    test_error(opt.theta)

    # Proximal gradient descent and Accelerated proximal gradient descent
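    # Each iteration takes a gradient step on the smooth term and then applies the
    # prox of the l1 penalty (soft-thresholding): x <- prox_{lr*g}(x - lr * grad_f(x)).
    # The accelerated variant adds Nesterov-style momentum; for a fixed learning rate,
    # convergence is guaranteed when it is no larger than 1/L, with L the Lipschitz
    # constant of grad_f.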
    for algorithm in [ProximalGradientDescent, AcceleratedProximalGradient]:

        # objective: smooth least-squares term f(x) = 0.5 * ||A x - y||^2 and its gradient
        def f_df(x):
            err = A.dot(x) - y
            obj = 0.5 * np.linalg.norm(err) ** 2
            grad = A.T.dot(err)
            return obj, grad

        # sparsity penalty
        proxop = sparse(1.)

        # optimizer
        opt = algorithm(f_df, xls, proxop, learning_rate=0.005)
        opt.display = None
        opt.storage = None
        opt.run(maxiter=5000)

        # test
        test_error(opt.theta)


def test_lowrank_matrix_approx():
    """
    Test low rank matrix approximation

    """

    Xobs, Xtrue = generate_lowrank_matrix()

    # helper: assert the estimate has at most half the error of the raw observation
    def test_error(Xhat):
        test_err = np.linalg.norm(Xhat - Xtrue, 'fro')
        naive_err = np.linalg.norm(Xobs - Xtrue, 'fro')
        err_ratio = test_err / naive_err
        assert err_ratio <= 0.5

    # proximal algorithm for low rank matrix approximation
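    # Consensus ADMM: combine a squared-error term on the noisy observation Xobs
    # with a nuclear norm penalty (weight 0.2), which encourages low-rank solutions.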
    opt = ProximalConsensus(Xobs)
    opt.add('squared_error', Xobs)
    opt.add('nucnorm', 0.2)
    opt.display = None
    opt.storage = None
    opt.run(maxiter=100)

    # test error
    test_error(opt.theta)

    # Proximal gradient descent and Accelerated proximal gradient descent
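    # Same proximal gradient scheme as above, now applying the prox of the nuclear
    # norm (soft-thresholding of the singular values) after each gradient step.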
    for algorithm in [ProximalGradientDescent, AcceleratedProximalGradient]:

        # objective: squared error to the observed matrix (the smooth data term),
        # matching the 'squared_error' term in the consensus version above
        def f_df(X):
            grad = X - Xobs
            obj = 0.5 * np.linalg.norm(grad.ravel()) ** 2
            return obj, grad

        # nuclear norm penalty (the low-rank-promoting analogue of the l1 penalty)
        proxop = nucnorm(0.2)

        # optimizer
        opt = algorithm(f_df, Xobs, proxop, learning_rate=0.005)
        opt.display = None
        opt.storage = None
        opt.run(maxiter=5000)

        # test
        test_error(opt.theta)