Example #1
def test_lowrank_matrix_approx():
    """
    Test low rank matrix approximation

    """

    Xobs, Xtrue = generate_lowrank_matrix()

    # helper function to test relative error of given parameters
    def test_error(Xhat):
        test_err = np.linalg.norm(Xhat - Xtrue, 'fro')
        naive_err = np.linalg.norm(Xobs - Xtrue, 'fro')
        err_ratio = test_err / naive_err
        assert err_ratio <= 0.5

    # test both proximal gradient descent ('sgd') and accelerated proximal gradient descent ('nag')
    for algorithm in ['sgd', 'nag']:

        # objective
        def f_df(X):
            grad = X - Xtrue
            obj = 0.5 * np.linalg.norm(grad.ravel()) ** 2
            return obj, grad

        # optimizer
        opt = GradientDescent(Xobs, f_df, algorithm, {'lr': 5e-3}, proxop=nucnorm(0.2), rho=1.0)
        opt.callbacks = []
        opt.run(maxiter=5000)

        # test
        test_error(opt.theta)
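
The nucnorm(0.2) proximal operator above comes from the surrounding project and is not shown here. As a rough illustration only, a nuclear-norm proximal step is usually singular-value soft-thresholding; the sketch below uses a hypothetical nucnorm_prox helper with assumed penalty and rho arguments and is not the library's implementation.

import numpy as np

def nucnorm_prox(V, penalty, rho=1.0):
    # sketch: soft-threshold the singular values of V
    U, s, Vt = np.linalg.svd(V, full_matrices=False)
    s_shrunk = np.maximum(s - penalty / rho, 0.0)  # shrink each singular value toward zero
    return (U * s_shrunk) @ Vt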
Example #2
def test_sparse_regression():
    """
    Test sparse regression

    """

    A, y, x_true = generate_sparse_system()

    # least squares solution
    xls = np.linalg.lstsq(A, y)[0]

    # helper function to test relative error
    def test_error(xhat):
        test_err = np.linalg.norm(xhat - x_true, 2)
        naive_err = np.linalg.norm(xls - x_true, 2)
        err_ratio = test_err / naive_err
        assert err_ratio <= 0.01

    # test both proximal gradient descent ('sgd') and accelerated proximal gradient descent ('nag')
    for algorithm in ['sgd', 'nag']:

        # objective
        def f_df(x):
            err = A.dot(x) - y
            obj = 0.5 * np.linalg.norm(err) ** 2
            grad = A.T.dot(err)
            return obj, grad

        # optimizer
        opt = GradientDescent(xls, f_df, algorithm, {'lr': 5e-3}, proxop=sparse(10.0), rho=1.0)
        opt.run(maxiter=5000)

        # test
        test_error(opt.theta)
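
The sparse(10.0) proximal operator is likewise defined elsewhere in the project. For intuition, an l1 proximal step is elementwise soft-thresholding; the sketch below uses a hypothetical sparse_prox helper and is not the library's code.

import numpy as np

def sparse_prox(v, penalty, rho=1.0):
    # sketch: elementwise soft-thresholding, which drives small entries exactly to zero
    return np.sign(v) * np.maximum(np.abs(v) - penalty / rho, 0.0)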
Example #3
def test_quadratic_bowl():
    """
    Test optimization in a quadratic bowl
    """

    t = np.linspace(0, 2*np.pi, 100)
    tol = 1e-3

    theta_true = [np.sin(t), np.cos(t)]
    theta_init = [np.cos(t), np.sin(t)]

    def f_df(theta):
        obj = 0.5*(theta[0]-theta_true[0])**2 + 0.5*(theta[1]-theta_true[1])**2
        grad = [theta[0]-theta_true[0], theta[1]-theta_true[1]]
        return np.sum(obj), grad

    opt = GradientDescent(theta_init, f_df, 'sgd', {'lr': 1e-2})
    opt.run(maxiter=1e3)

    for theta_opt, theta_ref in zip(opt.theta, theta_true):
        assert np.linalg.norm(theta_opt - theta_ref) <= tol
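
For reference, the same quadratic-bowl objective can be minimized with a hand-rolled gradient descent loop; this standalone sketch does not use GradientDescent and only illustrates why the test converges.

import numpy as np

t = np.linspace(0, 2 * np.pi, 100)
theta_true = [np.sin(t), np.cos(t)]
theta = [np.cos(t), np.sin(t)]      # same initialization as the test
lr = 1e-2

for _ in range(1000):
    grad = [theta[0] - theta_true[0], theta[1] - theta_true[1]]
    theta = [p - lr * g for p, g in zip(theta, grad)]

# each coordinate error contracts by (1 - lr) per step, so after 1000 steps it is well below 1e-3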
Example #4
def test_rosen(tol=1e-2):
    """Test minimization of the rosenbrock function"""

    # check that the gradient is zero at the optimal point
    xstar = np.array([1, 1])
    assert np.all(rosenbrock(xstar)[1] == 0)

    # list of algorithms to test (and their parameters)
    algorithms = [('sgd', {'lr': 1e-3, 'momentum': 0.1}),
                  ('nag', {'lr': 1e-3}),
                  ('rmsprop', {'lr': 1e-3}),
                  ('adam', {'lr': 1e-3}),
                  ('sag', {'nterms': 2, 'lr': 2e-3})]

    # loop over algorithms
    for algorithm, options in algorithms:

        # initialize
        opt = GradientDescent(np.zeros(2), rosenbrock, algorithm, options)

        # run the optimization algorithm
        opt.run(maxiter=1e4)
        assert np.linalg.norm(opt.theta - xstar) <= tol
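
The rosenbrock helper is assumed to return both the objective value and the gradient. A minimal sketch of the standard two-dimensional Rosenbrock function, consistent with the zero-gradient check at [1, 1], would be:

import numpy as np

def rosenbrock(theta):
    # standard 2-D Rosenbrock function; minimum at (1, 1)
    x, y = theta
    obj = (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
    grad = np.array([-2 * (1 - x) - 400 * x * (y - x ** 2),
                     200 * (y - x ** 2)])
    return obj, grad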