Example #1
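Both examples are test methods taken from sigpy's algorithm tests; only the method bodies are shown on this page. They presume a test-module preamble along these lines (assumed here, not part of the excerpt):

    import unittest

    import numpy as np
    import numpy.testing as npt

    from sigpy import alg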
    def test_GradientMethod(self):
        n = 5
        lamda = 0.1
        A, x_numpy, y = self.Ax_y_setup(n, lamda)

        # Step size alpha = 1 / L, where L is the Lipschitz constant of the
        # gradient: the largest singular value of A.T @ A + lamda * np.eye(n).
        lipschitz = np.linalg.svd(A.T @ A + lamda * np.eye(n),
                                  compute_uv=False)[0]
        alpha = 1.0 / lipschitz

        # Sweep all four variants: plain and accelerated, with the l2 penalty
        # handled either inside gradf (proxg=None) or by its proximal operator.
        for accelerate in [True, False]:
            for proxg in [None, lambda alpha, x: x / (1 + lamda * alpha)]:
                with self.subTest(accelerate=accelerate, proxg=proxg):
                    x_sigpy = np.zeros([n])

                    def gradf(x):
                        # Gradient of the data term 0.5 * ||A @ x - y||^2; the
                        # l2 penalty is folded in only when no proximal
                        # operator handles it.
                        gradf_x = A.T @ (A @ x - y)
                        if proxg is None:
                            gradf_x += lamda * x

                        return gradf_x

                    alg_method = alg.GradientMethod(gradf,
                                                    x_sigpy,
                                                    alpha,
                                                    accelerate=accelerate,
                                                    proxg=proxg,
                                                    max_iter=1000)

                    while not alg_method.done():
                        alg_method.update()

                    npt.assert_allclose(x_sigpy, x_numpy)
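
Example #1 relies on an Ax_y_setup helper defined elsewhere in the test class. A minimal sketch of it, assuming it mirrors the inline setup of Example #2 (the body below is a plausible reconstruction, not the actual sigpy helper):

    def Ax_y_setup(self, n, lamda):
        # Random linear system y = A @ x_orig.
        A = np.random.random([n, n])
        x_orig = np.random.random([n])
        y = A @ x_orig
        # Reference solution: closed-form minimizer of
        # 0.5 * ||A @ x - y||^2 + 0.5 * lamda * ||x||^2.
        x_numpy = np.linalg.solve(A.T @ A + lamda * np.eye(n), A.T @ y)
        return A, x_numpy, y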
Example #2
    def test_GradientMethod(self):
        n = 5
        A = np.random.random([n, n])
        x_orig = np.random.random([n])
        y = np.matmul(A, x_orig)
        lamda = 1.0
        # Ground truth: closed-form minimizer of
        # 0.5 * ||A @ x - y||^2 + 0.5 * lamda * ||x||^2.
        x_truth = np.linalg.solve(
            np.matmul(A.T, A) + lamda * np.eye(n), np.matmul(A.T, y))

        # Step size alpha = 1 / L, where L is the Lipschitz constant of the
        # gradient: the largest singular value of A.T @ A + lamda * np.eye(n).
        lipschitz = np.linalg.svd(np.matmul(A.T, A) + lamda * np.eye(n),
                                  compute_uv=False)[0]
        alpha = 1.0 / lipschitz

        # Gradient method
        x = np.zeros([n])
        alg_method = alg.GradientMethod(
            lambda x: np.matmul(A.T, (np.matmul(A, x) - y)) + lamda * x,
            x,
            alpha,
            accelerate=False)
        while not alg_method.done():
            alg_method.update()

        npt.assert_allclose(x, x_truth, atol=1, rtol=1e-3)

        # Accelerated gradient method
        x = np.zeros([n])
        alg_method = alg.GradientMethod(
            lambda x: np.matmul(A.T, np.matmul(A, x) - y) + lamda * x,
            x,
            alpha,
            accelerate=True)
        while not alg_method.done():
            alg_method.update()

        npt.assert_allclose(x, x_truth, atol=1, rtol=1e-3)

        # Proximal gradient method
        x = np.zeros([n])
        alg_method = alg.GradientMethod(
            lambda x: np.matmul(A.T, np.matmul(A, x) - y),
            x,
            alpha,
            accelerate=False,
            proxg=lambda alpha, x: x / (1 + lamda * alpha))
        while not alg_method.done():
            alg_method.update()

        npt.assert_allclose(x, x_truth, atol=1, rtol=1e-3)

        # Accelerated proximal gradient method
        x = np.zeros([n])
        alg_method = alg.GradientMethod(
            lambda x: np.matmul(A.T, np.matmul(A, x) - y),
            x,
            alpha,
            proxg=lambda alpha, x: x / (1 + lamda * alpha),
            accelerate=True)
        while not alg_method.done():
            alg_method.update()

        npt.assert_allclose(x, x_truth, atol=1, rtol=1e-3)
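
For reference, here is a self-contained NumPy sketch of the accelerated proximal gradient (FISTA-style) iteration that the accelerated proximal variant above exercises. This is a sketch of the algorithm under the same setup, not sigpy's internal implementation of alg.GradientMethod:

    import numpy as np

    n = 5
    lamda = 1.0
    rng = np.random.default_rng(0)
    A = rng.random((n, n))
    y = A @ rng.random(n)
    x_truth = np.linalg.solve(A.T @ A + lamda * np.eye(n), A.T @ y)

    # Step size 1 / L, with L the largest singular value of
    # A.T @ A + lamda * np.eye(n), matching the tests above.
    alpha = 1.0 / np.linalg.svd(A.T @ A + lamda * np.eye(n),
                                compute_uv=False)[0]

    def gradf(x):
        # Gradient of the data term 0.5 * ||A @ x - y||^2.
        return A.T @ (A @ x - y)

    def proxg(alpha, x):
        # Proximal operator of g(x) = 0.5 * lamda * ||x||^2.
        return x / (1 + lamda * alpha)

    x = np.zeros(n)
    z, t = x.copy(), 1.0
    for _ in range(1000):
        x_old = x
        x = proxg(alpha, z - alpha * gradf(z))  # proximal gradient step
        t_old, t = t, (1 + np.sqrt(1 + 4 * t ** 2)) / 2
        z = x + ((t_old - 1) / t) * (x - x_old)  # Nesterov momentum

    # Same loose tolerances as the tests above.
    np.testing.assert_allclose(x, x_truth, atol=1, rtol=1e-3)

Setting z = x after each step (and leaving t at 1) recovers the plain proximal gradient variant; folding lamda * x into gradf and passing no proxg recovers the explicit-gradient variants tested above.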