Example #1
    def test_precond_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.01 * util.randn([n, n])
        A = linop.MatMul([n, 1], _A)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(_A, y, rcond=-1)[0]
        p = 1 / (np.sum(abs(_A)**2, axis=0).reshape([n, 1]))

        P = linop.Multiply([n, 1], p)
        x_rec = app.LinearLeastSquares(A, y, show_pbar=False).run()
        npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)

        alpha = p / app.MaxEig(P * A.H * A, show_pbar=False).run()
        x_rec = app.LinearLeastSquares(A,
                                       y,
                                       solver='GradientMethod',
                                       alpha=alpha,
                                       max_power_iter=100,
                                       max_iter=1000,
                                       show_pbar=False).run()
        npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)

        tau = p
        x_rec = app.LinearLeastSquares(A,
                                       y,
                                       solver='PrimalDualHybridGradient',
                                       max_iter=1000,
                                       tau=tau,
                                       show_pbar=False).run()
        npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)
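
A note on the preconditioner: p holds the inverse squared column norms of _A, so the scaled normal operator P * A.H * A has a flatter spectrum than A.H * A alone. As a reference for what solver='GradientMethod' iterates with the element-wise step alpha, here is a minimal NumPy sketch, assuming plain gradient descent on 0.5 * ||Ax - y||^2; the function name and the eigvals-based step scaling are illustrative, not sigpy internals:

    import numpy as np

    def precond_gradient_lstsq(A, y, p, max_iter=1000):
        """Gradient descent on 0.5 * ||A x - y||^2 with element-wise step."""
        # Mirror alpha = p / MaxEig(P * A.H * A): scale p so that the
        # preconditioned Hessian diag(p) A^H A has spectral radius <= 1.
        lip = np.abs(np.linalg.eigvals(p * (A.conj().T @ A))).max()
        alpha = p / lip
        x = np.zeros_like(y)
        for _ in range(max_iter):
            # Preconditioned gradient step: x <- x - alpha * A^H (A x - y).
            x = x - alpha * (A.conj().T @ (A @ x - y))
        return x

Run on the _A, y, and p defined above, this should converge to x_lstsq well within the test's 1e-3 tolerance.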
Example #2
    def test_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.1 * np.ones([n, n])
        A = linop.MatMul([n, 1], _A)
        x = np.arange(n).reshape([n, 1])
        y = A(x)
        z = np.arange(n).reshape([n, 1])

        for mu in [0, 0.1]:
            for lamda in [0, 0.1]:
                for proxg in [None, prox.L2Reg([n, 1], lamda)]:
                    for alg_name in [
                            'GradientMethod', 'PrimalDualHybridGradient',
                            'ConjugateGradient'
                    ]:
                        with self.subTest(proxg=proxg,
                                          alg_name=alg_name,
                                          lamda=lamda,
                                          mu=mu):
                            if proxg is None:
                                prox_lamda = 0
                            else:
                                prox_lamda = lamda

                            x_numpy = np.linalg.solve(
                                _A.T @ _A +
                                (lamda + mu + prox_lamda) * np.eye(n),
                                _A.T @ y + mu * z)

                            if (alg_name == 'ConjugateGradient'
                                    and proxg is not None):
                                with self.assertRaises(ValueError):
                                    app.LinearLeastSquares(
                                        A,
                                        y,
                                        alg_name=alg_name,
                                        lamda=lamda,
                                        proxg=proxg,
                                        mu=mu,
                                        z=z,
                                        show_pbar=False).run()
                            else:
                                x_rec = app.LinearLeastSquares(
                                    A,
                                    y,
                                    alg_name=alg_name,
                                    lamda=lamda,
                                    proxg=proxg,
                                    mu=mu,
                                    z=z,
                                    show_pbar=False).run()

                                npt.assert_allclose(x_rec, x_numpy, atol=1e-3)
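
For reference, the x_numpy solve above is the stationarity condition of the objective the test assembles. Writing \lambda for lamda, \mu for mu, and \lambda_g for prox_lamda (the extra L2Reg weight when proxg is passed), the problem and its normal equations are:

    \min_x \; \tfrac{1}{2}\|Ax - y\|_2^2
          + \tfrac{\lambda + \lambda_g}{2}\|x\|_2^2
          + \tfrac{\mu}{2}\|x - z\|_2^2
    \;\Longrightarrow\;
    \big(A^H A + (\lambda + \lambda_g + \mu) I\big)\, x = A^H y + \mu z

Setting the gradient to zero gives exactly the np.linalg.solve call in the loop body.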
Example #3
    def test_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.1 * np.ones([n, n])
        A = linop.MatMul([n, 1], _A)
        x = np.arange(n).reshape([n, 1])
        y = A(x)

        for z in [None, x.copy()]:
            for lamda in [0, 0.1]:
                for proxg in [None, prox.L2Reg([n, 1], lamda)]:
                    for solver in [
                            'GradientMethod', 'PrimalDualHybridGradient',
                            'ConjugateGradient', 'ADMM'
                    ]:
                        with self.subTest(proxg=proxg,
                                          solver=solver,
                                          lamda=lamda,
                                          z=z):
                            AHA = _A.T @ _A + lamda * np.eye(n)
                            AHy = _A.T @ y
                            if proxg is not None:
                                AHA += lamda * np.eye(n)

                            if z is not None:
                                AHy = _A.T @ y + lamda * z

                            x_numpy = np.linalg.solve(AHA, AHy)
                            if (solver == 'ConjugateGradient'
                                    and proxg is not None):
                                with self.assertRaises(ValueError):
                                    app.LinearLeastSquares(
                                        A,
                                        y,
                                        solver=solver,
                                        lamda=lamda,
                                        proxg=proxg,
                                        z=z,
                                        show_pbar=False).run()
                            else:
                                x_rec = app.LinearLeastSquares(
                                    A,
                                    y,
                                    solver=solver,
                                    lamda=lamda,
                                    proxg=proxg,
                                    z=z,
                                    show_pbar=False).run()

                                npt.assert_allclose(x_rec, x_numpy, atol=1e-3)
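
Relative to Example #2, the solver list adds 'ADMM'. As a sketch of the consensus splitting such a solver applies to the L2-regularized problem (min f(x) + g(v) subject to x = v, in scaled form), here is a minimal NumPy loop; the function name and the fixed rho are illustrative, and sigpy's own ADMM parameters may differ:

    import numpy as np

    def admm_l2reg_lstsq(A, y, lamda, rho=1.0, max_iter=500):
        """ADMM sketch for min 0.5*||Ax - y||^2 + 0.5*lamda*||v||^2, x = v."""
        n = A.shape[1]
        x = np.zeros((n, 1))
        v = np.zeros((n, 1))
        u = np.zeros((n, 1))
        AHA = A.conj().T @ A
        AHy = A.conj().T @ y
        for _ in range(max_iter):
            # x-update: solve (A^H A + rho I) x = A^H y + rho (v - u).
            x = np.linalg.solve(AHA + rho * np.eye(n), AHy + rho * (v - u))
            # v-update: prox of (lamda/2)||.||^2 at x + u with weight rho.
            v = rho * (x + u) / (lamda + rho)
            # Scaled dual update.
            u = u + x - v
        return x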
Example #4
File: app_test.py  Project: jtamir/sigpy
    def test_proxg_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        lamda = 0.1
        x_lstsq = np.linalg.solve(np.matmul(mat.conjugate().T, mat) + lamda * np.eye(n),
                                  np.matmul(mat.conjugate().T, y))

        proxg = prox.L2Reg([n, 1], lamda)
        x_rec = app.LinearLeastSquares(
            A, y, alg_name='GradientMethod', max_iter=1000, proxg=proxg).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(A, y, max_iter=1000, proxg=proxg,
                                       alg_name='PrimalDualHybridGradient').run()
        npt.assert_allclose(x_rec, x_lstsq)
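
Passing proxg turns GradientMethod into a proximal gradient iteration: a gradient step on the data term followed by the proximal operator of g. For prox.L2Reg the prox is a closed-form shrinkage, so a minimal NumPy sketch looks as follows (the function name is illustrative):

    import numpy as np

    def prox_gradient_l2reg(A, y, lamda, max_iter=1000):
        """Proximal gradient sketch: min 0.5*||Ax - y||^2 + 0.5*lamda*||x||^2."""
        # Step size 1/L, with L the Lipschitz constant of the data gradient.
        alpha = 1 / np.abs(np.linalg.eigvals(A.conj().T @ A)).max()
        x = np.zeros_like(y)
        for _ in range(max_iter):
            # Gradient step on 0.5 * ||A x - y||^2.
            w = x - alpha * (A.conj().T @ (A @ x - y))
            # prox of alpha * (lamda/2)||.||^2: divide by (1 + alpha * lamda).
            x = w / (1 + alpha * lamda)
        return x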
Example #5
File: app_test.py  Project: jtamir/sigpy
    def test_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(mat, y, rcond=-1)[0]

        x_rec = app.LinearLeastSquares(A, y).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(
            A, y, alg_name='GradientMethod', max_iter=1000).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(A, y, max_iter=1000,
                                       alg_name='PrimalDualHybridGradient').run()
        npt.assert_allclose(x_rec, x_lstsq)
Example #6
File: app_test.py  Project: jtamir/sigpy
    def test_dual_precond_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(mat, y, rcond=-1)[0]

        d = 1 / np.sum(abs(mat)**2, axis=1, keepdims=True).reshape([n, 1])
        x_rec = app.LinearLeastSquares(A, y, alg_name='PrimalDualHybridGradient',
                                       max_iter=1000, sigma=d).run()
        npt.assert_allclose(x_rec, x_lstsq)
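
The sigma=d passed here is a diagonal dual step size built from the inverse squared row norms of mat, i.e. a dual-side (row) preconditioner. As a sketch of the primal-dual iteration being preconditioned, assuming the standard PDHG updates for 0.5*||Ax - y||^2 with g = 0 (the function name is illustrative, and the scalar tau is chosen to satisfy the usual condition tau * ||diag(sigma)^(1/2) A||^2 <= 1):

    import numpy as np

    def pdhg_lstsq(A, y, sigma, max_iter=1000):
        """PDHG sketch for min_x 0.5*||A x - y||^2 with diagonal dual step."""
        tau = 1 / np.abs(np.linalg.eigvals(A.conj().T @ (sigma * A))).max()
        x = np.zeros((A.shape[1], 1))
        x_bar = x.copy()
        u = np.zeros_like(y)
        for _ in range(max_iter):
            # Dual step: prox of sigma * f* for f(w) = 0.5 * ||w - y||^2.
            u = (u + sigma * (A @ x_bar - y)) / (1 + sigma)
            # Primal step (g = 0, so the primal prox is the identity).
            x_new = x - tau * (A.conj().T @ u)
            # Extrapolation on the primal variable.
            x_bar = 2 * x_new - x
            x = x_new
        return x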
Example #7
File: app_test.py  Project: jtamir/sigpy
    def test_l2reg_bias_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        z = util.randn([n, 1])
        lamda = 0.1
        mu = 0.01
        x_lstsq = np.linalg.solve(np.matmul(mat.conjugate().T, mat) + (lamda + mu) * np.eye(n),
                                  np.matmul(mat.conjugate().T, y) + mu * z)

        x_rec = app.LinearLeastSquares(A, y, lamda=lamda, mu=mu, z=z).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(
            A, y, alg_name='GradientMethod', max_iter=1000, lamda=lamda, mu=mu, z=z).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(A, y, max_iter=1000, lamda=lamda, mu=mu, z=z,
                                       alg_name='PrimalDualHybridGradient').run()
        npt.assert_allclose(x_rec, x_lstsq)
Example #8
File: app_test.py  Project: jtamir/sigpy
    def test_precond_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(mat, y, rcond=-1)[0]
        p = 1 / (np.sum(abs(mat)**2, axis=0).reshape([n, 1]))

        P = linop.Multiply([n, 1], p)
        x_rec = app.LinearLeastSquares(A, y).run()
        npt.assert_allclose(x_rec, x_lstsq)

        alpha = p / app.MaxEig(P * A.H * A).run()
        x_rec = app.LinearLeastSquares(A, y, alg_name='GradientMethod',
                                       max_iter=1000, alpha=alpha).run()
        npt.assert_allclose(x_rec, x_lstsq)

        tau = p
        x_rec = app.LinearLeastSquares(A, y, alg_name='PrimalDualHybridGradient',
                                       max_iter=1000, tau=tau).run()
        npt.assert_allclose(x_rec, x_lstsq)
Example #9
    def test_dual_precond_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], _A)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(_A, y, rcond=-1)[0]

        d = 1 / np.sum(abs(_A)**2, axis=1, keepdims=True).reshape([n, 1])
        x_rec = app.LinearLeastSquares(A,
                                       y,
                                       solver='PrimalDualHybridGradient',
                                       max_iter=1000,
                                       sigma=d,
                                       show_pbar=False).run()
        npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)