Example #1
    def test_MaxEig(self):
        n = 5
        mat = util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        s = np.linalg.svd(mat, compute_uv=False)

        npt.assert_allclose(app.MaxEig(A.H * A, max_iter=100).run(), s[0]**2, atol=1e-2)
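
What MaxEig computes, sketched in plain NumPy (illustrative only, not the sigpy API): power iteration on A^H A converges to its largest eigenvalue, which equals the squared largest singular value of A, the identity the assertion above relies on.

import numpy as np

rng = np.random.default_rng(0)
mat = rng.standard_normal((5, 5))
AHA = mat.conj().T @ mat

v = rng.standard_normal((5, 1))
for _ in range(100):
    v = AHA @ v                # power iteration
    v /= np.linalg.norm(v)

max_eig = (v.conj().T @ AHA @ v).item()   # Rayleigh quotient
s = np.linalg.svd(mat, compute_uv=False)
assert np.allclose(max_eig, s[0]**2, rtol=1e-2)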
Example #2
    def test_precond_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.01 * util.randn([n, n])
        A = linop.MatMul([n, 1], _A)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(_A, y, rcond=-1)[0]
        p = 1 / (np.sum(abs(_A)**2, axis=0).reshape([n, 1]))

        P = linop.Multiply([n, 1], p)
        x_rec = app.LinearLeastSquares(A, y, show_pbar=False).run()
        npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)

        alpha = 1 / app.MaxEig(P * A.H * A, show_pbar=False).run()
        x_rec = app.LinearLeastSquares(A,
                                       y,
                                       solver='GradientMethod',
                                       alpha=alpha,
                                       max_power_iter=100,
                                       max_iter=1000,
                                       show_pbar=False).run()
        npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)

        tau = p
        x_rec = app.LinearLeastSquares(A,
                                       y,
                                       solver='PrimalDualHybridGradient',
                                       max_iter=1000,
                                       tau=tau,
                                       show_pbar=False).run()
        npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)
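
A minimal sketch of what the preconditioner buys, in plain NumPy (illustrative assumptions throughout, not sigpy internals): p = 1 / diag(A^H A), the inverse column energies, rescales the gradient of 0.5 * ||Ax - y||^2, and the step size alpha is then set from the maximum eigenvalue of P A^H A, mirroring the alpha computed above.

import numpy as np

rng = np.random.default_rng(0)
n = 5
A = np.eye(n) + 0.01 * rng.standard_normal((n, n))
y = A @ rng.standard_normal((n, 1))

p = 1 / np.sum(np.abs(A)**2, axis=0).reshape(n, 1)    # inverse column energies
alpha = 1 / np.abs(np.linalg.eigvals(p * (A.conj().T @ A))).max()

x = np.zeros((n, 1))
for _ in range(1000):
    x -= alpha * p * (A.conj().T @ (A @ x - y))       # preconditioned gradient step

x_lstsq = np.linalg.lstsq(A, y, rcond=None)[0]
assert np.allclose(x, x_lstsq, atol=1e-3)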
Example #3
    def test_MatMul(self):
        mshape = (5, 4, 2)
        ishape = (5, 2, 3)
        A = linop.MatMul(ishape, util.randn(mshape))
        self.check_linop_adjoint(A)
        self.check_linop_linear(A)
        self.check_linop_pickleable(A)
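
The adjoint check above boils down to an inner-product identity; here is a self-contained NumPy version of the same idea (names are illustrative, not the test helpers):

import numpy as np

rng = np.random.default_rng(0)
mat = rng.standard_normal((5, 4, 2))   # mshape: a batch of 4x2 matrices
x = rng.standard_normal((5, 2, 3))     # ishape
u = rng.standard_normal((5, 4, 3))     # same shape as A(x)

Ax = mat @ x                           # forward: batched matmul
AHu = np.swapaxes(mat, -1, -2) @ u     # adjoint: (conjugate-)transposed matmul

# <A x, u> == <x, A^H u> must hold for a correct adjoint.
assert np.allclose(np.vdot(Ax, u), np.vdot(x, AHu))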
Example #4
    def test_dual_precond_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(mat, y, rcond=-1)[0]

        d = 1 / np.sum(abs(mat)**2, axis=1, keepdims=True).reshape([n, 1])
        x_rec = app.LinearLeastSquares(A, y, alg_name='PrimalDualHybridGradient',
                                       max_iter=1000, sigma=d).run()
        npt.assert_allclose(x_rec, x_lstsq)
Example #5
    def test_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.1 * np.ones([n, n])
        A = linop.MatMul([n, 1], _A)
        x = np.arange(n).reshape([n, 1])
        y = A(x)
        z = np.arange(n).reshape([n, 1])

        for mu in [0, 0.1]:
            for lamda in [0, 0.1]:
                for proxg in [None, prox.L2Reg([n, 1], lamda)]:
                    for alg_name in [
                            'GradientMethod', 'PrimalDualHybridGradient',
                            'ConjugateGradient'
                    ]:
                        with self.subTest(proxg=proxg,
                                          alg_name=alg_name,
                                          lamda=lamda,
                                          mu=mu):
                            if proxg is None:
                                prox_lamda = 0
                            else:
                                prox_lamda = lamda

                            x_numpy = np.linalg.solve(
                                _A.T @ _A +
                                (lamda + mu + prox_lamda) * np.eye(n),
                                _A.T @ y + mu * z)

                            if (alg_name == 'ConjugateGradient'
                                    and proxg is not None):
                                with self.assertRaises(ValueError):
                                    app.LinearLeastSquares(
                                        A,
                                        y,
                                        alg_name=alg_name,
                                        lamda=lamda,
                                        proxg=proxg,
                                        mu=mu,
                                        z=z,
                                        show_pbar=False).run()
                            else:
                                x_rec = app.LinearLeastSquares(
                                    A,
                                    y,
                                    alg_name=alg_name,
                                    lamda=lamda,
                                    proxg=proxg,
                                    mu=mu,
                                    z=z,
                                    show_pbar=False).run()

                                npt.assert_allclose(x_rec, x_numpy, atol=1e-3)
Example #6
    def test_L2ConstrainedMinimization(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)

        eps = 0

        def proxg(lamda, x):
            return x / (1 + lamda)

        x_rec = app.L2ConstrainedMinimization(A, y, proxg, eps).run()
        npt.assert_allclose(x_rec, x)
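
The proxg defined above is the proximal operator of g(u) = ||u||_2^2 / 2: minimizing 0.5 * ||u - x||^2 + 0.5 * lamda * ||u||^2 over u gives (1 + lamda) * u = x. A quick NumPy check of that stationarity condition:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(5)
lamda = 0.7

u = x / (1 + lamda)              # candidate prox point
grad = (u - x) + lamda * u       # gradient of the prox objective at u
assert np.allclose(grad, 0)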
Example #7
    def test_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.1 * np.ones([n, n])
        A = linop.MatMul([n, 1], _A)
        x = np.arange(n).reshape([n, 1])
        y = A(x)

        for z in [None, x.copy()]:
            for lamda in [0, 0.1]:
                for proxg in [None, prox.L2Reg([n, 1], lamda)]:
                    for solver in [
                            'GradientMethod', 'PrimalDualHybridGradient',
                            'ConjugateGradient', 'ADMM'
                    ]:
                        with self.subTest(proxg=proxg,
                                          solver=solver,
                                          lamda=lamda,
                                          z=z):
                            AHA = _A.T @ _A + lamda * np.eye(n)
                            AHy = _A.T @ y
                            if proxg is not None:
                                AHA += lamda * np.eye(n)

                            if z is not None:
                                AHy = _A.T @ y + lamda * z

                            x_numpy = np.linalg.solve(AHA, AHy)
                            if (solver == 'ConjugateGradient'
                                    and proxg is not None):
                                with self.assertRaises(ValueError):
                                    app.LinearLeastSquares(
                                        A,
                                        y,
                                        solver=solver,
                                        lamda=lamda,
                                        proxg=proxg,
                                        z=z,
                                        show_pbar=False).run()
                            else:
                                x_rec = app.LinearLeastSquares(
                                    A,
                                    y,
                                    solver=solver,
                                    lamda=lamda,
                                    proxg=proxg,
                                    z=z,
                                    show_pbar=False).run()

                                npt.assert_allclose(x_rec, x_numpy, atol=1e-3)
Example #8
    def test_dual_precond_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], _A)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(_A, y, rcond=-1)[0]

        d = 1 / np.sum(abs(_A)**2, axis=1, keepdims=True).reshape([n, 1])
        x_rec = app.LinearLeastSquares(A,
                                       y,
                                       solver='PrimalDualHybridGradient',
                                       max_iter=1000,
                                       sigma=d,
                                       show_pbar=False).run()
        npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)
Example #9
    def test_proxg_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        lamda = 0.1
        x_lstsq = np.linalg.solve(np.matmul(mat.conjugate().T, mat) + lamda * np.eye(n),
                                  np.matmul(mat.conjugate().T, y))

        proxg = prox.L2Reg([n, 1], lamda)
        x_rec = app.LinearLeastSquares(
            A, y, alg_name='GradientMethod', max_iter=1000, proxg=proxg).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(A, y, max_iter=1000, proxg=proxg,
                                       alg_name='PrimalDualHybridGradient').run()
        npt.assert_allclose(x_rec, x_lstsq)
Example #10
    def test_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(mat, y, rcond=-1)[0]

        x_rec = app.LinearLeastSquares(A, y).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(
            A, y, alg_name='GradientMethod', max_iter=1000).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(A, y, max_iter=1000,
                                       alg_name='PrimalDualHybridGradient').run()
        npt.assert_allclose(x_rec, x_lstsq)
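
With no solver argument, LinearLeastSquares defaults to conjugate gradient on the normal equations A^H A x = A^H y. A bare-bones CG sketch in plain NumPy, illustrative rather than sigpy's alg.ConjugateGradient:

import numpy as np

rng = np.random.default_rng(0)
n = 5
A = np.eye(n) + 0.1 * rng.standard_normal((n, n))
y = A @ rng.standard_normal((n, 1))

AHA = A.conj().T @ A
b = A.conj().T @ y
x = np.zeros((n, 1))
r = b - AHA @ x
p = r.copy()
for _ in range(n):                              # exact in at most n steps
    Ap = AHA @ p
    alpha = (r.T @ r).item() / (p.T @ Ap).item()
    x += alpha * p
    r_new = r - alpha * Ap
    beta = (r_new.T @ r_new).item() / (r.T @ r).item()
    p = r_new + beta * p
    r = r_new

assert np.allclose(x, np.linalg.lstsq(A, y, rcond=None)[0], atol=1e-6)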
Example #11
    def test_L2ConstrainedMinimization(self):
        n = 5
        _A = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], _A)
        x = util.randn([n, 1])
        y = A(x)

        eps = 0

        def proxg(lamda, x):
            return x / (1 + lamda)

        x_rec = app.L2ConstrainedMinimization(A,
                                              y,
                                              proxg,
                                              eps,
                                              show_pbar=False).run()
        npt.assert_allclose(x_rec, x, atol=1e-3)
Example #12
    def test_SDMM(self):
        n = 5
        lamda = 0.1
        A, x_numpy, y = self.Ax_y_setup(n, lamda)
        y = np.expand_dims(y, 1)
        A = linop.MatMul(np.expand_dims(x_numpy, 1).shape, A)

        c_norm = None
        c_max = None
        mu = 10**8  # big mu ok since no constraints used
        rho_norm = 1
        rho_max = 1
        lam = 0.1
        cg_iters = 5
        max_iter = 10

        L = []
        c = [1]
        rho = [1]
        for ii in range(len(y) - 1):
            c.append(0.00012**2)
            rho.append(0.001)

        alg_method = alg.SDMM(A,
                              y,
                              lam,
                              L=L,
                              c=c,
                              c_max=c_max,
                              c_norm=c_norm,
                              mu=mu,
                              rho=rho,
                              rho_max=rho_max,
                              rho_norm=rho_norm,
                              eps_pri=10**-5,
                              eps_dual=10**-2,
                              max_cg_iter=cg_iters,
                              max_iter=max_iter)

        while not alg_method.done():
            alg_method.update()

        npt.assert_allclose(np.squeeze(abs(alg_method.x)), x_numpy)
Example #13
    def test_l2reg_bias_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        z = util.randn([n, 1])
        lamda = 0.1
        mu = 0.01
        x_lstsq = np.linalg.solve(np.matmul(mat.conjugate().T, mat) + (lamda + mu) * np.eye(n),
                                  np.matmul(mat.conjugate().T, y) + mu * z)

        x_rec = app.LinearLeastSquares(A, y, lamda=lamda, mu=mu, z=z).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(
            A, y, alg_name='GradientMethod', max_iter=1000, lamda=lamda, mu=mu, z=z).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(A, y, max_iter=1000, lamda=lamda, mu=mu, z=z,
                                       alg_name='PrimalDualHybridGradient').run()
        npt.assert_allclose(x_rec, x_lstsq)
Example #14
    def test_precond_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        x_lstsq = np.linalg.lstsq(mat, y, rcond=-1)[0]
        p = 1 / (np.sum(abs(mat)**2, axis=0).reshape([n, 1]))

        P = linop.Multiply([n, 1], p)
        x_rec = app.LinearLeastSquares(A, y).run()
        npt.assert_allclose(x_rec, x_lstsq)

        alpha = p / app.MaxEig(P * A.H * A).run()
        x_rec = app.LinearLeastSquares(A, y, alg_name='GradientMethod',
                                       max_iter=1000, alpha=alpha).run()
        npt.assert_allclose(x_rec, x_lstsq)

        tau = p
        x_rec = app.LinearLeastSquares(A, y, alg_name='PrimalDualHybridGradient',
                                       max_iter=1000, tau=tau).run()
        npt.assert_allclose(x_rec, x_lstsq)
Example #15
    def test_GerchbergSaxton(self):
        n = 10
        lamda = 0.1
        A, x_numpy, y = self.Ax_y_setup(n, lamda)
        y = np.expand_dims(np.csingle(y), 1)
        x_numpy = np.expand_dims(x_numpy, 1)
        A = np.csingle(A)
        A = linop.MatMul(y.shape, A)
        x0 = np.zeros(A.ishape, dtype=complex)  # np.complex alias was removed in NumPy 1.24

        alg_method = alg.GerchbergSaxton(A,
                                         y,
                                         x0,
                                         max_iter=100,
                                         tol=10E-9,
                                         lamb=lamda)

        while (not alg_method.done()):
            alg_method.update()

        phs = np.conj(x_numpy * alg_method.x / abs(x_numpy * alg_method.x))
        npt.assert_allclose(alg_method.x * phs, x_numpy, rtol=1e-6)
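
The phs factor above exists because phase retrieval determines x only up to a global phase. A simplified NumPy illustration of that ambiguity, aligning the estimate by a single global phase (a global variant of the alignment the test performs elementwise):

import numpy as np

rng = np.random.default_rng(0)
x_true = rng.standard_normal(10) + 1j * rng.standard_normal(10)
x_est = x_true * np.exp(1j * 0.8)   # same solution, rotated by a global phase

phs = np.vdot(x_est, x_true)
phs /= abs(phs)                     # unit-modulus global phase estimate
assert np.allclose(x_est * phs, x_true)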