Example #1
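All of the snippets below use SigPy-style names without their import block. A plausible preamble, assuming the standard sigpy package layout (the exact module paths are an assumption, not part of the original snippets):

    import numpy as np
    import numpy.testing as npt

    from sigpy import app, backend, linop, prox, util
    from sigpy.alg import PrimalDualHybridGradient
    from sigpy.app import MaxEig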
    def test_L2Reg(self):
        shape = [6]
        lamda = 1.0
        P = prox.L2Reg(shape, lamda)
        x = util.randn(shape)
        # The prox of (lamda / 2) * ||x||_2^2 with step alpha = 0.1 scales x
        # by 1 / (1 + alpha * lamda).
        y = P(0.1, x)
        npt.assert_allclose(y, x / (1 + lamda * 0.1))
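What the assertion checks, as a standalone NumPy sketch (no sigpy needed): the proximal operator of f(x) = (lamda / 2) * ||x||_2^2 with step alpha has the closed form x / (1 + alpha * lamda), which satisfies the first-order optimality condition of the prox objective:

    import numpy as np

    alpha, lamda = 0.1, 1.0
    x = np.random.randn(6)
    v = x / (1 + alpha * lamda)          # claimed minimizer
    # Gradient of (1/2) * ||v - x||^2 + (alpha * lamda / 2) * ||v||^2 at v:
    grad = (v - x) + alpha * lamda * v
    assert np.allclose(grad, 0)          # first-order optimality holds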
Example #2
    def test_UnitaryTransform(self):
        shape = [6]
        lamda = 1.0
        A = linop.FFT(shape)
        # For unitary A, prox_{g o A}(x) = A^H prox_g(A x); since the l2
        # penalty is invariant under a unitary map, the result matches
        # plain L2Reg.
        P = prox.UnitaryTransform(prox.L2Reg(shape, lamda), A)
        x = util.randn(shape)
        y = P(0.1, x)
        npt.assert_allclose(y, x / (1 + lamda * 0.1))
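The same identity sketched with NumPy's orthonormal FFT (assuming, as sigpy does by default, that linop.FFT is orthonormal): for unitary A, prox_{g o A}(x) = A^H prox_g(A x), and the l2 shrinkage commutes with the transform:

    import numpy as np

    alpha, lamda = 0.1, 1.0
    x = np.random.randn(6).astype(complex)
    Ax = np.fft.fft(x, norm='ortho')                         # unitary forward map
    v = np.fft.ifft(Ax / (1 + alpha * lamda), norm='ortho')  # A^H prox_g(A x)
    assert np.allclose(v, x / (1 + alpha * lamda))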
Example #3
    def test_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.1 * np.ones([n, n])
        A = linop.MatMul([n, 1], _A)
        x = np.arange(n).reshape([n, 1])
        y = A(x)
        z = np.arange(n).reshape([n, 1])

        for mu in [0, 0.1]:
            for lamda in [0, 0.1]:
                for proxg in [None, prox.L2Reg([n, 1], lamda)]:
                    for alg_name in [
                            'GradientMethod', 'PrimalDualHybridGradient',
                            'ConjugateGradient'
                    ]:
                        with self.subTest(proxg=proxg,
                                          alg_name=alg_name,
                                          lamda=lamda,
                                          mu=mu):
                            if proxg is None:
                                prox_lamda = 0
                            else:
                                prox_lamda = lamda

                            x_numpy = np.linalg.solve(
                                _A.T @ _A +
                                (lamda + mu + prox_lamda) * np.eye(n),
                                _A.T @ y + mu * z)

                            if (alg_name == 'ConjugateGradient'
                                    and proxg is not None):
                                with self.assertRaises(ValueError):
                                    app.LinearLeastSquares(
                                        A,
                                        y,
                                        alg_name=alg_name,
                                        lamda=lamda,
                                        proxg=proxg,
                                        mu=mu,
                                        z=z,
                                        show_pbar=False).run()
                            else:
                                x_rec = app.LinearLeastSquares(
                                    A,
                                    y,
                                    alg_name=alg_name,
                                    lamda=lamda,
                                    proxg=proxg,
                                    mu=mu,
                                    z=z,
                                    show_pbar=False).run()

                                npt.assert_allclose(x_rec, x_numpy, atol=1e-3)
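The nested loops compare app.LinearLeastSquares against a closed-form reference. With proxg supplying g(x) = (lamda_prox / 2) * ||x||_2^2 (and lamda_prox = 0 when proxg is None), the objective and its normal equations are:

    \min_x \; \tfrac{1}{2}\|Ax - y\|_2^2
            + \tfrac{\lambda}{2}\|x\|_2^2
            + \tfrac{\lambda_{\mathrm{prox}}}{2}\|x\|_2^2
            + \tfrac{\mu}{2}\|x - z\|_2^2
    \;\Longrightarrow\;
    \bigl(A^H A + (\lambda + \mu + \lambda_{\mathrm{prox}})\,I\bigr)\,x = A^H y + \mu z

which is exactly the np.linalg.solve call above. ConjugateGradient cannot handle a proximal term, hence the expected ValueError in that branch.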
Example #4
    def test_LinearLeastSquares(self):
        n = 5
        _A = np.eye(n) + 0.1 * np.ones([n, n])
        A = linop.MatMul([n, 1], _A)
        x = np.arange(n).reshape([n, 1])
        y = A(x)

        for z in [None, x.copy()]:
            for lamda in [0, 0.1]:
                for proxg in [None, prox.L2Reg([n, 1], lamda)]:
                    for solver in [
                            'GradientMethod', 'PrimalDualHybridGradient',
                            'ConjugateGradient', 'ADMM'
                    ]:
                        with self.subTest(proxg=proxg,
                                          solver=solver,
                                          lamda=lamda,
                                          z=z):
                            # Reference solution via the normal equations.
                            AHA = _A.T @ _A + lamda * np.eye(n)
                            AHy = _A.T @ y
                            if proxg is not None:
                                # proxg contributes a second
                                # (lamda / 2) * ||x||^2 term.
                                AHA += lamda * np.eye(n)

                            if z is not None:
                                # The (lamda / 2) * ||x - z||^2 bias adds
                                # lamda * z to the right-hand side.
                                AHy = _A.T @ y + lamda * z

                            x_numpy = np.linalg.solve(AHA, AHy)
                            if (solver == 'ConjugateGradient'
                                    and proxg is not None):
                                with self.assertRaises(ValueError):
                                    app.LinearLeastSquares(
                                        A,
                                        y,
                                        solver=solver,
                                        lamda=lamda,
                                        proxg=proxg,
                                        z=z,
                                        show_pbar=False).run()
                            else:
                                x_rec = app.LinearLeastSquares(
                                    A,
                                    y,
                                    solver=solver,
                                    lamda=lamda,
                                    proxg=proxg,
                                    z=z,
                                    show_pbar=False).run()

                                npt.assert_allclose(x_rec, x_numpy, atol=1e-3)
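This variant renames alg_name to solver, adds ADMM, and reuses the same lamda for both the built-in l2 term and the proxg term, so the reference system picks up lamda twice when proxg is set. In the fullest case (proxg and z both given):

    \bigl(A^H A + 2\lambda I\bigr)\,x = A^H y + \lambda z

with each lamda term dropping out when the corresponding option is None.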
Example #5
    def test_proxg_LinearLeastSquares(self):
        n = 5
        mat = np.eye(n) + 0.1 * util.randn([n, n])
        A = linop.MatMul([n, 1], mat)
        x = util.randn([n, 1])
        y = A(x)
        lamda = 0.1
        x_lstsq = np.linalg.solve(
            mat.conjugate().T @ mat + lamda * np.eye(n),
            mat.conjugate().T @ y)

        proxg = prox.L2Reg([n, 1], lamda)
        x_rec = app.LinearLeastSquares(
            A, y, alg_name='GradientMethod', max_iter=1000, proxg=proxg).run()
        npt.assert_allclose(x_rec, x_lstsq)

        x_rec = app.LinearLeastSquares(A, y, max_iter=1000, proxg=proxg,
                                       alg_name='PrimalDualHybridGradient').run()
        npt.assert_allclose(x_rec, x_lstsq)
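Both solvers are being checked against the ridge solution x = (A^H A + lamda I)^{-1} A^H y. A minimal NumPy sketch of the proximal-gradient iteration that GradientMethod performs with an L2Reg proxg (an illustration with a fixed 1/L step, not sigpy internals):

    import numpy as np

    n = 5
    rng = np.random.default_rng(0)
    mat = np.eye(n) + 0.1 * rng.standard_normal([n, n])
    y = mat @ rng.standard_normal([n, 1])
    lamda = 0.1
    x_lstsq = np.linalg.solve(mat.T @ mat + lamda * np.eye(n), mat.T @ y)

    alpha = 1 / np.linalg.norm(mat.T @ mat, 2)   # step size 1/L
    x = np.zeros([n, 1])
    for _ in range(5000):
        # Gradient step on (1/2)||Ax - y||^2, then the prox of
        # alpha * (lamda / 2) * ||x||^2, i.e. division by (1 + alpha * lamda).
        x = (x - alpha * mat.T @ (mat @ x - y)) / (1 + alpha * lamda)
    np.testing.assert_allclose(x, x_lstsq, atol=1e-6)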
Example #6
    def _get_PrimalDualHybridGradient(self):
        with self.y_device:
            y = -self.y
            A = self.A

        if self.proxg is None:
            proxg = prox.NoOp(self.x.shape)
        else:
            proxg = self.proxg

        if self.lamda > 0:

            def gradh(x):
                # Gradient of the quadratic bias term: lamda * x when z is
                # None, lamda * (x - z) otherwise (lamda > 0 in this branch).
                with backend.get_device(self.x):
                    if self.z is None:
                        return self.lamda * x

                    return self.lamda * (x - self.z)

            gamma_primal = self.lamda
        else:
            gradh = None
            gamma_primal = 0

        if self.G is None:
            proxfc = prox.L2Reg(y.shape, 1, y=y)
            gamma_dual = 1
        else:
            A = linop.Vstack([A, self.G])
            proxf1c = prox.L2Reg(self.y.shape, 1, y=y)
            proxf2c = prox.Conj(proxg)
            proxfc = prox.Stack([proxf1c, proxf2c])
            proxg = prox.NoOp(self.x.shape)
            gamma_dual = 0

        if self.tau is None:
            if self.sigma is None:
                self.sigma = 1

            S = linop.Multiply(A.oshape, self.sigma)
            AHA = A.H * S * A
            max_eig = MaxEig(AHA,
                             dtype=self.x.dtype,
                             device=self.x_device,
                             max_iter=self.max_power_iter,
                             show_pbar=self.show_pbar).run()

            self.tau = 1 / (max_eig + self.lamda)
        else:
            T = linop.Multiply(A.ishape, self.tau)
            AAH = A * T * A.H

            max_eig = MaxEig(AAH,
                             dtype=self.x.dtype,
                             device=self.x_device,
                             max_iter=self.max_power_iter,
                             show_pbar=self.show_pbar).run()

            self.sigma = 1 / max_eig

        with self.y_device:
            u = self.y_device.xp.zeros(A.oshape, dtype=self.y.dtype)

        self.alg = PrimalDualHybridGradient(proxfc,
                                            proxg,
                                            A,
                                            A.H,
                                            self.x,
                                            u,
                                            self.tau,
                                            self.sigma,
                                            gamma_primal=gamma_primal,
                                            gamma_dual=gamma_dual,
                                            gradh=gradh,
                                            max_iter=self.max_iter)
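Both step-size branches call MaxEig, a power iteration on the normal operator, so that the PDHG steps satisfy the usual condition tau * sigma * ||A||_2^2 <= 1 (here tau = 1 / (max_eig + lamda) also folds in the gradh Lipschitz constant). A rough NumPy analogue of that estimate (max_eig_estimate is a hypothetical helper, not sigpy's MaxEig):

    import numpy as np

    def max_eig_estimate(normal_op, n, iters=30, seed=0):
        # Power iteration on a Hermitian PSD operator x -> A^H A x;
        # the Rayleigh quotient estimates ||A||_2^2.
        rng = np.random.default_rng(seed)
        x = rng.standard_normal(n)
        for _ in range(iters):
            x = normal_op(x)
            x = x / np.linalg.norm(x)
        return x @ normal_op(x)

sigpy's MaxEig wraps the same idea in an App with device handling and the show_pbar progress bar seen above.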
Example #7
    def _get_PrimalDualHybridGradient(self):
        with self.y_device:
            A = self.A

        if self.lamda > 0:
            gamma_primal = self.lamda
            proxg = prox.L2Reg(self.x.shape,
                               self.lamda,
                               y=self.z,
                               proxh=self.proxg)
        else:
            gamma_primal = 0
            if self.proxg is None:
                proxg = prox.NoOp(self.x.shape)
            else:
                proxg = self.proxg

        if self.G is None:
            proxfc = prox.L2Reg(self.y.shape, 1, y=-self.y)
            gamma_dual = 1
        else:
            A = linop.Vstack([A, self.G])
            proxf1c = prox.L2Reg(self.y.shape, 1, y=-self.y)
            proxf2c = prox.Conj(proxg)
            proxfc = prox.Stack([proxf1c, proxf2c])
            proxg = prox.NoOp(self.x.shape)
            gamma_dual = 0

        if self.tau is None:
            if self.sigma is None:
                self.sigma = 1

            S = linop.Multiply(A.oshape, self.sigma)
            AHA = A.H * S * A
            max_eig = MaxEig(AHA,
                             dtype=self.x.dtype,
                             device=self.x_device,
                             max_iter=self.max_power_iter,
                             show_pbar=self.show_pbar).run()

            self.tau = 1 / max_eig
        elif self.sigma is None:
            T = linop.Multiply(A.ishape, self.tau)
            AAH = A * T * A.H

            max_eig = MaxEig(AAH,
                             dtype=self.x.dtype,
                             device=self.x_device,
                             max_iter=self.max_power_iter,
                             show_pbar=self.show_pbar).run()

            self.sigma = 1 / max_eig

        with self.y_device:
            u = self.y_device.xp.zeros(A.oshape, dtype=self.y.dtype)

        self.alg = PrimalDualHybridGradient(proxfc,
                                            proxg,
                                            A,
                                            A.H,
                                            self.x,
                                            u,
                                            self.tau,
                                            self.sigma,
                                            gamma_primal=gamma_primal,
                                            gamma_dual=gamma_dual,
                                            max_iter=self.max_iter)
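Compared with the previous version, the (lamda / 2) * ||x - z||^2 bias is no longer handled by a separate gradh; it is folded into the proximal operator via prox.L2Reg(self.x.shape, self.lamda, y=self.z, proxh=self.proxg). That relies on the standard composition identity for a quadratic plus a proxable term (stated here as background, not quoted from sigpy's docs):

    \operatorname{prox}_{\alpha\left(\frac{\lambda}{2}\|\cdot - z\|^2 + h\right)}(v)
      = \operatorname{prox}_{\frac{\alpha}{1+\alpha\lambda}\, h}
        \!\left(\frac{v + \alpha\lambda z}{1 + \alpha\lambda}\right)

The step-size logic is also tightened: the elif self.sigma is None branch leaves a user-supplied sigma untouched, where the earlier version's bare else recomputed it, and tau no longer needs the + lamda correction because the quadratic term now lives inside proxg rather than in a gradient step.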