def test_precond_LinearLeastSquares(self):
    n = 5
    _A = np.eye(n) + 0.01 * util.randn([n, n])
    A = linop.MatMul([n, 1], _A)
    x = util.randn([n, 1])
    y = A(x)
    x_lstsq = np.linalg.lstsq(_A, y, rcond=-1)[0]

    # Diagonal preconditioner from squared column norms of _A.
    p = 1 / (np.sum(abs(_A)**2, axis=0).reshape([n, 1]))
    P = linop.Multiply([n, 1], p)

    # Default solver.
    x_rec = app.LinearLeastSquares(A, y, show_pbar=False).run()
    npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)

    # Gradient method with step size from the preconditioned normal operator.
    alpha = 1 / app.MaxEig(P * A.H * A, show_pbar=False).run()
    x_rec = app.LinearLeastSquares(
        A, y, solver='GradientMethod', alpha=alpha,
        max_power_iter=100, max_iter=1000, show_pbar=False).run()
    npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)

    # PDHG with p as the diagonal primal step size.
    tau = p
    x_rec = app.LinearLeastSquares(
        A, y, solver='PrimalDualHybridGradient',
        max_iter=1000, tau=tau, show_pbar=False).run()
    npt.assert_allclose(x_rec, x_lstsq, atol=1e-3)
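# A minimal standalone sketch (plain NumPy, not sigpy's implementation) of the
# preconditioned gradient run above: p comes from squared column norms, the
# step size is 1 / max_eig(P A^H A), and the iteration converges to the
# least-squares solution. Seed and iteration counts are arbitrary choices.
import numpy as np

rng = np.random.default_rng(0)
n = 5
A = np.eye(n) + 0.01 * rng.standard_normal((n, n))
y = A @ rng.standard_normal((n, 1))

p = 1 / np.sum(np.abs(A)**2, axis=0).reshape(n, 1)
PAHA = p * (A.conj().T @ A)                    # diag(p) @ A^H A via broadcasting
alpha = 1 / np.abs(np.linalg.eigvals(PAHA)).max()

x = np.zeros((n, 1))
for _ in range(1000):
    x = x - alpha * (A.conj().T @ (A @ x - y)) # gradient step on ||Ax - y||^2 / 2

assert np.allclose(x, np.linalg.lstsq(A, y, rcond=-1)[0], atol=1e-3)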
def test_Multiply(self):
    # Test scalar multiplication.
    ishape = [2]
    mult = 1.1
    A = linop.Multiply(ishape, mult)
    self.check_linop_adjoint(A)
    self.check_linop_normal(A)
    self.check_linop_linear(A)
    self.check_linop_pickleable(A)

    x = np.array([1.0, 2.0], complex)
    y = np.array([1.1, 2.2], complex)
    npt.assert_allclose(A * x, y)

    # Test element-wise multiplication by an array of the same shape.
    ishape = [2]
    mult = np.array([1.0, 2.0])
    A = linop.Multiply(ishape, mult)
    self.check_linop_adjoint(A)
    self.check_linop_normal(A)
    self.check_linop_linear(A)
    self.check_linop_pickleable(A)

    x = np.array([1.0, 2.0], complex)
    y = np.array([1.0, 4.0], complex)
    npt.assert_allclose(A * x, y)

    # Test broadcasting: mult adds a leading output dimension.
    ishape = [2]
    mult = np.array([[1.0, 2.0], [3.0, 4.0]])
    A = linop.Multiply(ishape, mult)
    self.check_linop_adjoint(A)
    self.check_linop_normal(A)
    self.check_linop_linear(A)
    self.check_linop_pickleable(A)

    x = np.array([1.0, 2.0], complex)
    y = np.array([[1.0, 4.0], [3.0, 8.0]], complex)
    npt.assert_allclose(A * x, y)
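# Sketch (plain NumPy, an assumed model of the broadcasting case above, not
# sigpy's internals): when mult broadcasts x from shape [2] to [2, 2], the
# adjoint must multiply by conj(mult) and sum over the broadcast axis, so the
# inner-product identity <Ax, y> == <x, A^H y> holds.
import numpy as np

rng = np.random.default_rng(0)
mult = np.array([[1.0, 2.0], [3.0, 4.0]])
x = np.array([1.0 + 1.0j, 2.0 - 1.0j])
y = rng.standard_normal((2, 2)) + 1.0j * rng.standard_normal((2, 2))

Ax = mult * x                                  # forward: broadcast multiply
AHy = np.sum(np.conj(mult) * y, axis=0)        # adjoint: conjugate, sum broadcast axis

assert np.isclose(np.vdot(Ax, y), np.vdot(x, AHy))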
def test_precond_LinearLeastSquares(self):
    n = 5
    mat = np.eye(n) + 0.1 * util.randn([n, n])
    A = linop.MatMul([n, 1], mat)
    x = util.randn([n, 1])
    y = A(x)
    x_lstsq = np.linalg.lstsq(mat, y, rcond=-1)[0]

    # Diagonal preconditioner from squared column norms of mat.
    p = 1 / (np.sum(abs(mat)**2, axis=0).reshape([n, 1]))
    P = linop.Multiply([n, 1], p)

    x_rec = app.LinearLeastSquares(A, y).run()
    npt.assert_allclose(x_rec, x_lstsq)

    # Element-wise step size p, scaled by the preconditioned max eigenvalue.
    alpha = p / app.MaxEig(P * A.H * A).run()
    x_rec = app.LinearLeastSquares(
        A, y, alg_name='GradientMethod', max_iter=1000, alpha=alpha).run()
    npt.assert_allclose(x_rec, x_lstsq)

    # PDHG with p as the diagonal primal step size.
    tau = p
    x_rec = app.LinearLeastSquares(
        A, y, alg_name='PrimalDualHybridGradient',
        max_iter=1000, tau=tau).run()
    npt.assert_allclose(x_rec, x_lstsq)
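# Sketch (plain NumPy, an assumption, not sigpy's implementation) of the
# diagonally preconditioned PDHG run above with tau = p: the dual step is
# sigma = 1 / max_eig(A T A^H), matching how the app derives sigma when tau
# is given, and the iteration solves min_x ||Ax - y||^2 / 2.
import numpy as np

rng = np.random.default_rng(0)
n = 5
A = np.eye(n) + 0.1 * rng.standard_normal((n, n))
y = A @ rng.standard_normal((n, 1))

tau = 1 / np.sum(np.abs(A)**2, axis=0).reshape(n, 1)
ATAH = (A * tau.T) @ A.conj().T                # A @ diag(tau) @ A^H
sigma = 1 / np.abs(np.linalg.eigvals(ATAH)).max()

x = np.zeros((n, 1))
xbar, u = x.copy(), np.zeros((n, 1))
for _ in range(1000):
    u = (u + sigma * (A @ xbar - y)) / (1 + sigma)   # prox of sigma * f^*
    x_new = x - tau * (A.conj().T @ u)               # primal step (g = 0)
    xbar = 2 * x_new - x                             # over-relaxation, theta = 1
    x = x_new

assert np.allclose(x, np.linalg.lstsq(A, y, rcond=-1)[0], atol=1e-3)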
def _get_PrimalDualHybridGradient(self):
    with self.y_device:
        y = -self.y

    A = self.A
    if self.proxg is None:
        proxg = prox.NoOp(self.x.shape)
    else:
        proxg = self.proxg

    if self.lamda > 0:
        # Handle the l2 regularization term as an explicit gradient,
        # and expose its strong convexity through gamma_primal.
        def gradh(x):
            with backend.get_device(self.x):
                gradh_x = 0
                if self.lamda > 0:
                    if self.z is None:
                        gradh_x += self.lamda * x
                    else:
                        gradh_x += self.lamda * (x - self.z)

                return gradh_x

        gamma_primal = self.lamda
    else:
        gradh = None
        gamma_primal = 0

    if self.G is None:
        proxfc = prox.L2Reg(y.shape, 1, y=y)
        gamma_dual = 1
    else:
        # Stack the data term and the G-regularization into one dual variable.
        A = linop.Vstack([A, self.G])
        proxf1c = prox.L2Reg(self.y.shape, 1, y=y)
        proxf2c = prox.Conj(self.proxg)
        proxfc = prox.Stack([proxf1c, proxf2c])
        proxg = prox.NoOp(self.x.shape)
        gamma_dual = 0

    if self.tau is None:
        if self.sigma is None:
            self.sigma = 1

        # Choose tau so that 1 / tau = sigma * max_eig(A^H A) + lamda.
        S = linop.Multiply(A.oshape, self.sigma)
        AHA = A.H * S * A
        max_eig = MaxEig(AHA, dtype=self.x.dtype, device=self.x_device,
                         max_iter=self.max_power_iter,
                         show_pbar=self.show_pbar).run()
        self.tau = 1 / (max_eig + self.lamda)
    else:
        # tau is given; derive sigma from the dual normal operator.
        T = linop.Multiply(A.ishape, self.tau)
        AAH = A * T * A.H
        max_eig = MaxEig(AAH, dtype=self.x.dtype, device=self.x_device,
                         max_iter=self.max_power_iter,
                         show_pbar=self.show_pbar).run()
        self.sigma = 1 / max_eig

    with self.y_device:
        u = self.y_device.xp.zeros(A.oshape, dtype=self.y.dtype)

    self.alg = PrimalDualHybridGradient(
        proxfc, proxg, A, A.H, self.x, u, self.tau, self.sigma,
        gamma_primal=gamma_primal, gamma_dual=gamma_dual,
        gradh=gradh, max_iter=self.max_iter)
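# Sketch (plain NumPy, an assumption about the update form, not sigpy's
# internals) of why tau = 1 / (max_eig + lamda) above is a safe choice: the
# l2 term h(x) = lamda/2 * ||x - z||^2 enters the primal step as an explicit
# gradient (Condat-Vu style), which requires roughly
# 1 / tau >= sigma * max_eig(A^H A) + L_h / 2 with L_h = lamda. The result is
# checked against the closed form (A^H A + lamda I)^{-1} (A^H y + lamda z).
import numpy as np

rng = np.random.default_rng(0)
n, lamda = 5, 0.5
A = np.eye(n) + 0.1 * rng.standard_normal((n, n))
y = A @ rng.standard_normal((n, 1))
z = rng.standard_normal((n, 1))

sigma = 1.0
max_eig = np.abs(np.linalg.eigvals(A.conj().T @ A)).max()
tau = 1 / (sigma * max_eig + lamda)

x = np.zeros((n, 1))
xbar, u = x.copy(), np.zeros((n, 1))
for _ in range(2000):
    u = (u + sigma * (A @ xbar - y)) / (1 + sigma)        # prox of sigma * f^*
    x_new = x - tau * (A.conj().T @ u + lamda * (x - z))  # explicit gradient of h
    xbar = 2 * x_new - x
    x = x_new

x_closed = np.linalg.solve(A.conj().T @ A + lamda * np.eye(n),
                           A.conj().T @ y + lamda * z)
assert np.allclose(x, x_closed, atol=1e-3)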
def _get_PrimalDualHybridGradient(self):
    with self.y_device:
        A = self.A

    if self.lamda > 0:
        # Fold the l2 regularization into the primal prox, and expose its
        # strong convexity through gamma_primal.
        gamma_primal = self.lamda
        proxg = prox.L2Reg(self.x.shape, self.lamda,
                           y=self.z, proxh=self.proxg)
    else:
        gamma_primal = 0
        if self.proxg is None:
            proxg = prox.NoOp(self.x.shape)
        else:
            proxg = self.proxg

    if self.G is None:
        proxfc = prox.L2Reg(self.y.shape, 1, y=-self.y)
        gamma_dual = 1
    else:
        # Stack the data term and the G-regularization into one dual variable.
        A = linop.Vstack([A, self.G])
        proxf1c = prox.L2Reg(self.y.shape, 1, y=-self.y)
        proxf2c = prox.Conj(proxg)
        proxfc = prox.Stack([proxf1c, proxf2c])
        proxg = prox.NoOp(self.x.shape)
        gamma_dual = 0

    if self.tau is None:
        if self.sigma is None:
            self.sigma = 1

        S = linop.Multiply(A.oshape, self.sigma)
        AHA = A.H * S * A
        max_eig = MaxEig(AHA, dtype=self.x.dtype, device=self.x_device,
                         max_iter=self.max_power_iter,
                         show_pbar=self.show_pbar).run()
        self.tau = 1 / max_eig
    elif self.sigma is None:
        T = linop.Multiply(A.ishape, self.tau)
        AAH = A * T * A.H
        max_eig = MaxEig(AAH, dtype=self.x.dtype, device=self.x_device,
                         max_iter=self.max_power_iter,
                         show_pbar=self.show_pbar).run()
        self.sigma = 1 / max_eig

    with self.y_device:
        u = self.y_device.xp.zeros(A.oshape, dtype=self.y.dtype)

    self.alg = PrimalDualHybridGradient(
        proxfc, proxg, A, A.H, self.x, u, self.tau, self.sigma,
        gamma_primal=gamma_primal, gamma_dual=gamma_dual,
        max_iter=self.max_iter)
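# Sketch (plain NumPy; an assumption about the prox composition behind
# prox.L2Reg(..., y=self.z, proxh=self.proxg) above; soft_thresh is a
# hypothetical helper): folding the l2 term into the primal prox relies on
# the identity, for f(x) = lamda/2 * ||x - z||^2 + h(x),
#   prox_{a f}(v) = prox_{a / (1 + a*lamda) h}((v + a*lamda*z) / (1 + a*lamda)).
# Here h = ||.||_1 so prox_h is soft-thresholding, checked coordinate-wise
# against brute-force grid minimization.
import numpy as np

def soft_thresh(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0)

rng = np.random.default_rng(0)
lamda, a = 0.5, 0.7
v, z = rng.standard_normal(5), rng.standard_normal(5)

w = (v + a * lamda * z) / (1 + a * lamda)
x_id = soft_thresh(w, a / (1 + a * lamda))     # composed prox via the identity

grid = np.linspace(-4, 4, 160001)              # brute-force reference
obj = (0.5 * (grid[None, :] - v[:, None])**2
       + a * (lamda / 2) * (grid[None, :] - z[:, None])**2
       + a * np.abs(grid[None, :]))
x_bf = grid[np.argmin(obj, axis=1)]

assert np.allclose(x_id, x_bf, atol=1e-3)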