def test_10cplx(self):
    N = 64
    M = 4
    Nd = 8
    D = np.random.randn(Nd, Nd, M) + 1j * np.random.randn(Nd, Nd, M)
    X0 = np.zeros((N, N, M)) + 1j * np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X0[xp] = (np.random.randn(X0[xp].size) +
              1j * np.random.randn(X0[xp].size))
    S = np.sum(fftconv(D, X0, axes=(0, 1)), axis=2)
    lmbda = 1e-4
    rho = 1e-1
    opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 500,
                                  'RelStopTol': 1e-3, 'rho': rho,
                                  'AutoRho': {'Enabled': False}})
    b = cbpdn.ConvBPDN(D, S, lmbda, opt)
    b.solve()
    X1 = b.Y.squeeze()
    assert rrs(X0, X1) < 5e-5
    Sr = b.reconstruct().squeeze()
    assert rrs(S, Sr) < 1e-4
def test_10(self):
    N = 64
    M = 4
    Nd = 8
    D = np.random.randn(Nd, Nd, M)
    X0 = np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X0[xp] = np.random.randn(X0[xp].size)
    S = np.sum(sl.ifftn(sl.fftn(D, (N, N), (0, 1)) *
                        sl.fftn(X0, None, (0, 1)), None, (0, 1)).real,
               axis=2)
    lmbda = 1e-2
    L = 1e3
    opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 2000,
                                  'RelStopTol': 1e-5, 'L': L,
                                  'BackTrack': {'Enabled': False}})
    b = cbpdn.ConvBPDN(D, S, lmbda, opt)
    b.solve()
    X1 = b.X.squeeze()
    assert sl.rrs(X0, X1) < 5e-4
    Sr = b.reconstruct().squeeze()
    assert sl.rrs(S, Sr) < 2e-4
def test_11(self):
    N = 63
    M = 4
    Nd = 8
    D = np.random.randn(Nd, Nd, M)
    X0 = np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X0[xp] = np.random.randn(X0[xp].size)
    S = np.sum(sl.ifftn(sl.fftn(D, (N, N), (0, 1)) *
                        sl.fftn(X0, None, (0, 1)), None, (0, 1)).real,
               axis=2)
    lmbda = 1e-4
    rho = 1e-1
    opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 500,
                                  'RelStopTol': 1e-3, 'rho': rho,
                                  'AutoRho': {'Enabled': False}})
    b = cbpdn.ConvBPDN(D, S, lmbda, opt)
    b.solve()
    X1 = b.Y.squeeze()
    assert sl.rrs(X0, X1) < 5e-5
    Sr = b.reconstruct().squeeze()
    assert sl.rrs(S, Sr) < 1e-4
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.YU[:] = self.Y - self.U YUf = sl.rfftn(self.YU, None, self.cri.axisN) # The sum is over the extra axis indexing spatial gradient # operators G_i, *not* over axisM b = self.DSf + self.rho*(YUf[..., -1] + self.Wtv * np.sum( np.conj(self.Gf) * YUf[..., 0:-1], axis=-1)) if self.cri.Cd == 1: self.Xf[:] = sl.solvedbi_sm( self.Df, self.rho*self.GHGf + self.rho, b, self.c, self.cri.axisM) else: self.Xf[:] = sl.solvemdbi_ism( self.Df, self.rho*self.GHGf + self.rho, b, self.cri.axisM, self.cri.axisC) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM) if self.cri.Cd == 1: DHop = lambda x: np.conj(self.Df) * x else: DHop = lambda x: sl.inner(np.conj(self.Df), x, axis=self.cri.axisC) ax = DHop(Dop(self.Xf)) + (self.rho*self.GHGf + self.rho)*self.Xf self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def test_02(self):
    N = 32
    M = 4
    Nd = 5
    D0 = cr.normalise(cr.zeromean(np.random.randn(Nd, Nd, M),
                                  (Nd, Nd, M), dimN=2), dimN=2)
    X = np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X[xp] = np.random.randn(X[xp].size)
    S = np.sum(sl.ifftn(sl.fftn(D0, (N, N), (0, 1)) *
                        sl.fftn(X, None, (0, 1)), None, (0, 1)).real,
               axis=2)
    rho = 1e-1
    opt = ccmod.ConvCnstrMOD_CG.Options({'Verbose': False,
                                         'MaxMainIter': 500,
                                         'LinSolveCheck': True,
                                         'ZeroMean': True,
                                         'RelStopTol': 1e-5, 'rho': rho,
                                         'AutoRho': {'Enabled': False},
                                         'CG': {'StopTol': 1e-5}})
    Xr = X.reshape(X.shape[0:2] + (1, 1,) + X.shape[2:])
    Sr = S.reshape(S.shape + (1,))
    c = ccmod.ConvCnstrMOD_CG(Xr, Sr, D0.shape, opt)
    c.solve()
    D1 = cr.bcrop(c.Y, D0.shape).squeeze()
    assert sl.rrs(D0, D1) < 1e-4
    assert np.array(c.getitstat().XSlvRelRes).max() < 1e-3
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.YU[:] = self.Y - self.U b = self.DSf + self.rho * fftn(self.YU, None, self.cri.axisN) if self.cri.Cd == 1: self.Xf[:] = sl.solvedbi_sm(self.Df, self.rho, b, self.c, self.cri.axisM) else: self.Xf[:] = sl.solvemdbi_ism(self.Df, self.rho, b, self.cri.axisM, self.cri.axisC) self.X = ifftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM) if self.cri.Cd == 1: DHop = lambda x: np.conj(self.Df) * x else: DHop = lambda x: sl.inner(np.conj(self.Df), x, axis=self.cri.axisC) ax = DHop(Dop(self.Xf)) + self.rho * self.Xf self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`. """ self.YU[:] = self.Y - self.U self.block_sep0(self.YU)[:] += self.S Zf = sl.rfftn(self.YU, None, self.cri.axisN) Z0f = self.block_sep0(Zf) Z1f = self.block_sep1(Zf) DZ0f = np.conj(self.Df) * Z0f DZ0fBQ = sl.dot(self.B.dot(self.Q).T, DZ0f, axis=self.cri.axisC) Z1fQ = sl.dot(self.Q.T, Z1f, axis=self.cri.axisC) b = DZ0fBQ + Z1fQ Xh = sl.solvedbd_sm(self.gDf, (self.mu / self.rho) * self.GHGf + 1.0, b, self.c, axis=self.cri.axisM) self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf, axis=self.cri.axisM) DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC) ax = self.rho * (DDXfBB + self.Xf) + \ self.mu * self.GHGf * self.Xf b = self.rho * (sl.dot(self.B.T, DZ0f, axis=self.cri.axisC) + Z1f) self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def test_18(self):
    N = 64
    M = 4
    Nd = 8
    D0 = cr.normalise(cr.zeromean(np.random.randn(Nd, Nd, M),
                                  (Nd, Nd, M), dimN=2), dimN=2)
    X = np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X[xp] = np.random.randn(X[xp].size)
    S = np.sum(ifftn(fftn(D0, (N, N), (0, 1)) *
                     fftn(X, None, (0, 1)), None, (0, 1)).real, axis=2)
    L = 50.0
    opt = ccmod.ConvCnstrMOD.Options({'Verbose': False,
                                      'MaxMainIter': 3000,
                                      'ZeroMean': True, 'RelStopTol': 0.,
                                      'L': L, 'Monotone': True})
    Xr = X.reshape(X.shape[0:2] + (1, 1,) + X.shape[2:])
    Sr = S.reshape(S.shape + (1,))
    c = ccmod.ConvCnstrMOD(Xr, Sr, D0.shape, opt)
    c.solve()
    D1 = cr.bcrop(c.X, D0.shape).squeeze()
    assert rrs(D0, D1) < 1e-4
    assert np.array(c.getitstat().Rsdl)[-1] < 1e-5
def test_13(self):
    N = 64
    M = 4
    Nd = 8
    D0 = cr.normalise(cr.zeromean(np.random.randn(Nd, Nd, M),
                                  (Nd, Nd, M), dimN=2), dimN=2)
    X = np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X[xp] = np.random.randn(X[xp].size)
    S = np.sum(sl.ifftn(sl.fftn(D0, (N, N), (0, 1)) *
                        sl.fftn(X, None, (0, 1)), None, (0, 1)).real,
               axis=2)
    L = 0.5
    opt = ccmod.ConvCnstrMOD.Options({'Verbose': False,
                                      'MaxMainIter': 3000,
                                      'ZeroMean': True, 'RelStopTol': 0.,
                                      'L': L,
                                      'BackTrack': {'Enabled': True}})
    Xr = X.reshape(X.shape[0:2] + (1, 1,) + X.shape[2:])
    Sr = S.reshape(S.shape + (1,))
    c = ccmod.ConvCnstrMOD(Xr, Sr, D0.shape, opt)
    c.solve()
    D1 = cr.bcrop(c.X, D0.shape).squeeze()
    assert sl.rrs(D0, D1) < 1e-4
    assert np.array(c.getitstat().Rsdl)[-1] < 1e-5
def xstep(self): r"""Minimise Augmented Lagrangian with respect to block vector :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T & \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`. """ # This test reflects empirical evidence that two slightly # different implementations are faster for single or # multi-channel data. This kludge is intended to be temporary. if self.cri.Cd > 1: for i in range(self.Nb): self.xistep(i) else: self.YU[:] = self.Y[..., np.newaxis] - self.U b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \ + self.rho*sl.rfftn(self.YU, None, self.cri.axisN) for i in range(self.Nb): self.Xf[..., i] = sl.solvedbi_sm(self.Zf[..., [i], :], self.rho, b[..., i], axis=self.cri.axisM) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True) YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1) b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN) Xf = self.swapaxes(self.Xf) Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM) ZHop = lambda x: np.conj(self.Zf) * x ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK, keepdims=True) self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def xstep(self): """Minimise Augmented Lagrangian with respect to x.""" self.cgit = None self.YU[:] = self.Y - self.U b = self.ASf + self.rho * sl.rfftn(self.YU, None, self.cri.axisN) if self.opt['LinSolve'] == 'SM': self.Xf[:] = sl.solvemdbi_ism(self.Af, self.rho, b, self.cri.axisM, self.cri.axisK) else: self.Xf[:], cgit = sl.solvemdbi_cg(self.Af, self.rho, b, self.cri.axisM, self.cri.axisK, self.opt['CG', 'StopTol'], self.opt['CG', 'MaxIter'], self.Xf) self.cgit = cgit self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: Aop = lambda x: np.sum( self.Af * x, axis=self.cri.axisM, keepdims=True) AHop = lambda x: np.sum( np.conj(self.Af) * x, axis=self.cri.axisK, keepdims=True) ax = AHop(Aop(self.Xf)) + self.rho * self.Xf self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def test_02(self):
    rho = 1e-1
    opt = ccmod.ConvCnstrMOD_CG.Options({'Verbose': False,
                                         'MaxMainIter': 500,
                                         'LinSolveCheck': True,
                                         'ZeroMean': True,
                                         'RelStopTol': 1e-5, 'rho': rho,
                                         'AutoRho': {'Enabled': False},
                                         'CG': {'StopTol': 1e-5}})
    Xr = self.X.reshape(self.X.shape[0:2] + (1, 1,) + self.X.shape[2:])
    Sr = self.S.reshape(self.S.shape + (1,))
    c = ccmod.ConvCnstrMOD_CG(Xr, Sr, self.D0.shape, opt)
    c.solve()
    D1 = cr.bcrop(c.Y, self.D0.shape).squeeze()
    assert rrs(self.D0, D1) < 1e-4
    assert np.array(c.getitstat().XSlvRelRes).max() < 1e-3
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.YU[:] = self.Y - self.U YUf = sl.rfftn(self.YU, None, self.cri.axisN) # The sum is over the extra axis indexing spatial gradient # operators G_i, *not* over axisM b = self.DSf + self.rho * (YUf[..., -1] + self.Wtv * np.sum( np.conj(self.Gf) * YUf[..., 0:-1], axis=-1)) if self.cri.Cd == 1: self.Xf[:] = sl.solvedbi_sm(self.Df, self.rho * self.GHGf + self.rho, b, self.c, self.cri.axisM) else: self.Xf[:] = sl.solvemdbi_ism(self.Df, self.rho * self.GHGf + self.rho, b, self.cri.axisM, self.cri.axisC) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM) if self.cri.Cd == 1: DHop = lambda x: np.conj(self.Df) * x else: DHop = lambda x: sl.inner( np.conj(self.Df), x, axis=self.cri.axisC) ax = DHop(Dop( self.Xf)) + (self.rho * self.GHGf + self.rho) * self.Xf self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`. """ self.YU[:] = self.Y - self.U self.block_sep0(self.YU)[:] += self.S Zf = sl.rfftn(self.YU, None, self.cri.axisN) Z0f = self.block_sep0(Zf) Z1f = self.block_sep1(Zf) DZ0f = np.conj(self.Df) * Z0f DZ0fBQ = sl.dot(self.B.dot(self.Q).T, DZ0f, axis=self.cri.axisC) Z1fQ = sl.dot(self.Q.T, Z1f, axis=self.cri.axisC) b = DZ0fBQ + Z1fQ Xh = sl.solvedbd_sm(self.gDf, (self.mu / self.rho) * self.GHGf + 1.0, b, self.c, axis=self.cri.axisM) self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf, axis=self.cri.axisM) DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC) ax = self.rho * (DDXfBB + self.Xf) + \ self.mu * self.GHGf * self.Xf b = self.rho * (sl.dot(self.B.T, DZ0f, axis=self.cri.axisC) + Z1f) self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def xstep(self): r"""Minimise Augmented Lagrangian with respect to block vector :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T & \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`. """ # This test reflects empirical evidence that two slightly # different implementations are faster for single or # multi-channel data. This kludge is intended to be temporary. if self.cri.Cd > 1: for i in range(self.Nb): self.xistep(i) else: self.YU[:] = self.Y[..., np.newaxis] - self.U b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \ + self.rho*sl.rfftn(self.YU, None, self.cri.axisN) for i in range(self.Nb): self.Xf[..., i] = sl.solvedbi_sm( self.Zf[..., [i], :], self.rho, b[..., i], axis=self.cri.axisM) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True) YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1) b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN) Xf = self.swapaxes(self.Xf) Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM) ZHop = lambda x: np.conj(self.Zf) * x ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK, keepdims=True) self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.X = sl.idctii(self.Gamma*sl.dctii(self.Y + self.S - self.U, axes=self.axes), axes=self.axes) if self.opt['LinSolveCheck']: self.xrrs = sl.rrs(self.X + (self.lmbda/self.rho) * sl.idctii((self.Alpha**2)*sl.dctii(self.X, axes=self.axes), axes=self.axes), self.Y + self.S - self.U) else: self.xrrs = None
def test_07(self):
    rho = 1e-1
    N = 64
    M = 128
    K = 32
    D = np.random.randn(N, M)
    X = np.random.randn(M, K)
    S = D.dot(X)
    Z = (D.dot(X).dot(X.T) + rho * D - S.dot(X.T)) / rho
    c, lwr = linalg.cho_factor(X, rho)
    Dslv = linalg.cho_solve_AATI(X, rho, S.dot(X.T) + rho * Z, c, lwr)
    assert linalg.rrs(Dslv.dot(X).dot(X.T) + rho * Dslv,
                      S.dot(X.T) + rho * Z) < 1e-11
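# A plain-numpy reference for the system this test exercises (an
# illustrative restatement, not the sporco implementation): the unknown
# multiplies from the left, D (X X^T + rho I) = B, so the solve is done
# on the transposed system.
import numpy as np

def solve_AATI_ref(X, rho, B):
    M = X.shape[0]
    return np.linalg.solve((X.dot(X.T) + rho * np.eye(M)).T, B.T).T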
def test_01(self):
    rho = 1e-1
    N = 64
    M = 128
    K = 32
    D = np.random.randn(N, M)
    X = np.random.randn(M, K)
    S = D.dot(X)
    Z = (D.T.dot(D).dot(X) + rho * X - D.T.dot(S)) / rho
    lu, piv = linalg.lu_factor(D, rho)
    Xslv = linalg.lu_solve_ATAI(D, rho, D.T.dot(S) + rho * Z, lu, piv)
    assert linalg.rrs(D.T.dot(D).dot(Xslv) + rho * Xslv,
                      D.T.dot(S) + rho * Z) < 1e-11
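# The corresponding plain-numpy reference for the left-multiplied system
# (D^T D + rho I) x = b (again an illustrative restatement; the lu_ and
# cho_ variants above factorise once and reuse the factors across
# iterations).
import numpy as np

def solve_ATAI_ref(D, rho, b):
    M = D.shape[1]
    return np.linalg.solve(D.T.dot(D) + rho * np.eye(M), b)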
def test_09(self):
    rho = 1e-1
    N = 32
    M = 16
    K = 8
    D = util.complex_randn(N, N, 1, 1, M)
    X = util.complex_randn(N, N, 1, K, M)
    S = np.sum(D*X, axis=4, keepdims=True)
    Z = (D.conj() * np.sum(D*X, axis=4, keepdims=True) + rho*X -
         D.conj()*S) / rho
    Xslv = linalg.solvedbi_sm(D, rho, D.conj()*S + rho*Z)
    assert linalg.rrs(D.conj() * np.sum(D*Xslv, axis=4, keepdims=True) +
                      rho*Xslv, D.conj()*S + rho*Z) < 1e-11
def test_02(self):
    rho = 1e-1
    N = 128
    M = 64
    K = 32
    D = np.random.randn(N, M)
    X = np.random.randn(M, K)
    S = D.dot(X)
    Z = (D.T.dot(D).dot(X) + rho * X - D.T.dot(S)) / rho
    lu, piv = linalg.lu_factor(D, rho)
    Xslv = linalg.lu_solve_ATAI(D, rho, D.T.dot(S) + rho * Z, lu, piv)
    assert linalg.rrs(D.T.dot(D).dot(Xslv) + rho * Xslv,
                      D.T.dot(S) + rho * Z) < 1e-14
def test_04(self):
    rho = 1e-1
    N = 128
    M = 64
    K = 32
    D = np.random.randn(N, M)
    X = np.random.randn(M, K)
    S = D.dot(X)
    Z = (D.dot(X).dot(X.T) + rho * D - S.dot(X.T)) / rho
    lu, piv = linalg.lu_factor(X, rho)
    Dslv = linalg.lu_solve_AATI(X, rho, S.dot(X.T) + rho * Z, lu, piv)
    assert linalg.rrs(Dslv.dot(X).dot(X.T) + rho * Dslv,
                      S.dot(X.T) + rho * Z) < 1e-11
def test_05(self):
    rho = 1e-1
    N = 64
    M = 128
    K = 32
    D = np.random.randn(N, M)
    X = np.random.randn(M, K)
    S = D.dot(X)
    Z = (D.T.dot(D).dot(X) + rho * X - D.T.dot(S)) / rho
    c, lwr = linalg.cho_factor(D, rho)
    Xslv = linalg.cho_solve_ATAI(D, rho, D.T.dot(S) + rho * Z, c, lwr)
    assert linalg.rrs(D.T.dot(D).dot(Xslv) + rho * Xslv,
                      D.T.dot(S) + rho * Z) < 1e-11
def xstep(self): """Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" b = self.AHSf + np.sum(np.conj(self.GAf) * sl.rfftn(self.Y-self.U, axes=self.axes), axis=self.Y.ndim-1) self.Xf = b / (self.AHAf + self.GHGf) self.X = sl.irfftn(self.Xf, None, axes=self.axes) if self.opt['LinSolveCheck']: ax = (self.AHAf + self.GHGf)*self.Xf self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def test_06(self):
    rho = 1e-1
    N = 128
    M = 64
    K = 32
    D = np.random.randn(N, M)
    X = np.random.randn(M, K)
    S = D.dot(X)
    Z = (D.T.dot(D).dot(X) + rho * X - D.T.dot(S)) / rho
    c, lwr = linalg.cho_factor(D, rho)
    Xslv = linalg.cho_solve_ATAI(D, rho, D.T.dot(S) + rho * Z, c, lwr)
    assert linalg.rrs(D.T.dot(D).dot(Xslv) + rho * Xslv,
                      D.T.dot(S) + rho * Z) < 1e-14
def test_09(self):
    N = 64
    M = 4
    Nd = 8
    D = np.random.randn(Nd, Nd, M)
    X0 = np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X0[xp] = np.random.randn(X0[xp].size)
    S = np.sum(sl.ifftn(sl.fftn(D, (N, N), (0, 1)) *
                        sl.fftn(X0, None, (0, 1)), None, (0, 1)).real,
               axis=2)
    lmbda = 1e-4
    rho = 3e-3
    alpha = 6
    opt = parcbpdn.ParConvBPDN.Options({'Verbose': False,
                                        'MaxMainIter': 1000,
                                        'RelStopTol': 1e-3, 'rho': rho,
                                        'alpha': alpha,
                                        'AutoRho': {'Enabled': False}})
    b = parcbpdn.ParConvBPDN(D, S, lmbda, opt=opt)
    b.solve()
    X1 = b.Y.squeeze()
    assert sl.rrs(X0, X1) < 5e-5
    Sr = b.reconstruct().squeeze()
    assert sl.rrs(S, Sr) < 1e-4
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.X = sl.idctii(self.Gamma*sl.dctii(self.Y + self.S - self.U, axes=self.axes), axes=self.axes) if self.opt['LinSolveCheck']: self.xrrs = sl.rrs( self.X + (self.lmbda/self.rho) * sl.idctii((self.Alpha**2) * sl.dctii(self.X, axes=self.axes), axes=self.axes), self.Y + self.S - self.U) else: self.xrrs = None
def test_11(self):
    N = 63
    M = 4
    Nd = 8
    D = np.random.randn(Nd, Nd, M)
    X0 = np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X0[xp] = np.random.randn(X0[xp].size)
    S = np.sum(sl.ifftn(sl.fftn(D, (N, N), (0, 1)) *
                        sl.fftn(X0, None, (0, 1)), None, (0, 1)).real,
               axis=2)
    lmbda = 1e-2
    L = 1e3
    opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 2000,
                                  'RelStopTol': 1e-9, 'L': L,
                                  'BackTrack': {'Enabled': False}})
    b = cbpdn.ConvBPDN(D, S, lmbda, opt)
    b.solve()
    X1 = b.X.squeeze()
    assert sl.rrs(X0, X1) < 5e-4
    Sr = b.reconstruct().squeeze()
    assert sl.rrs(S, Sr) < 2e-4
def test_10(self):
    N = 32
    M = 16
    K = 8
    D = util.complex_randn(N, N, 1, 1, M)
    X = util.complex_randn(N, N, 1, K, M)
    S = np.sum(D*X, axis=4, keepdims=True)
    d = 1e-1 * (np.random.randn(N, N, 1, 1, M).astype('complex') +
                np.random.randn(N, N, 1, 1, M).astype('complex') * 1.0j)
    Z = (D.conj() * np.sum(D*X, axis=4, keepdims=True) + d*X -
         D.conj()*S) / d
    Xslv = linalg.solvedbd_sm(D, d, D.conj()*S + d*Z)
    assert linalg.rrs(D.conj() * np.sum(D*Xslv, axis=4, keepdims=True) +
                      d*Xslv, D.conj()*S + d*Z) < 1e-11
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.X = np.asarray(sl.lu_solve_ATAI( self.D, self.rho, self.DTS + self.rho * (self.Y - self.U), self.lu, self.piv), dtype=self.dtype) if self.opt['LinSolveCheck']: b = self.DTS + self.rho * (self.Y - self.U) ax = self.D.T.dot(self.D.dot(self.X)) + self.rho * self.X self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def xstep_check(self, b):
    r"""Check the minimisation of the Augmented Lagrangian with
    respect to :math:`\mathbf{x}` by method `xstep` defined in
    derived classes. This method should be called at the end of any
    `xstep` method.
    """

    if self.opt['LinSolveCheck']:
        Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
        ZHop = lambda x: sl.inner(np.conj(self.Zf), x,
                                  axis=self.cri.axisK)
        ax = ZHop(Zop(self.Xf)) + self.rho * self.Xf
        self.xrrs = sl.rrs(ax, b)
    else:
        self.xrrs = None
def test_13(self):
    rho = 1e-1
    N = 32
    M = 16
    K = 8
    D = util.complex_randn(N, N, 1, 1, M)
    X = util.complex_randn(N, N, 1, K, M)
    S = np.sum(D*X, axis=4, keepdims=True)
    Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
    XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
    Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
    Dslv = linalg.solvemdbi_rsm(X, rho, XHop(S) + rho*Z, 3)
    assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv,
                      XHop(S) + rho*Z) < 1e-11
def test_15(self):
    rho = 1e-1
    N = 32
    M = 16
    K = 8
    D = util.complex_randn(N, N, 1, 1, M)
    X = util.complex_randn(N, N, 1, K, M)
    S = np.sum(D*X, axis=4, keepdims=True)
    Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
    XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
    Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
    Dslv, cgit = linalg.solvemdbi_cg(X, rho, XHop(S) + rho*Z, 4, 3,
                                     tol=1e-6)
    assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv,
                      XHop(S) + rho*Z) <= 1e-6
def test_05(self):
    rho = 1e-1
    N = 64
    M = 32
    K = 8
    D = np.random.randn(N, N, 1, 1, M).astype('complex') + \
        np.random.randn(N, N, 1, 1, M).astype('complex') * 1.0j
    X = np.random.randn(N, N, 1, K, M).astype('complex') + \
        np.random.randn(N, N, 1, K, M).astype('complex') * 1.0j
    S = np.sum(D*X, axis=4, keepdims=True)
    Z = (D.conj() * np.sum(D*X, axis=4, keepdims=True) + rho*X -
         D.conj()*S) / rho
    Xslv = linalg.solvedbi_sm(D, rho, D.conj()*S + rho*Z)
    assert linalg.rrs(D.conj() * np.sum(D*Xslv, axis=4, keepdims=True) +
                      rho*Xslv, D.conj()*S + rho*Z) < 1e-11
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`. """ self.X = np.asarray(sl.cho_solve_ATAI( self.D, self.rho, self.DTS + self.rho * (self.Y - self.U), self.lu, self.piv), dtype=self.dtype) if self.opt['LinSolveCheck']: b = self.DTS + self.rho * (self.Y - self.U) ax = self.D.T.dot(self.D.dot(self.X)) + self.rho*self.X self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`. """ b = self.AHSf + self.rho*np.sum( np.conj(self.Gf)*sl.rfftn(self.Y-self.U, axes=self.axes), axis=self.Y.ndim-1) self.Xf = b / (self.AHAf + self.rho*self.GHGf) self.X = sl.irfftn(self.Xf, self.axsz, axes=self.axes) if self.opt['LinSolveCheck']: ax = (self.AHAf + self.rho*self.GHGf)*self.Xf self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def xstep(self): """Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" ngsit = 0 gsrrs = np.inf while gsrrs > self.opt['GSTol'] and ngsit < self.opt['MaxGSIter']: self.X = self.GaussSeidelStep(self.S, self.X, self.cnst_AT(self.Y - self.U), self.rho, self.lcw, self.Wdf2) gsrrs = sl.rrs( self.rho * self.cnst_AT(self.cnst_A(self.X)) + self.Wdf2 * self.X, self.Wdf2 * self.S + self.rho * self.cnst_AT(self.Y - self.U)) ngsit += 1 self.xs = (ngsit, gsrrs)
def test_14(self):
    rho = 1e-1
    N = 64
    M = 32
    C = 3
    K = 8
    D = util.complex_randn(N, N, C, 1, M)
    X = util.complex_randn(N, N, 1, K, M)
    S = np.sum(D*X, axis=4, keepdims=True)
    Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
    XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
    Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
    Dslv = linalg.solvemdbi_rsm(X, rho, XHop(S) + rho*Z, 3)
    assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv,
                      XHop(S) + rho*Z) < 1e-11
def test_06(self):
    rho = 1e-1
    N = 64
    M = 32
    K = 8
    D = np.random.randn(N, N, 1, 1, M).astype('complex') + \
        np.random.randn(N, N, 1, 1, M).astype('complex') * 1.0j
    X = np.random.randn(N, N, 1, K, M).astype('complex') + \
        np.random.randn(N, N, 1, K, M).astype('complex') * 1.0j
    S = np.sum(D*X, axis=4, keepdims=True)
    Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
    XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
    Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
    Dslv = linalg.solvemdbi_ism(X, rho, XHop(S) + rho*Z, 4, 3)
    assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv,
                      XHop(S) + rho*Z) < 1e-11
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`. """ ngsit = 0 gsrrs = np.inf while gsrrs > self.opt['GSTol'] and ngsit < self.opt['MaxGSIter']: self.X = self.GaussSeidelStep(self.S, self.X, self.cnst_AT(self.Y-self.U), self.rho, self.lcw, self.Wdf2) gsrrs = sl.rrs( self.rho*self.cnst_AT(self.cnst_A(self.X)) + self.Wdf2*self.X, self.Wdf2*self.S + self.rho*self.cnst_AT(self.Y - self.U)) ngsit += 1 self.xs = (ngsit, gsrrs)
def xstep(self): """Minimise Augmented Lagrangian with respect to x.""" ngsit = 0 gsrrs = np.inf YU = self.Y - self.U SYU = self.S + YU[..., -1] YU[..., -1] = 0.0 ATYU = self.cnst_AT(YU) while gsrrs > self.opt['GSTol'] and ngsit < self.opt['MaxGSIter']: self.X = self.GaussSeidelStep(SYU, self.X, ATYU, 1.0, self.lcw, 1.0) gsrrs = sl.rrs( self.cnst_AT(self.cnst_A(self.X)), self.cnst_AT(self.cnst_c() - self.cnst_B(self.Y) - self.U)) ngsit += 1 self.xs = (ngsit, gsrrs)
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`. """ ngsit = 0 gsrrs = np.inf YU = self.Y - self.U SYU = self.S + YU[..., -1] YU[..., -1] = 0.0 ATYU = self.cnst_AT(YU) while gsrrs > self.opt['GSTol'] and ngsit < self.opt['MaxGSIter']: self.X = self.GaussSeidelStep( SYU, self.X, ATYU, 1.0, self.lcw, 1.0) gsrrs = sl.rrs( self.cnst_AT(self.cnst_A(self.X)), self.cnst_AT(self.cnst_c() - self.cnst_B(self.Y) - self.U) ) ngsit += 1 self.xs = (ngsit, gsrrs)
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.YU[:] = self.Y - self.U Zf = rfftn(self.YU, None, self.cri.axisN) ZfQ = dot(self.Q.T, Zf, axis=self.cri.axisC) b = self.DSfBQ + self.rho * ZfQ Xh = solvedbi_sm(self.gDf, self.rho, b, self.c, axis=self.cri.axisM) self.Xf[:] = dot(self.Q, Xh, axis=self.cri.axisC) self.X = irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: DDXf = np.conj(self.Df) * inner( self.Df, self.Xf, axis=self.cri.axisM) DDXfBB = dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC) ax = DDXfBB + self.rho * self.Xf b = dot(self.B.T, self.DSf, axis=self.cri.axisC) + \ self.rho * Zf self.xrrs = rrs(ax, b) else: self.xrrs = None
def test_02(self):
    N = 32
    M = 4
    Nd = 5
    D0 = cr.normalise(cr.zeromean(np.random.randn(Nd, Nd, M),
                                  (Nd, Nd, M), dimN=2), dimN=2)
    X = np.zeros((N, N, M))
    xr = np.random.randn(N, N, M)
    xp = np.abs(xr) > 3
    X[xp] = np.random.randn(X[xp].size)
    S = np.sum(sl.ifftn(sl.fftn(D0, (N, N), (0, 1)) *
                        sl.fftn(X, None, (0, 1)), None, (0, 1)).real,
               axis=2)
    L = 1e1
    opt = ccmod.ConvCnstrMOD.Options({'Verbose': False,
                                      'MaxMainIter': 3000,
                                      'ZeroMean': True,
                                      'RelStopTol': 1e-6, 'L': L,
                                      'BackTrack': {'Enabled': True}})
    Xr = X.reshape(X.shape[0:2] + (1, 1,) + X.shape[2:])
    Sr = S.reshape(S.shape + (1,))
    c = ccmod.ConvCnstrMOD(Xr, Sr, D0.shape, opt)
    c.solve()
    D1 = cr.bcrop(c.X, D0.shape).squeeze()
    assert sl.rrs(D0, D1) < 1e-4
    assert np.array(c.getitstat().Rsdl)[-1] < 1e-5
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.YU[:] = self.Y - self.U Zf = sl.rfftn(self.YU, None, self.cri.axisN) ZfQ = sl.dot(self.Q.T, Zf, axis=self.cri.axisC) b = self.DSfBQ + self.rho * ZfQ Xh = sl.solvedbi_sm(self.gDf, self.rho, b, self.c, axis=self.cri.axisM) self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf, axis=self.cri.axisM) DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC) ax = DDXfBB + self.rho * self.Xf b = sl.dot(self.B.T, self.DSf, axis=self.cri.axisC) + \ self.rho * Zf self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
def test_17(self):
    b = np.array([0.0, 0.0, 2.0])
    s = np.array([0.0, 0.0, 0.0])
    r = 1.0
    p = linalg.proj_l2ball(b, s, r)
    assert linalg.rrs(p, np.array([0.0, 0.0, 1.0])) < 1e-14
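# A minimal reference implementation of the projection this test
# exercises (an assumption about the semantics of linalg.proj_l2ball):
# projecting b onto the l2 ball of radius r centred at s leaves interior
# points unchanged and radially rescales exterior points onto the ball
# surface.
import numpy as np

def proj_l2ball_ref(b, s, r):
    d = np.linalg.norm(b - s)
    return b if d <= r else s + (r / d) * (b - s)

# Matches the expected value in the test above
p = proj_l2ball_ref(np.array([0.0, 0.0, 2.0]), np.zeros(3), 1.0)
assert np.allclose(p, [0.0, 0.0, 1.0])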
def xstep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.""" self.YU[:] = self.Y - self.U YUf = sl.rfftn(self.YU, None, self.cri.axisN) YUf0 = self.block_sep0(YUf) YUf1 = self.block_sep1(YUf) b = self.rho * np.sum(np.conj(self.GDf) * YUf1, axis=-1) if self.cri.Cd > 1: b = np.sum(b, axis=self.cri.axisC, keepdims=True) b += self.DSf + self.rho*YUf0 # Concatenate multiple GDf components on axisC. For # single-channel signals, and multi-channel signals with a # single-channel dictionary, we end up with sl.solvemdbi_ism # solving a linear system of rank dimN+1 (corresponding to the # dictionary and a gradient operator per spatial dimension) plus # an identity. For multi-channel signals with a multi-channel # dictionary, we end up with sl.solvemdbi_ism solving a linear # system of rank C.d (dimN+1) (corresponding to the dictionary # and a gradient operator per spatial dimension for each # channel) plus an identity. # The structure of the linear system to be solved depends on the # number of channels in the signal and dictionary. Both branches are # the same in the single-channel signal case (the choice of handling # it via the 'else' branch is somewhat arbitrary). if self.cri.C > 1 and self.cri.Cd == 1: # Concatenate multiple GDf components on the final axis # of GDf (that indexes the number of gradient operators). For # multi-channel signals with a single-channel dictionary, # sl.solvemdbi_ism has to solve a linear system of rank dimN+1 # (corresponding to the dictionary and a gradient operator per # spatial dimension) DfGDf = np.concatenate( [self.Df[..., np.newaxis],] + [np.sqrt(self.rho)*self.GDf[..., k, np.newaxis] for k in range(self.GDf.shape[-1])], axis=-1) self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b[..., np.newaxis], self.cri.axisM, -1)[..., 0] else: # Concatenate multiple GDf components on axisC. For multi-channel # signals with a multi-channel dictionary, sl.solvemdbi_ism has # to solve a linear system of rank C.d (dimN+1) (corresponding to # the dictionary and a gradient operator per spatial dimension # for each channel) plus an identity. DfGDf = np.concatenate( [self.Df,] + [np.sqrt(self.rho)*self.GDf[..., k] for k in range(self.GDf.shape[-1])], axis=self.cri.axisC) self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b, self.cri.axisM, self.cri.axisC) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: if self.cri.C > 1 and self.cri.Cd == 1: Dop = lambda x: sl.inner(DfGDf, x[..., np.newaxis], axis=self.cri.axisM) DHop = lambda x: sl.inner(np.conj(DfGDf), x, axis=-1) ax = DHop(Dop(self.Xf))[..., 0] + self.rho*self.Xf else: Dop = lambda x: sl.inner(DfGDf, x, axis=self.cri.axisM) DHop = lambda x: sl.inner(np.conj(DfGDf), x, axis=self.cri.axisC) ax = DHop(Dop(self.Xf)) + self.rho*self.Xf self.xrrs = sl.rrs(ax, b) else: self.xrrs = None