def test_05(self):
    # Solve with a multi-scale dictionary size: two filter groups with
    # different spatial supports, ((4, 4, 4), (8, 8, 4)).
    N = 16
    M = 8
    coefmap = np.random.randn(N, N, 1, 1, M)
    signal = np.random.randn(N, N, 1)
    try:
        solver = ccmod.ConvCnstrMOD(coefmap, signal,
                                    ((4, 4, 4), (8, 8, 4)))
        solver.solve()
    except Exception as exc:
        # Any exception is a test failure; print it for diagnosis.
        print(exc)
        assert 0
def test_04(self):
    # Basic solve with a single-channel signal and an explicit
    # (Nd, Nd, M) dictionary size.
    N, M, Nd = 16, 4, 8
    coefmap = np.random.randn(N, N, 1, 1, M)
    signal = np.random.randn(N, N, 1)
    try:
        solver = ccmod.ConvCnstrMOD(coefmap, signal, (Nd, Nd, M))
        solver.solve()
    except Exception as exc:
        # Any exception is a test failure; print it for diagnosis.
        print(exc)
        assert 0
def test_09(self):
    # Solve with dimK=0: S is a bare 2D array (no signal-index axis).
    N, M, Nd = 16, 4, 8
    coefmap = np.random.randn(N, N, 1, 1, M)
    signal = np.random.randn(N, N)
    try:
        options = ccmod.ConvCnstrMODOptions(
            {'Verbose': False, 'MaxMainIter': 20})
        solver = ccmod.ConvCnstrMOD(coefmap, signal, (Nd, Nd, 1, M),
                                    opt=options, dimK=0)
        solver.solve()
    except Exception as exc:
        # Any exception is a test failure; print it for diagnosis.
        print(exc)
        assert 0
def test_08(self):
    # Verify that the 'DataType' option propagates: the solution X keeps
    # the requested real dtype and the frequency-domain variables use the
    # corresponding complex dtype.
    N, M, Nd = 16, 4, 8
    coefmap = np.random.randn(N, N, 1, 1, M)
    signal = np.random.randn(N, N, 1)
    dtype = np.float64
    options = ccmod.ConvCnstrMODOptions(
        {'Verbose': False, 'MaxMainIter': 20,
         'BackTrack': {'Enabled': True}, 'DataType': dtype})
    solver = ccmod.ConvCnstrMOD(coefmap, signal, (Nd, Nd, M), opt=options)
    solver.solve()
    assert solver.X.dtype == dtype
    assert solver.Xf.dtype == sl.complex_dtype(dtype)
    assert solver.Yf.dtype == sl.complex_dtype(dtype)
def test_13(self):
    # Multi-channel (Nc) and multi-signal (K) problem.
    # NOTE(review): another method named test_13 appears later in this
    # file; if both are in the same class the later one shadows this
    # definition — confirm and rename one of them.
    N, M, K, Nc, Nd = 16, 4, 2, 3, 8
    coefmap = np.random.randn(N, N, Nc, K, M)
    signal = np.random.randn(N, N, Nc, K)
    try:
        options = ccmod.ConvCnstrMODOptions(
            {'Verbose': False, 'MaxMainIter': 20})
        solver = ccmod.ConvCnstrMOD(coefmap, signal, (Nd, Nd, Nc, M),
                                    opt=options)
        solver.solve()
    except Exception as exc:
        # Any exception is a test failure; print it for diagnosis.
        print(exc)
        assert 0
def test_06(self):
    # Multi-channel solve with an explicit step parameter 'L'; after a
    # successful solve the final residual must be small.
    N, M, Nc, Nd = 16, 4, 3, 8
    coefmap = np.random.randn(N, N, Nc, 1, M)
    signal = np.random.randn(N, N, Nc)
    step = 2e3
    try:
        options = ccmod.ConvCnstrMODOptions(
            {'Verbose': False, 'MaxMainIter': 100, 'L': step})
        solver = ccmod.ConvCnstrMOD(coefmap, signal, (Nd, Nd, 1, M),
                                    opt=options, dimK=0)
        solver.solve()
    except Exception as exc:
        # Any exception is a test failure; print it for diagnosis.
        print(exc)
        assert 0
    # Convergence check on the last recorded residual.
    assert np.array(solver.getitstat().Rsdl)[-1] < 5e-3
def test_13(self):
    # Recovery test: synthesize S from a known dictionary D0 and a sparse
    # coefficient map X, then check that solving recovers D0 closely.
    # NOTE(review): a method named test_13 also appears earlier in this
    # file; if both are in the same class this one shadows it — confirm.
    N, M, Nd = 64, 4, 8
    D0 = cr.normalise(cr.zeromean(np.random.randn(Nd, Nd, M),
                                  (Nd, Nd, M), dimN=2), dimN=2)
    # Sparse coefficient map: keep only entries where |noise| > 3.
    X = np.zeros((N, N, M))
    noise = np.random.randn(N, N, M)
    mask = np.abs(noise) > 3
    X[mask] = np.random.randn(X[mask].size)
    # Synthesize the signal as the sum over filters of D0 * X (circular
    # convolution computed in the frequency domain).
    S = np.sum(ifftn(fftn(D0, (N, N), (0, 1)) * fftn(X, None, (0, 1)),
                     None, (0, 1)).real, axis=2)
    step = 0.5
    options = ccmod.ConvCnstrMOD.Options(
        {'Verbose': False, 'MaxMainIter': 3000, 'ZeroMean': True,
         'RelStopTol': 0., 'L': step, 'BackTrack': {'Enabled': True}})
    # Insert singleton channel/signal axes expected by the solver.
    Xr = X.reshape(X.shape[0:2] + (1, 1,) + X.shape[2:])
    Sr = S.reshape(S.shape + (1,))
    solver = ccmod.ConvCnstrMOD(Xr, Sr, D0.shape, options)
    solver.solve()
    # Crop the solution back to the original dictionary support.
    D1 = cr.bcrop(solver.X, D0.shape).squeeze()
    assert rrs(D0, D1) < 1e-4
    assert np.array(solver.getitstat().Rsdl)[-1] < 1e-5
def __init__(self, D0, S, lmbda=None, opt=None, dimK=1, dimN=2):
    """
    Initialise a MixConvBPDNDictLearn object with problem size and
    options.

    Parameters
    ----------
    D0 : array_like
      Initial dictionary array
    S : array_like
      Signal array
    lmbda : float
      Regularisation parameter
    opt : :class:`ConvBPDNDictLearn.Options` object
      Algorithm options
    dimK : int, optional (default 1)
      Number of signal dimensions. If there is only a single input
      signal (e.g. if `S` is a 2D array representing a single image)
      `dimK` must be set to 0.
    dimN : int, optional (default 2)
      Number of spatial/temporal dimensions
    """

    if opt is None:
        opt = MixConvBPDNDictLearn.Options()
    self.opt = opt

    # Get dictionary size: an explicit 'DictSize' option overrides the
    # shape of the initial dictionary.
    if self.opt['DictSize'] is None:
        dsz = D0.shape
    else:
        dsz = self.opt['DictSize']

    # Construct object representing problem dimensions
    cri = cr.CDU_ConvRepIndexing(dsz, S, dimK, dimN)

    # Normalise dictionary (cropped projection; zero-mean behaviour
    # follows the D-update 'ZeroMean' option)
    D0 = cr.Pcn(D0, dsz, cri.Nv, dimN, cri.dimCd, crp=True,
                zm=opt['CCMOD', 'ZeroMean'])

    # Modify D update options to include initial values for Y and U:
    # the normalised dictionary, in standard form, zero-padded to the
    # signal spatial size.
    opt['CCMOD'].update(
        {'X0': cr.zpad(cr.stdformD(D0, cri.C, cri.M, dimN), cri.Nv)})

    # Create X update object (sparse coding step)
    xstep = Acbpdn.ConvBPDN(D0, S, lmbda, opt['CBPDN'], dimK=dimK,
                            dimN=dimN)

    # Create D update object (constrained dictionary update step)
    dstep = ccmod.ConvCnstrMOD(None, S, dsz, opt['CCMOD'], dimK=dimK,
                               dimN=dimN)

    # Configure iteration statistics reporting. With 'AccurateDFid'
    # the objective/fidelity/regularisation values are recomputed via
    # the evaluation map; otherwise they are taken directly from the
    # X-step iteration statistics.
    if self.opt['AccurateDFid']:
        isxmap = {'XPrRsdl': 'PrimalRsdl', 'XDlRsdl': 'DualRsdl',
                  'XRho': 'Rho'}
        evlmap = {'ObjFun': 'ObjFun', 'DFid': 'DFid', 'RegL1': 'RegL1'}
    else:
        isxmap = {'ObjFun': 'ObjFun', 'DFid': 'DFid', 'RegL1': 'RegL1',
                  'XPrRsdl': 'PrimalRsdl', 'XDlRsdl': 'DualRsdl',
                  'XRho': 'Rho'}
        evlmap = {}
    # The reported fields and display headers depend on whether the
    # D-step uses backtracking (extra F/Q/iteration-count columns).
    if dstep.opt['BackTrack', 'Enabled']:
        isfld = ['Iter', 'ObjFun', 'DFid', 'RegL1', 'Cnstr', 'XPrRsdl',
                 'XDlRsdl', 'XRho', 'D_F_Btrack', 'D_Q_Btrack',
                 'D_ItBt', 'D_L', 'Time']
        isdmap = {'Cnstr': 'Cnstr', 'D_F_Btrack': 'F_Btrack',
                  'D_Q_Btrack': 'Q_Btrack', 'D_ItBt': 'IterBTrack',
                  'D_L': 'L'}
        hdrtxt = ['Itn', 'Fnc', 'DFid', u('ℓ1'), 'Cnstr', 'r_X', 's_X',
                  u('ρ_X'), 'F_D', 'Q_D', 'It_D', 'L_D']
        hdrmap = {'Itn': 'Iter', 'Fnc': 'ObjFun', 'DFid': 'DFid',
                  u('ℓ1'): 'RegL1', 'Cnstr': 'Cnstr', 'r_X': 'XPrRsdl',
                  's_X': 'XDlRsdl', u('ρ_X'): 'XRho',
                  'F_D': 'D_F_Btrack', 'Q_D': 'D_Q_Btrack',
                  'It_D': 'D_ItBt', 'L_D': 'D_L'}
    else:
        isfld = ['Iter', 'ObjFun', 'DFid', 'RegL1', 'Cnstr', 'XPrRsdl',
                 'XDlRsdl', 'XRho', 'D_L', 'Time']
        isdmap = {'Cnstr': 'Cnstr', 'D_L': 'L'}
        hdrtxt = ['Itn', 'Fnc', 'DFid', u('ℓ1'), 'Cnstr', 'r_X', 's_X',
                  u('ρ_X'), 'L_D']
        hdrmap = {'Itn': 'Iter', 'Fnc': 'ObjFun', 'DFid': 'DFid',
                  u('ℓ1'): 'RegL1', 'Cnstr': 'Cnstr', 'r_X': 'XPrRsdl',
                  's_X': 'XDlRsdl', u('ρ_X'): 'XRho', 'L_D': 'D_L'}
    isc = dictlrn.IterStatsConfig(isfld=isfld, isxmap=isxmap,
                                  isdmap=isdmap, evlmap=evlmap,
                                  hdrtxt=hdrtxt, hdrmap=hdrmap)

    # Call parent constructor
    super(MixConvBPDNDictLearn, self).__init__(xstep, dstep, opt, isc)