def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
    if opt is None:
        opt = ConvBPDNRecTV.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cbpdn.ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

    # Call parent class __init__
    Nx = np.product(self.cri.shpX)
    yshape = list(self.cri.shpX)
    yshape[self.cri.axisM] += len(self.cri.axisN) * self.cri.Cd
    super(ConvBPDNRecTV, self).__init__(Nx, yshape, yshape, S.dtype, opt)

    # Set l1 term scaling and weight array
    self.lmbda = self.dtype.type(lmbda)
    self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)

    self.mu = self.dtype.type(mu)
    if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
        self.Wtv = np.asarray(
            opt['TVWeight'].reshape((1,) * (dimN + 2) +
                                    opt['TVWeight'].shape),
            dtype=self.dtype)
    else:
        # Wtv is a scalar: no need to change shape
        self.Wtv = self.dtype.type(opt['TVWeight'])

    # Set penalty parameter
    self.set_attr('rho', opt['rho'], dval=(50.0 * self.lmbda + 1.0),
                  dtype=self.dtype)

    # Set rho_xi attribute
    self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,
                  dtype=self.dtype)

    # Reshape D and S to standard layout
    self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
    self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

    # Compute signal in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    self.Gf, GHGf = sl.GradientFilters(self.cri.dimN + 3, self.cri.axisN,
                                       self.cri.Nv, dtype=self.dtype)

    # Initialise byte-aligned arrays for pyfftw
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    xfshp = list(self.cri.shpX)
    xfshp[dimN - 1] = xfshp[dimN - 1] // 2 + 1
    self.Xf = sl.pyfftw_empty_aligned(xfshp,
                                      dtype=sl.complex_dtype(self.dtype))

    self.setdict()
def init_vars(self, S, dimK):
    """Initialise variables required for sparse coding and dictionary
    update for training data `S`."""

    Nv = S.shape[0:self.dimN]
    if self.cri is None or Nv != self.cri.Nv:
        self.cri = cr.CDU_ConvRepIndexing(self.dsz, S, dimK, self.dimN)
        if self.opt['CUDA_CBPDN']:
            if self.cri.Cd > 1 or self.cri.Cx > 1:
                raise ValueError('CUDA CBPDN solver can only be used for '
                                 'single channel problems')
        # self.Df = sl.pyfftw_byte_aligned(sl.rfftn(self.D, self.cri.Nv,
        #                                           self.cri.axisN))
        self.Df = pyfftw.byte_align(sl.rfftn(self.D, self.cri.Nv,
                                             self.cri.axisN))
        self.Gf = sl.pyfftw_empty_aligned(self.Df.shape, self.Df.dtype)
        self.Z = sl.pyfftw_empty_aligned(self.cri.shpX, self.dtype)
    else:
        self.Df[:] = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
def setcoef(self, Z):
    """Set coefficient array."""

    # If the dictionary has a single channel but the input (and
    # therefore also the coefficient map array) has multiple
    # channels, the channel index and multiple image index have
    # the same behaviour in the dictionary update equation: the
    # simplest way to handle this is to just reshape so that the
    # channels also appear on the multiple image index.
    if self.cri.Cd == 1 and self.cri.C > 1:
        Z = Z.reshape(self.cri.Nv + (1,) + (self.cri.Cx * self.cri.K,) +
                      (self.cri.M,))
    if Z.shape != self.Z.shape:
        self.Z = sl.pyfftw_empty_aligned(Z.shape, self.dtype)
    self.Z[:] = np.asarray(Z, dtype=self.dtype)
    self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
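
# A minimal NumPy sketch (hypothetical sizes, not part of the class above) of
# the reshape performed in setcoef when the dictionary is single channel
# (Cd == 1) but the coefficient maps carry C > 1 channels: the channel and
# image axes are merged so that channels behave like additional images in the
# dictionary update.
import numpy as np

N0, N1, C, K, M = 8, 8, 3, 2, 5        # hypothetical problem sizes
Z = np.random.randn(N0, N1, C, K, M)   # standard layout: Nv + (C, K, M)

# Merge the channel and image axes: Nv + (1, C*K, M)
Zm = Z.reshape((N0, N1) + (1,) + (C * K,) + (M,))
print(Z.shape, '->', Zm.shape)         # (8, 8, 3, 2, 5) -> (8, 8, 1, 6, 5)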
def __init__(self, D, S, lmbda, W=None, opt=None, dimK=None, dimN=2):
    """Initialise a ConvBPDNMask object with problem parameters.

    |

    Parameters
    ----------
    D : array_like
      Dictionary matrix
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter
    W : array_like
      Mask array. The array shape must be such that the array is
      compatible for multiplication with input array S (see
      :func:`.cnvrep.mskWshape` for more details).
    opt : :class:`ConvBPDNMask.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    super(ConvBPDNMask, self).__init__(D, S, lmbda, opt, dimK=dimK,
                                       dimN=dimN)

    # Set gradient step parameter
    # self.set_attr('L', opt['L'], dval=100.0, dtype=self.dtype)

    if W is None:
        W = np.array([1.0], dtype=self.dtype)
    self.W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)),
                        dtype=self.dtype)

    # Create byte aligned arrays for FFT calls
    self.WRy = sl.pyfftw_empty_aligned(self.S.shape, dtype=self.dtype)
    self.Ryf = sl.pyfftw_rfftn_empty_aligned(self.S.shape, self.cri.axisN,
                                             self.dtype)
def __init__(self, D, S, lmbda, W=None, opt=None, dimK=None, dimN=2):
    """
    |

    Parameters
    ----------
    D : array_like
      Dictionary matrix
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter
    W : array_like
      Mask array. The array shape must be such that the array is
      compatible for multiplication with input array S (see
      :func:`.cnvrep.mskWshape` for more details).
    opt : :class:`ConvBPDNMask.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    super(ConvBPDNMask, self).__init__(D, S, lmbda, opt, dimK=dimK,
                                       dimN=dimN)

    if W is None:
        W = np.array([1.0], dtype=self.dtype)
    self.W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)),
                        dtype=self.dtype)

    # Create byte aligned arrays for FFT calls
    self.WRy = sl.pyfftw_empty_aligned(self.S.shape, dtype=self.dtype)
    self.Ryf = sl.pyfftw_rfftn_empty_aligned(self.S.shape, self.cri.axisN,
                                             self.dtype)
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
    """
    Initialise a ConvCnstrMODMaskDcplBase object with problem size
    and options.

    Parameters
    ----------
    Z : array_like
      Coefficient map array
    S : array_like
      Signal array
    W : array_like
      Mask array. The array shape must be such that the array is
      compatible for multiplication with input array S (see
      :func:`.cnvrep.mskWshape` for more details).
    dsz : tuple
      Filter support size(s)
    opt : :class:`ConvCnstrMODMaskDcplBase.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ConvCnstrMODMaskDcplBase.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cr.CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)

    # Convert W to internal shape
    W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)), dtype=S.dtype)

    # Reshape W if necessary (see discussion of reshape of S below)
    if self.cri.Cd == 1 and self.cri.C > 1:
        # In most cases broadcasting rules make it possible for W
        # to have a singleton dimension corresponding to a non-singleton
        # dimension in S. However, when S is reshaped to interleave axisC
        # and axisK on the same axis, broadcasting is no longer sufficient
        # unless axisC and axisK of W are either both singleton or both
        # of the same size as the corresponding axes of S. If neither of
        # these cases holds, it is necessary to replicate the axis of W
        # (axisC or axisK) that does not have the same size as the
        # corresponding axis of S.
        shpw = list(W.shape)
        swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
        if swck > 1 and swck < self.cri.C * self.cri.K:
            if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
                shpw[self.cri.axisK] = self.cri.K
            else:
                shpw[self.cri.axisC] = self.cri.C
            W = np.broadcast_to(W, shpw)
        self.W = W.reshape(
            W.shape[0:self.cri.dimN] +
            (1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
    else:
        self.W = W

    # Call parent class __init__
    Nx = self.cri.N * self.cri.Cd * self.cri.M
    CK = (self.cri.C if self.cri.Cd == 1 else 1) * self.cri.K
    shpY = list(self.cri.shpX)
    shpY[self.cri.axisC] = self.cri.Cd
    shpY[self.cri.axisK] = 1
    shpY[self.cri.axisM] += CK
    super(ConvCnstrMODMaskDcplBase, self).__init__(
        Nx, shpY, self.cri.axisM, CK, S.dtype, opt)

    # Reshape S to standard layout (Z, i.e. X in cbpdn, is assumed
    # to be taken from cbpdn, and therefore already in standard
    # form). If the dictionary has a single channel but the input
    # (and therefore also the coefficient map array) has multiple
    # channels, the channel index and multiple image index have
    # the same behaviour in the dictionary update equation: the
    # simplest way to handle this is to just reshape so that the
    # channels also appear on the multiple image index.
    if self.cri.Cd == 1 and self.cri.C > 1:
        self.S = S.reshape(self.cri.Nv +
                           (1, self.cri.C * self.cri.K, 1))
    else:
        self.S = S.reshape(self.cri.shpS)
    self.S = np.asarray(self.S, dtype=self.dtype)

    # Create constraint set projection function
    self.Pcn = cr.getPcn(dsz, self.cri.Nv, self.cri.dimN, self.cri.dimCd,
                         zm=opt['ZeroMean'])

    # Initialise byte-aligned arrays for pyfftw
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    xfshp = list(self.cri.Nv + (self.cri.Cd, 1, self.cri.M))
    xfshp[dimN - 1] = xfshp[dimN - 1] // 2 + 1
    self.Xf = sl.pyfftw_empty_aligned(xfshp,
                                      dtype=sl.complex_dtype(self.dtype))

    if Z is not None:
        self.setcoef(Z)
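
# A minimal NumPy sketch (hypothetical sizes, independent of the class above)
# of why W sometimes has to be replicated before the channel and image axes of
# S are interleaved: once those two axes are flattened into one, a singleton
# axis of W no longer broadcasts against the merged axis unless both of W's
# axes were singleton, or both already matched S.
import numpy as np

N0, N1, C, K = 4, 4, 3, 2
S = np.random.randn(N0, N1, C, K, 1)
W = np.random.randn(N0, N1, C, 1, 1)    # singleton on the image (K) axis

WS = W * S                              # fine: broadcasts over K
S2 = S.reshape(N0, N1, 1, C * K, 1)     # channels folded onto the image axis

# Reshaping W directly would give size C on the merged axis, which does not
# broadcast against C * K, so W is first replicated along the K axis:
W2 = np.broadcast_to(W, (N0, N1, C, K, 1)).reshape(N0, N1, 1, C * K, 1)
print((W2 * S2).shape)                  # (4, 4, 1, 6, 1)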
def __init__(self, D, B, S, lmbda, mu, W=None, opt=None, dimK=0, dimN=2):
    """
    Parameters
    ----------
    D : array_like
      Dictionary matrix
    B : array_like
      Standard dictionary array
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter (l1)
    mu : float
      Regularisation parameter (gradient)
    W : array_like
      Mask array. The array shape must be such that the array is
      compatible for multiplication with input array S (see
      :func:`.cnvrep.mskWshape` for more details).
    opt : :class:`ConvProdDictL1L1Grd.Options` object
      Algorithm options
    dimK : 0, 1, optional (default 0)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ConvProdDictL1L1Grd.Options()

    # Keep a record of the B dictionary
    self.set_dtype(opt, S.dtype)
    self.B = np.asarray(B, dtype=self.dtype)

    # S is an N x C matrix, D is an N x N M_D matrix, B is a C x M_B
    # matrix, and X is an N M x M_B matrix. The base class of this
    # class expects that X is N M x C (i.e. the same number of columns
    # as in S), so we pass its initialiser the product S B, which is
    # a N x M_B matrix, so that it initialises arrays with the correct
    # number of channels. This is the first of many nasty hacks in
    # this class!
    scidx = -2 if dimK == 1 else -1
    SB = sl.dot(B.T, S, axis=scidx)
    super(ConvProdDictL1L1Grd, self).__init__(
        D, SB, lmbda, mu, W=W, opt=opt, dimK=dimK, dimN=dimN)

    # Ensure that the dictionary is single channel
    if self.cri.Cd > 1:
        raise ValueError('Only single-channel convolutional dictionaries'
                         ' are supported')

    # We need to correct the shape of S due to the modified S passed to
    # the base class initialiser
    shpS = list(self.cri.shpS)
    shpS[self.cri.axisC] = S.shape[self.cri.axisC]
    self.cri.shpS = tuple(shpS)
    self.S = np.asarray(S.reshape(shpS), dtype=self.dtype)

    # We also need to correct the shapes of a number of other working
    # arrays because we have to change the mechanism for combining
    # the Y0 and Y1 blocks into a single array. In the base class
    # these arrays can just be concatenated on an appropriate axis,
    # but this is not possible here due to the different array
    # shapes. The solution is that the composite array is one
    # dimensional, with the component blocks being extracted via
    # one dimensional slicing and then reshaped to the appropriate
    # shapes.
    self.y0shp = self.cri.shpS
    self.y1shp = self.cri.shpX
    self.y0I = int(np.prod(np.array(self.y0shp[self.cri.axisC:])))
    self.y1I = int(np.prod(np.array(self.y1shp[self.cri.axisC:])))
    self.yshp = self.cri.shpX[0:self.cri.axisC:] + (self.y0I + self.y1I,)
    self.Y = np.zeros(self.yshp, dtype=self.dtype)
    self.U = np.zeros(self.yshp, dtype=self.dtype)
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
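
# A minimal sketch (hypothetical sizes, plain NumPy) of the packing scheme
# described in the comments above: the Y0 (signal-shaped) and Y1
# (coefficient-shaped) blocks share the leading spatial axes but differ beyond
# the channel axis, so they are stored side by side on a single flattened
# trailing axis and recovered by slicing followed by a reshape.
import numpy as np

N0, N1 = 8, 8
y0shp = (N0, N1, 3, 1, 1)               # hypothetical shape of the Y0 block
y1shp = (N0, N1, 1, 1, 20)              # hypothetical shape of the Y1 block
y0I = int(np.prod(y0shp[2:]))           # flattened trailing size of block 0
y1I = int(np.prod(y1shp[2:]))           # flattened trailing size of block 1

Y = np.zeros((N0, N1, y0I + y1I))       # composite working array
Y0 = Y[..., :y0I].reshape(y0shp)        # recover block 0
Y1 = Y[..., y0I:].reshape(y1shp)        # recover block 1
print(Y0.shape, Y1.shape)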
def __init__(self, Z, S, dsz, opt=None, dimK=1, dimN=2):
    """
    |

    **Call graph**

    .. image:: ../_static/jonga/ccmodcnsns_init.svg
       :width: 20%
       :target: ../_static/jonga/ccmodcnsns_init.svg
    """

    # Set default options if none specified
    if opt is None:
        opt = ConvCnstrMOD_Consensus.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cr.CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)

    # Handle possible reshape of channel axis onto multiple image axis
    # (see comment below)
    Nb = self.cri.K if self.cri.C == self.cri.Cd else \
        self.cri.C * self.cri.K
    admm.ADMMConsensus.__init__(self, Nb, self.cri.shpD, S.dtype, opt)

    # Set penalty parameter
    self.set_attr('rho', opt['rho'], dval=self.cri.K, dtype=self.dtype)

    # Reshape S to standard layout (Z, i.e. X in cbpdn, is assumed
    # to be taken from cbpdn, and therefore already in standard
    # form). If the dictionary has a single channel but the input
    # (and therefore also the coefficient map array) has multiple
    # channels, the channel index and multiple image index have
    # the same behaviour in the dictionary update equation: the
    # simplest way to handle this is to just reshape so that the
    # channels also appear on the multiple image index.
    if self.cri.Cd == 1 and self.cri.C > 1:
        self.S = S.reshape(self.cri.Nv + (1,) +
                           (self.cri.C * self.cri.K,) + (1,))
    else:
        self.S = S.reshape(self.cri.shpS)
    self.S = np.asarray(self.S, dtype=self.dtype)

    # Compute signal S in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    # Create constraint set projection function
    self.Pcn = cr.getPcn(dsz, self.cri.Nv, self.cri.dimN, self.cri.dimCd,
                         zm=opt['ZeroMean'])

    if Z is not None:
        self.setcoef(Z)

    self.X = sl.pyfftw_empty_aligned(self.xshape, dtype=self.dtype)
    # See comment on corresponding test in xstep method
    if self.cri.Cd > 1:
        self.YU = sl.pyfftw_empty_aligned(self.yshape, dtype=self.dtype)
    else:
        self.YU = sl.pyfftw_empty_aligned(self.xshape, dtype=self.dtype)
    self.Xf = sl.pyfftw_rfftn_empty_aligned(self.xshape, self.cri.axisN,
                                            self.dtype)
def __init__(self, Z, S, dsz, opt=None, dimK=1, dimN=2):
    """
    This class supports an arbitrary number of spatial dimensions,
    `dimN`, with a default of 2. The input coefficient map array `Z`
    (usually labelled X, but renamed here to avoid confusion with the
    X and Y variables in the ADMM base class) is expected to be in
    standard form as computed by the ConvBPDN class.

    The input signal set `S` is either `dimN` dimensional (no
    channels, only one signal), `dimN` +1 dimensional (either multiple
    channels or multiple signals), or `dimN` +2 dimensional (multiple
    channels and multiple signals). Parameter `dimK`, with a default
    value of 1, indicates the number of multiple-signal dimensions in
    `S`:

    ::

      Default dimK = 1, i.e. assume input S is of form
        S(N0, N1, C, K)  or  S(N0, N1, K)
      If dimK = 0 then input S is of form
        S(N0, N1, C, K)  or  S(N0, N1, C)

    The internal data layout for S, D (X here), and X (Z here) is:
    ::

      dim<0> - dim<Nds-1> : Spatial dimensions, product of N0,N1,... is N
      dim<Nds>            : C number of channels in S and D
      dim<Nds+1>          : K number of signals in S
      dim<Nds+2>          : M number of filters in D

        sptl.    chn  sig  flt
      S(N0, N1,  C,   K,   1)
      D(N0, N1,  C,   1,   M)   (X here)
      X(N0, N1,  1,   K,   M)   (Z here)

    The `dsz` parameter indicates the desired filter supports in the
    output dictionary, since this cannot be inferred from the input
    variables. The format is the same as the `dsz` parameter of
    :func:`.cnvrep.bcrop`.

    Parameters
    ----------
    Z : array_like
      Coefficient map array
    S : array_like
      Signal array
    dsz : tuple
      Filter support size(s)
    opt : ccmod.Options object
      Algorithm options
    dimK : int, optional (default 1)
      Number of dimensions for multiple signals in input S
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ConvCnstrMODBase.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cr.CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)

    # Call parent class __init__
    super(ConvCnstrMODBase, self).__init__(self.cri.shpD, S.dtype, opt)

    # Set penalty parameter
    self.set_attr('rho', opt['rho'], dval=self.cri.K, dtype=self.dtype)

    # Reshape S to standard layout (Z, i.e. X in cbpdn, is assumed
    # to be taken from cbpdn, and therefore already in standard
    # form). If the dictionary has a single channel but the input
    # (and therefore also the coefficient map array) has multiple
    # channels, the channel index and multiple image index have
    # the same behaviour in the dictionary update equation: the
    # simplest way to handle this is to just reshape so that the
    # channels also appear on the multiple image index.
    if self.cri.Cd == 1 and self.cri.C > 1:
        self.S = S.reshape(self.cri.Nv + (1,) +
                           (self.cri.C * self.cri.K,) + (1,))
    else:
        self.S = S.reshape(self.cri.shpS)
    self.S = np.asarray(self.S, dtype=self.dtype)

    # Compute signal S in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    # Create constraint set projection function
    self.Pcn = cr.getPcn(dsz, self.cri.Nv, self.cri.dimN, self.cri.dimCd,
                         zm=opt['ZeroMean'])

    # Create byte aligned arrays for FFT calls
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    self.Xf = sl.pyfftw_rfftn_empty_aligned(self.Y.shape, self.cri.axisN,
                                            self.dtype)

    if Z is not None:
        self.setcoef(Z)
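
# A minimal sketch (hypothetical sizes, plain NumPy) of the internal layout
# documented in the docstring above: whatever form S arrives in, it is viewed
# as a dimN + 3 array with axes (spatial..., channel, image, filter) before
# entering the solver.
import numpy as np

N0, N1, C, K = 16, 16, 3, 5

S_single = np.random.randn(N0, N1)        # one signal, no channels
S_multik = np.random.randn(N0, N1, K)     # dimK=1: trailing axis is images
S_multic = np.random.randn(N0, N1, C)     # dimK=0: trailing axis is channels

print(S_single.reshape(N0, N1, 1, 1, 1).shape)   # S(N0, N1, 1, 1, 1)
print(S_multik.reshape(N0, N1, 1, K, 1).shape)   # S(N0, N1, 1, K, 1)
print(S_multic.reshape(N0, N1, C, 1, 1).shape)   # S(N0, N1, C, 1, 1)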
def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
    """
    |

    **Call graph**

    .. image:: ../_static/jonga/cbpdnrtv_init.svg
       :width: 20%
       :target: ../_static/jonga/cbpdnrtv_init.svg

    |

    Parameters
    ----------
    D : array_like
      Dictionary matrix
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter (l1)
    mu : float
      Regularisation parameter (gradient)
    opt : :class:`ConvBPDNRecTV.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    if opt is None:
        opt = ConvBPDNRecTV.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

    # Call parent class __init__
    Nx = np.product(np.array(self.cri.shpX))
    yshape = list(self.cri.shpX)
    yshape[self.cri.axisM] += len(self.cri.axisN) * self.cri.Cd
    super(ConvBPDNRecTV, self).__init__(Nx, yshape, yshape, S.dtype, opt)

    # Set l1 term scaling and weight array
    self.lmbda = self.dtype.type(lmbda)
    self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
    self.Wl1 = self.Wl1.reshape(cr.l1Wshape(self.Wl1, self.cri))

    self.mu = self.dtype.type(mu)
    if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
        self.Wtv = np.asarray(
            opt['TVWeight'].reshape((1,) * (dimN + 2) +
                                    opt['TVWeight'].shape),
            dtype=self.dtype)
    else:
        # Wtv is a scalar: no need to change shape
        self.Wtv = self.dtype.type(opt['TVWeight'])

    # Set penalty parameter
    self.set_attr('rho', opt['rho'], dval=(50.0 * self.lmbda + 1.0),
                  dtype=self.dtype)

    # Set rho_xi attribute
    self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,
                  dtype=self.dtype)

    # Reshape D and S to standard layout
    self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
    self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

    # Compute signal in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    self.Gf, GHGf = sl.gradient_filters(self.cri.dimN + 3, self.cri.axisN,
                                        self.cri.Nv, dtype=self.dtype)

    # Initialise byte-aligned arrays for pyfftw
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    self.Xf = sl.pyfftw_rfftn_empty_aligned(self.cri.shpX, self.cri.axisN,
                                            self.dtype)

    self.setdict()
def __init__(self, D, S, lmbda=None, opt=None, dimK=None, dimN=2):
    """
    This class supports an arbitrary number of spatial dimensions,
    `dimN`, with a default of 2. The input dictionary `D` is either
    `dimN` + 1 dimensional, in which case each spatial component
    (image in the default case) is assumed to consist of a single
    channel, or `dimN` + 2 dimensional, in which case the final
    dimension is assumed to contain the channels (e.g. colour
    channels in the case of images). The input signal set `S` is
    either `dimN` dimensional (no channels, only one signal),
    `dimN` + 1 dimensional (either multiple channels or multiple
    signals), or `dimN` + 2 dimensional (multiple channels and
    multiple signals). Determination of problem dimensions is
    handled by :class:`.cnvrep.CSC_ConvRepIndexing`.

    |

    **Call graph**

    .. image:: ../_static/jonga/fista_cbpdn_init.svg
       :width: 20%
       :target: ../_static/jonga/fista_cbpdn_init.svg

    |

    Parameters
    ----------
    D : array_like
      Dictionary array
    S : array_like
      Signal array
    lmbda : float
      Regularisation parameter
    opt : :class:`ConvBPDN.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial/temporal dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ConvBPDN.Options()

    # Infer problem dimensions and set relevant attributes of self
    if not hasattr(self, 'cri'):
        self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

    # Set dtype attribute based on S.dtype and opt['DataType']
    self.set_dtype(opt, S.dtype)

    # Set default lambda value if not specified
    if lmbda is None:
        cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
        Df = sl.rfftn(D.reshape(cri.shpD), cri.Nv, axes=cri.axisN)
        Sf = sl.rfftn(S.reshape(cri.shpS), axes=cri.axisN)
        b = np.conj(Df) * Sf
        lmbda = 0.1 * abs(b).max()

    # Set l1 term scaling and weight array
    self.lmbda = self.dtype.type(lmbda)
    self.wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)

    # Call parent class __init__
    self.Xf = None
    xshape = self.cri.shpX
    super(ConvBPDN, self).__init__(xshape, S.dtype, opt)

    # Reshape D and S to standard layout
    self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
    self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

    # Compute signal in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    # Create byte aligned arrays for FFT calls
    self.Y = self.X.copy()
    self.X = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    self.X[:] = self.Y

    # Initialise auxiliary variable Vf: Create byte aligned arrays
    # for FFT calls
    self.Vf = sl.pyfftw_rfftn_empty_aligned(self.X.shape, self.cri.axisN,
                                            self.dtype)

    self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
    self.Yf = self.Xf.copy()
    self.store_prev()
    self.Yfprv = self.Yf.copy() + 1e5

    self.setdict()

    # Initialization needed for back tracking (if selected)
    self.postinitialization_backtracking_DFT()
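
# A minimal sketch (hypothetical sizes, plain NumPy rather than sporco.linalg)
# of the default regularisation parameter rule used above when lmbda is None:
# lambda is set to one tenth of the largest magnitude of D^H S evaluated in
# the DFT domain.
import numpy as np

N0, N1, M = 32, 32, 16
D = np.random.randn(8, 8, M)                    # small filters
S = np.random.randn(N0, N1)                     # single-channel signal

Df = np.fft.rfftn(D, s=(N0, N1), axes=(0, 1))   # zero-padded filter DFTs
Sf = np.fft.rfftn(S, axes=(0, 1))
b = np.conj(Df) * Sf[..., np.newaxis]           # cross-correlation in DFT domain
lmbda = 0.1 * np.abs(b).max()
print(lmbda)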
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
    """
    Initialise a ConvCnstrMODMaskDcpl_Consensus object with problem
    size and options.

    |

    **Call graph**

    .. image:: _static/jonga/ccmodmdcnsns_init.svg
       :width: 20%
       :target: _static/jonga/ccmodmdcnsns_init.svg

    |

    Parameters
    ----------
    Z : array_like
      Coefficient map array
    S : array_like
      Signal array
    W : array_like
      Mask array. The array shape must be such that the array is
      compatible for multiplication with input array S (see
      :func:`.cnvrep.mskWshape` for more details).
    dsz : tuple
      Filter support size(s)
    opt : :class:`.ConvCnstrMOD_Consensus.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ccmod.ConvCnstrMOD_Consensus.Options()

    super(ConvCnstrMODMaskDcpl_Consensus, self).__init__(
        Z, S, dsz, opt=opt, dimK=dimK, dimN=dimN)

    # Convert W to internal shape
    W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)), dtype=S.dtype)

    # Reshape W if necessary (see discussion of reshape of S in
    # ccmod.ConvCnstrMOD_Consensus.__init__)
    if self.cri.Cd == 1 and self.cri.C > 1:
        # In most cases broadcasting rules make it possible for W
        # to have a singleton dimension corresponding to a non-singleton
        # dimension in S. However, when S is reshaped to interleave axisC
        # and axisK on the same axis, broadcasting is no longer sufficient
        # unless axisC and axisK of W are either both singleton or both
        # of the same size as the corresponding axes of S. If neither of
        # these cases holds, it is necessary to replicate the axis of W
        # (axisC or axisK) that does not have the same size as the
        # corresponding axis of S.
        shpw = list(W.shape)
        swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
        if swck > 1 and swck < self.cri.C * self.cri.K:
            if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
                shpw[self.cri.axisK] = self.cri.K
            else:
                shpw[self.cri.axisC] = self.cri.C
            W = np.broadcast_to(W, shpw)
        self.W = W.reshape(
            W.shape[0:self.cri.dimN] +
            (1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
    else:
        self.W = W

    # Initialise additional variables required for the different
    # splitting used in combining the consensus solution with mask
    # decoupling
    self.Y1 = np.zeros(self.S.shape, dtype=self.dtype)
    self.U1 = np.zeros(self.S.shape, dtype=self.dtype)
    self.YU1 = sl.pyfftw_empty_aligned(self.S.shape, dtype=self.dtype)
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
    """
    |

    **Call graph**

    .. image:: ../_static/jonga/ccmodmdcnsns_init.svg
       :width: 20%
       :target: ../_static/jonga/ccmodmdcnsns_init.svg

    |

    Parameters
    ----------
    Z : array_like
      Coefficient map array
    S : array_like
      Signal array
    W : array_like
      Mask array. The array shape must be such that the array is
      compatible for multiplication with input array S (see
      :func:`.cnvrep.mskWshape` for more details).
    dsz : tuple
      Filter support size(s)
    opt : :class:`.ConvCnstrMOD_Consensus.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ccmod.ConvCnstrMOD_Consensus.Options()

    super(ConvCnstrMODMaskDcpl_Consensus, self).__init__(
        Z, S, dsz, opt=opt, dimK=dimK, dimN=dimN)

    # Convert W to internal shape
    if W is None:
        W = np.array([1.0], dtype=self.dtype)
    W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)), dtype=S.dtype)

    # Reshape W if necessary (see discussion of reshape of S in
    # ccmod.ConvCnstrMOD_Consensus.__init__)
    if self.cri.Cd == 1 and self.cri.C > 1:
        # In most cases broadcasting rules make it possible for W
        # to have a singleton dimension corresponding to a non-singleton
        # dimension in S. However, when S is reshaped to interleave axisC
        # and axisK on the same axis, broadcasting is no longer sufficient
        # unless axisC and axisK of W are either both singleton or both
        # of the same size as the corresponding axes of S. If neither of
        # these cases holds, it is necessary to replicate the axis of W
        # (axisC or axisK) that does not have the same size as the
        # corresponding axis of S.
        shpw = list(W.shape)
        swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
        if swck > 1 and swck < self.cri.C * self.cri.K:
            if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
                shpw[self.cri.axisK] = self.cri.K
            else:
                shpw[self.cri.axisC] = self.cri.C
            W = np.broadcast_to(W, shpw)
        self.W = W.reshape(
            W.shape[0:self.cri.dimN] +
            (1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
    else:
        self.W = W

    # Initialise additional variables required for the different
    # splitting used in combining the consensus solution with mask
    # decoupling
    self.Y1 = np.zeros(self.S.shape, dtype=self.dtype)
    self.U1 = np.zeros(self.S.shape, dtype=self.dtype)
    self.YU1 = sl.pyfftw_empty_aligned(self.S.shape, dtype=self.dtype)
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
    """
    Parameters
    ----------
    Z : array_like
      Coefficient map array
    S : array_like
      Signal array
    W : array_like
      Mask array. The array shape must be such that the array is
      compatible for multiplication with input array S (see
      :func:`.cnvrep.mskWshape` for more details).
    dsz : tuple
      Filter support size(s)
    opt : :class:`ConvCnstrMODMaskDcplBase.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ConvCnstrMODMaskDcplBase.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cr.CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)

    # Convert W to internal shape
    W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)), dtype=S.dtype)

    # Reshape W if necessary (see discussion of reshape of S below)
    if self.cri.Cd == 1 and self.cri.C > 1:
        # In most cases broadcasting rules make it possible for W
        # to have a singleton dimension corresponding to a non-singleton
        # dimension in S. However, when S is reshaped to interleave axisC
        # and axisK on the same axis, broadcasting is no longer sufficient
        # unless axisC and axisK of W are either both singleton or both
        # of the same size as the corresponding axes of S. If neither of
        # these cases holds, it is necessary to replicate the axis of W
        # (axisC or axisK) that does not have the same size as the
        # corresponding axis of S.
        shpw = list(W.shape)
        swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
        if swck > 1 and swck < self.cri.C * self.cri.K:
            if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
                shpw[self.cri.axisK] = self.cri.K
            else:
                shpw[self.cri.axisC] = self.cri.C
            W = np.broadcast_to(W, shpw)
        self.W = W.reshape(
            W.shape[0:self.cri.dimN] +
            (1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
    else:
        self.W = W

    # Call parent class __init__
    Nx = self.cri.N * self.cri.Cd * self.cri.M
    CK = (self.cri.C if self.cri.Cd == 1 else 1) * self.cri.K
    shpY = list(self.cri.shpX)
    shpY[self.cri.axisC] = self.cri.Cd
    shpY[self.cri.axisK] = 1
    shpY[self.cri.axisM] += CK
    super(ConvCnstrMODMaskDcplBase, self).__init__(
        Nx, shpY, self.cri.axisM, CK, S.dtype, opt)

    # Reshape S to standard layout (Z, i.e. X in cbpdn, is assumed
    # to be taken from cbpdn, and therefore already in standard
    # form). If the dictionary has a single channel but the input
    # (and therefore also the coefficient map array) has multiple
    # channels, the channel index and multiple image index have
    # the same behaviour in the dictionary update equation: the
    # simplest way to handle this is to just reshape so that the
    # channels also appear on the multiple image index.
    if self.cri.Cd == 1 and self.cri.C > 1:
        self.S = S.reshape(self.cri.Nv +
                           (1, self.cri.C * self.cri.K, 1))
    else:
        self.S = S.reshape(self.cri.shpS)
    self.S = np.asarray(self.S, dtype=self.dtype)

    # Create constraint set projection function
    self.Pcn = cr.getPcn(dsz, self.cri.Nv, self.cri.dimN, self.cri.dimCd,
                         zm=opt['ZeroMean'])

    # Initialise byte-aligned arrays for pyfftw
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    xfshp = list(self.cri.Nv + (self.cri.Cd, 1, self.cri.M))
    self.Xf = sl.pyfftw_rfftn_empty_aligned(xfshp, self.cri.axisN,
                                            self.dtype)

    if Z is not None:
        self.setcoef(Z)
def __init__(self, D, S, R, opt=None, lmbda=None, optx=None,
             dimK=None, dimN=2, *args, **kwargs):
    """
    Parameters
    ----------
    xstep : internal xstep object (e.g. xstep.ConvBPDN)
    D : array_like
      Dictionary array
    S : array_like
      Signal array
    R : array_like
      Rank array
    lmbda : list of float
      Regularisation parameter
    opt : list containing :class:`ConvBPDN.Options` object
      Algorithm options for each individual solver
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial/temporal dimensions
    *args
      Variable length list of arguments for constructor of internal
      xstep object (e.g. mu)
    **kwargs
      Keyword arguments for constructor of internal xstep object
    """

    if opt is None:
        opt = AKConvBPDN.Options()
    self.opt = opt

    # Infer outer problem dimensions
    self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

    # Parse mu
    if 'mu' in kwargs:
        mu = kwargs['mu']
    else:
        mu = [0] * self.cri.dimN

    # Parse lmbda and optx
    if lmbda is None:
        lmbda = [None] * self.cri.dimN
    if optx is None:
        optx = [None] * self.cri.dimN

    # Parse isc
    if 'isc' in kwargs:
        isc = kwargs['isc']
    else:
        isc = None

    # Store parameters
    self.lmbda = lmbda
    self.optx = optx
    self.mu = mu
    self.R = R

    # Reshape D and S to standard layout
    self.D = np.asarray(D.reshape(self.cri.shpD), dtype=S.dtype)
    self.S = np.asarray(S.reshape(self.cri.shpS), dtype=S.dtype)

    # Compute signal in DFT domain
    self.Sf = sl.fftn(self.S, None, self.cri.axisN)
    # print('Sf shape %s \n' % (self.Sf.shape,))
    # print('S shape %s \n' % (self.S.shape,))
    # print('shpS %s \n' % (self.cri.shpS,))

    # Signal uni-dim (kruskal)
    # self.Skf = np.reshape(self.Sf, [np.prod(np.array(self.Sf.shape)), 1],
    #                       order='F')

    # Decomposed Kruskal Initialization
    self.K = []
    self.Kf = []
    Nvf = []
    for i, Nvi in enumerate(self.cri.Nv):  # Ui
        Ki = np.random.randn(Nvi, np.sum(self.R))
        Kfi = sl.pyfftw_empty_aligned(Ki.shape, self.Sf.dtype)
        Kfi[:] = sl.fftn(Ki, None, [0])
        self.K.append(Ki)
        self.Kf.append(Kfi)
        Nvf.append(Kfi.shape[0])

    self.Nvf = tuple(Nvf)  # Fourier dimensions
    self.NC = int(np.prod(self.Nvf) * self.cri.Cd)

    # dict FFT
    self.setdict()

    # Init KCSC solver (Needs to be initiated inside AKConvBPDN because
    # it requires convolvedict() and reshapesignal())
    self.xstep = []
    for l in range(self.cri.dimN):
        Wl = self.convolvedict(l)                           # convolvedict
        cri_l = KCSC_ConvRepIndexing(self.cri, self.R, l)   # cri KCSC
        self.xstep.append(KConvBPDN(
            Wl, np.reshape(self.Sf, cri_l.shpS, order='C'), cri_l,
            self.S.dtype, self.lmbda[l], self.mu[l], self.optx[l]))

    # Init isc
    if isc is None:

        isc_lst = []     # itStats from block-solver
        isc_fields = []
        for i in range(self.cri.dimN):
            str_i = '_{0!s}'.format(i)
            isc_i = IterStatsConfig(
                isfld=['ObjFun' + str_i, 'PrimalRsdl' + str_i,
                       'DualRsdl' + str_i, 'Rho' + str_i],
                isxmap={'ObjFun' + str_i: 'ObjFun',
                        'PrimalRsdl' + str_i: 'PrimalRsdl',
                        'DualRsdl' + str_i: 'DualRsdl',
                        'Rho' + str_i: 'Rho'},
                evlmap={},
                hdrtxt=['Fnc' + str_i, 'r' + str_i, 's' + str_i,
                        u('ρ' + str_i)],
                hdrmap={'Fnc' + str_i: 'ObjFun' + str_i,
                        'r' + str_i: 'PrimalRsdl' + str_i,
                        's' + str_i: 'DualRsdl' + str_i,
                        u('ρ' + str_i): 'Rho' + str_i})
            isc_fields += isc_i.IterationStats._fields
            isc_lst.append(isc_i)

        # isc_it = IterStatsConfig(    # global itStats -> No, to be
        #     isfld=['Iter', 'Time'],  # managed in dictlearn
        #     isxmap={},
        #     evlmap={},
        #     hdrtxt=['Itn'],
        #     hdrmap={'Itn': 'Iter'}
        # )
        #
        # isc_fields += isc_it._fields

    self.isc_lst = isc_lst
    # self.isc_it = isc_it
    self.isc = collections.namedtuple('IterationStats', isc_fields)

    # Required because dictlrn.DictLearn assumes that all valid
    # xstep objects have an IterationStats attribute
    # self.IterationStats = self.xstep.IterationStats

    self.itstat = []
    self.j = 0
def __init__(self, Wf, Sf, cri_K, dtype, lmbda=None, mu=0.0, opt=None):
    """
    This class supports an arbitrary number of spatial dimensions,
    `dimN`, with a default of 2. The input dictionary `D` is either
    `dimN` + 1 dimensional, in which case each spatial component
    (image in the default case) is assumed to consist of a single
    channel, or `dimN` + 2 dimensional, in which case the final
    dimension is assumed to contain the channels (e.g. colour
    channels in the case of images). The input signal set `S` is
    either `dimN` dimensional (no channels, only one signal),
    `dimN` + 1 dimensional (either multiple channels or multiple
    signals), or `dimN` + 2 dimensional (multiple channels and
    multiple signals). Determination of problem dimensions is
    handled by :class:`.cnvrep.CSC_ConvRepIndexing`.

    |

    **Call graph**

    .. image:: ../_static/jonga/cbpdn_init.svg
       :width: 20%
       :target: ../_static/jonga/cbpdn_init.svg

    |

    Parameters
    ----------
    D : array_like
      Dictionary array
    S : array_like
      Signal array
    lmbda : float
      Regularisation parameter
    opt : :class:`ConvBPDN.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial/temporal dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = KConvBPDN.Options()

    # Set dtype attribute based on S.dtype and opt['DataType']
    self.set_dtype(opt, dtype)

    # Problem dimensions
    self.cri = cri_K

    # Reshape D and S to standard layout (NOT NEEDED in AKConvBPDN)
    self.Wf = np.asarray(Wf.reshape(self.cri.shpD), dtype=self.dtype)
    self.Sf = np.asarray(Sf.reshape(self.cri.shpS), dtype=self.dtype)
    # self.Sf_ = np.moveaxis(Sf.reshape([self.cri.nv[0], self.cri.N_, 1]),
    #                        [0, 1, 2], [2, 0, 1])

    # Set default lambda value if not specified (here Wf, which is already
    # in the DFT domain, takes the role played by Df in ConvBPDN)
    if lmbda is None:
        b = np.conj(self.Wf) * self.Sf
        lmbda = 0.1 * abs(b).max()

    # Set l1 term scaling
    self.lmbda = self.dtype.type(lmbda)

    # Set l2 term scaling
    self.mu = self.dtype.type(mu)

    # Set penalty parameter
    self.set_attr('rho', opt['rho'], dval=(50.0 * self.lmbda + 1.0),
                  dtype=self.dtype)

    # Set rho_xi attribute (see Sec. VI.C of wohlberg-2015-adaptive)
    if self.lmbda != 0.0:
        rho_xi = float((1.0 + (18.3)**(np.log10(self.lmbda) + 1.0)))
    else:
        rho_xi = 1.0
    self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=rho_xi,
                  dtype=self.dtype)

    # Call parent class __init__ (not ConvBPDN bc FFT domain data)
    super(cbpdn.GenericConvBPDN, self).__init__(self.cri.shpX, Sf.dtype,
                                                opt)

    # Initialise byte-aligned arrays for pyfftw
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    self.Xf = sl.pyfftw_empty_aligned(self.Y.shape, self.dtype)

    self.c = [None] * self.cri.n    # to be filled with cho_factor

    self.setdictf()

    # Set l1 term weight array
    self.wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
    self.wl1 = self.wl1.reshape(Kl1Wshape(self.wl1, self.cri))

    print('L1Weight %s \n' % (self.wl1,))
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
    """
    |

    **Call graph**

    .. image:: ../_static/jonga/ccmodmdfista_init.svg
       :width: 20%
       :target: ../_static/jonga/ccmodmdfista_init.svg

    |

    Parameters
    ----------
    Z : array_like
      Coefficient map array
    S : array_like
      Signal array
    W : array_like
      Mask array. The array shape must be such that the array is
      compatible for multiplication with the *internal* shape of
      input array S (see :class:`.cnvrep.CDU_ConvRepIndexing` for a
      discussion of the distinction between *external* and *internal*
      data layouts).
    dsz : tuple
      Filter support size(s)
    opt : :class:`ConvCnstrMODMasked.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ConvCnstrMODMask.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cr.CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)

    # Append singleton dimensions to W if necessary
    if hasattr(W, 'ndim'):
        W = sl.atleast_nd(self.cri.dimN + 3, W)

    # Reshape W if necessary (see discussion of reshape of S in
    # ccmod base class)
    if self.cri.Cd == 1 and self.cri.C > 1 and hasattr(W, 'ndim'):
        # In most cases broadcasting rules make it possible for W
        # to have a singleton dimension corresponding to a
        # non-singleton dimension in S. However, when S is
        # reshaped to interleave axisC and axisK on the same axis,
        # broadcasting is no longer sufficient unless axisC and
        # axisK of W are either both singleton or both of the same
        # size as the corresponding axes of S. If neither of these
        # cases holds, it is necessary to replicate the axis of W
        # (axisC or axisK) that does not have the same size as the
        # corresponding axis of S.
        shpw = list(W.shape)
        swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
        if swck > 1 and swck < self.cri.C * self.cri.K:
            if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
                shpw[self.cri.axisK] = self.cri.K
            else:
                shpw[self.cri.axisC] = self.cri.C
            W = np.broadcast_to(W, shpw)
        self.W = W.reshape(
            W.shape[0:self.cri.dimN] +
            (1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
    else:
        self.W = W

    super(ConvCnstrMODMask, self).__init__(Z, S, dsz, opt, dimK, dimN)

    # Create byte aligned arrays for FFT calls
    self.WRy = sl.pyfftw_empty_aligned(self.S.shape, dtype=self.dtype)
    self.Ryf = sl.pyfftw_rfftn_empty_aligned(self.S.shape, self.cri.axisN,
                                             self.dtype)
def __init__(self, D, S, lmbda=None, opt=None, dimK=None, dimN=2):
    """
    Initialise a ConvBPDN object with problem parameters.

    This class supports an arbitrary number of spatial dimensions,
    `dimN`, with a default of 2. The input dictionary `D` is either
    `dimN` + 1 dimensional, in which case each spatial component
    (image in the default case) is assumed to consist of a single
    channel, or `dimN` + 2 dimensional, in which case the final
    dimension is assumed to contain the channels (e.g. colour
    channels in the case of images). The input signal set `S` is
    either `dimN` dimensional (no channels, only one signal),
    `dimN` + 1 dimensional (either multiple channels or multiple
    signals), or `dimN` + 2 dimensional (multiple channels and
    multiple signals). Determination of problem dimensions is
    handled by :class:`.cnvrep.CSC_ConvRepIndexing`.

    |

    **Call graph**

    .. image:: _static/jonga/fista_cbpdn_init.svg
       :width: 20%
       :target: _static/jonga/fista_cbpdn_init.svg

    |

    Parameters
    ----------
    D : array_like
      Dictionary array
    S : array_like
      Signal array
    lmbda : float
      Regularisation parameter
    opt : :class:`ConvBPDN.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial/temporal dimensions
    """

    # Set default options if none specified
    if opt is None:
        opt = ConvBPDN.Options()

    # Infer problem dimensions and set relevant attributes of self
    if not hasattr(self, 'cri'):
        self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

    # Set dtype attribute based on S.dtype and opt['DataType']
    self.set_dtype(opt, S.dtype)

    # Set default lambda value if not specified
    if lmbda is None:
        cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
        Df = sl.rfftn(D.reshape(cri.shpD), cri.Nv, axes=cri.axisN)
        Sf = sl.rfftn(S.reshape(cri.shpS), axes=cri.axisN)
        b = np.conj(Df) * Sf
        lmbda = 0.1 * abs(b).max()

    # Set l1 term scaling and weight array
    self.lmbda = self.dtype.type(lmbda)
    self.wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)

    # Call parent class __init__
    xshape = self.cri.shpX
    super(ConvBPDN, self).__init__(xshape, S.dtype, opt)

    if self.opt['BackTrack', 'Enabled']:
        self.L /= self.lmbda

    # Reshape D and S to standard layout
    self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
    self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

    # Compute signal in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    # Create byte aligned arrays for FFT calls
    xfshp = list(self.X.shape)
    xfshp[dimN - 1] = xfshp[dimN - 1] // 2 + 1
    self.Xf = sl.pyfftw_empty_aligned(xfshp,
                                      dtype=sl.complex_dtype(self.dtype))

    # Initialise auxiliary variable Yf
    self.Yf = sl.pyfftw_empty_aligned(xfshp,
                                      dtype=sl.complex_dtype(self.dtype))

    self.Ryf = -self.Sf

    self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
    self.Yf = self.Xf
    self.store_prev()

    self.setdict()
def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
    """
    |

    **Call graph**

    .. image:: ../_static/jonga/cbpdnrtv_init.svg
       :width: 20%
       :target: ../_static/jonga/cbpdnrtv_init.svg

    |

    Parameters
    ----------
    D : array_like
      Dictionary matrix
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter (l1)
    mu : float
      Regularisation parameter (gradient)
    opt : :class:`ConvBPDNRecTV.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    if opt is None:
        opt = ConvBPDNRecTV.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

    # Call parent class __init__
    Nx = np.product(np.array(self.cri.shpX))
    yshape = list(self.cri.shpX)
    yshape[self.cri.axisM] += len(self.cri.axisN) * self.cri.Cd
    super(ConvBPDNRecTV, self).__init__(Nx, yshape, yshape, S.dtype, opt)

    # Set l1 term scaling and weight array
    self.lmbda = self.dtype.type(lmbda)
    self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
    self.Wl1 = self.Wl1.reshape(cr.l1Wshape(self.Wl1, self.cri))

    self.mu = self.dtype.type(mu)
    if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
        self.Wtv = np.asarray(
            opt['TVWeight'].reshape((1,) * (dimN + 2) +
                                    opt['TVWeight'].shape),
            dtype=self.dtype)
    else:
        # Wtv is a scalar: no need to change shape
        self.Wtv = self.dtype.type(opt['TVWeight'])

    # Set penalty parameter
    self.set_attr('rho', opt['rho'], dval=(50.0 * self.lmbda + 1.0),
                  dtype=self.dtype)

    # Set rho_xi attribute
    self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,
                  dtype=self.dtype)

    # Reshape D and S to standard layout
    self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
    self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

    # Compute signal in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    self.Gf, GHGf = sl.GradientFilters(self.cri.dimN + 3, self.cri.axisN,
                                       self.cri.Nv, dtype=self.dtype)

    # Initialise byte-aligned arrays for pyfftw
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    self.Xf = sl.pyfftw_rfftn_empty_aligned(self.cri.shpX, self.cri.axisN,
                                            self.dtype)

    self.setdict()