def __init__(self, A, S, lmbda, opt=None, axes=(0, 1), caxis=None):
    """
    Initialise a TVL2Deconv object with problem parameters.

    Parameters
    ----------
    A : array_like
      Filter kernel corresponding to operator :math:`H` above
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter
    opt : TVL2Deconv.Options object
      Algorithm options
    axes : tuple or list
      Axes on which TV regularisation is to be applied
    caxis : int or None, optional (default None)
      Axis on which channels of a multi-channel image are stacked.
      If None, TV regularisation is applied independently to each
      channel, otherwise Vector TV :cite:`blomgren-1998-color`
      regularisation is applied jointly to all channels.
    """

    if opt is None:
        opt = TVL2Deconv.Options()

    # Set dtype attribute based on S.dtype and opt['DataType']
    self.set_dtype(opt, S.dtype)

    self.S = np.asarray(S, dtype=self.dtype)
    self.axes = axes
    # saxes selects the axes over which the l2 norm of the gradient
    # vector is computed: the final (gradient-component) axis alone for
    # independent per-channel TV, plus the channel axis for Vector TV
    if caxis is None:
        self.saxes = (-1,)
    else:
        self.saxes = (caxis, -1)
    self.lmbda = self.dtype.type(lmbda)

    # Set penalty parameter (default value is derived from lmbda)
    self.set_attr('rho', opt['rho'], dval=(2.0*self.lmbda + 0.1),
                  dtype=self.dtype)

    # Auxiliary/dual variables have one extra trailing axis holding the
    # gradient components, one per regularised axis
    yshape = S.shape + (len(axes),)
    super(TVL2Deconv, self).__init__(S.size, yshape, yshape, S.dtype, opt)

    # Shape of the transform on the regularised axes
    self.axshp = [S.shape[k] for k in axes]
    # Pad kernel A to the dimensionality of S and precompute frequency
    # domain quantities used by the solver iterations
    self.A = sl.atleast_nd(S.ndim, A.astype(self.dtype))
    self.Af = sl.rfftn(self.A, self.axshp, axes=axes)
    self.Sf = sl.rfftn(self.S, axes=axes)
    # conj(Af)*Af and conj(Af)*Sf, i.e. A^H A and A^H S in the DFT domain
    self.AHAf = np.conj(self.Af)*self.Af
    self.AHSf = np.conj(self.Af)*self.Sf

    # TV weight array; if it has the same ndim as S, append an axis so
    # it broadcasts against the gradient stack (which has an extra axis)
    self.Wtv = np.asarray(self.opt['TVWeight'], dtype=self.dtype)
    if hasattr(self.Wtv, 'ndim') and self.Wtv.ndim == S.ndim:
        self.Wtvna = self.Wtv[..., np.newaxis]
    else:
        self.Wtvna = self.Wtv

    # Construct gradient operators in frequency domain
    self.Gf, self.GHGf = sl.GradientFilters(S.ndim, axes, self.axshp,
                                            dtype=self.dtype)
def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
    """
    Initialise a ConvBPDNRecTV object with problem parameters.

    Parameters
    ----------
    D : array_like
      Dictionary matrix
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter (l1)
    mu : float
      Regularisation parameter (gradient)
    opt : :class:`ConvBPDNRecTV.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    if opt is None:
        opt = ConvBPDNRecTV.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cbpdn.ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

    # Call parent class __init__. NB: np.prod replaces the np.product
    # alias, which was deprecated in NumPy 1.25 and removed in 2.0
    Nx = np.prod(self.cri.shpX)
    # The split variable Y stacks the coefficient maps with one extra
    # block of Cd channels per spatial axis, appended on the filter axis
    yshape = list(self.cri.shpX)
    yshape[self.cri.axisM] += len(self.cri.axisN) * self.cri.Cd
    super(ConvBPDNRecTV, self).__init__(Nx, yshape, yshape, S.dtype, opt)

    # Set l1 term scaling and weight array
    self.lmbda = self.dtype.type(lmbda)
    self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)

    # Set TV term scaling and weight array; a non-scalar TV weight is
    # reshaped with leading singleton axes so it broadcasts over the
    # standard (spatial + channel + signal + filter) array layout
    self.mu = self.dtype.type(mu)
    if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
        self.Wtv = np.asarray(
            opt['TVWeight'].reshape((1,)*(dimN+2) + opt['TVWeight'].shape),
            dtype=self.dtype)
    else:
        # Wtv is a scalar: no need to change shape
        self.Wtv = self.dtype.type(opt['TVWeight'])

    # Set penalty parameter (default value is derived from lmbda)
    self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),
                  dtype=self.dtype)

    # Set rho_xi attribute
    self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,
                  dtype=self.dtype)

    # Reshape D and S to standard layout
    self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
    self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

    # Compute signal in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    # Gradient operators in the frequency domain (GHGf is not retained
    # as an attribute in this implementation)
    self.Gf, GHGf = sl.GradientFilters(self.cri.dimN+3, self.cri.axisN,
                                       self.cri.Nv, dtype=self.dtype)

    # Initialise byte-aligned arrays for pyfftw
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    # rfftn halves the last transformed axis (real-input symmetry)
    xfshp = list(self.cri.shpX)
    xfshp[dimN-1] = xfshp[dimN-1]//2 + 1
    self.Xf = sl.pyfftw_empty_aligned(xfshp,
                                      dtype=sl.complex_dtype(self.dtype))

    self.setdict()
def __init__(self, A, S, lmbda, opt=None, axes=(0, 1), caxis=None):
    """
    |

    **Call graph**

    .. image:: ../_static/jonga/tvl1dcn_init.svg
       :width: 20%
       :target: ../_static/jonga/tvl1dcn_init.svg

    |

    Parameters
    ----------
    A : array_like
      Filter kernel corresponding to operator :math:`H` above
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter
    opt : TVL1Deconv.Options object
      Algorithm options
    axes : tuple, optional (default (0,1))
      Axes on which TV regularisation is to be applied
    caxis : int or None, optional (default None)
      Axis on which channels of a multi-channel image are stacked.
      If None, TV regularisation is applied independently to each
      channel, otherwise Vector TV :cite:`blomgren-1998-color`
      regularisation is applied jointly to all channels.
    """

    if opt is None:
        opt = TVL1Deconv.Options()

    # Set dtype attribute based on S.dtype and opt['DataType']
    self.set_dtype(opt, S.dtype)

    self.axes = axes
    # Sizes of the regularised axes
    self.axsz = tuple(np.asarray(S.shape)[list(axes)])
    # saxes selects the axes over which the l2 norm of the gradient
    # vector is computed: the final (gradient-component) axis alone for
    # independent per-channel TV, plus the channel axis for Vector TV
    if caxis is None:
        self.saxes = (-1, )
    else:
        self.saxes = (caxis, -1)
    self.lmbda = self.dtype.type(lmbda)

    # Set penalty parameter (default value is derived from lmbda)
    self.set_attr('rho', opt['rho'], dval=(2.0 * self.lmbda + 0.1),
                  dtype=self.dtype)

    # Auxiliary/dual variables have one extra trailing axis holding the
    # gradient components plus one slot for the data fidelity term
    yshape = S.shape + (len(axes) + 1, )
    self.S = np.asarray(S, dtype=self.dtype)
    super(TVL1Deconv, self).__init__(S.size, yshape, yshape, S.dtype, opt)

    # Shape of the transform on the regularised axes
    self.axshp = tuple([S.shape[k] for k in axes])
    # Pad kernel A to the dimensionality of S and precompute frequency
    # domain quantities used by the solver iterations
    self.A = sl.atleast_nd(S.ndim, A.astype(self.dtype))
    self.Af = sl.rfftn(self.A, self.axshp, axes=axes)
    self.Sf = sl.rfftn(self.S, axes=axes)
    # conj(Af)*Af and conj(Af)*Sf, i.e. A^H A and A^H S in the DFT domain
    self.AHAf = np.conj(self.Af) * self.Af
    self.AHSf = np.conj(self.Af) * self.Sf

    # Data fidelity and TV weight arrays
    self.Wdf = np.asarray(self.opt['DFidWeight'], dtype=self.dtype)
    self.Wtv = np.asarray(self.opt['TVWeight'], dtype=self.dtype)
    # If Wtv has the same ndim as S, append an axis so it broadcasts
    # against the gradient stack (which has an extra trailing axis)
    if hasattr(self.Wtv, 'ndim') and self.Wtv.ndim == S.ndim:
        self.Wtvna = self.Wtv[..., np.newaxis]
    else:
        self.Wtvna = self.Wtv

    # Construct gradient operators in the frequency domain, and stack
    # the kernel transform onto the gradient filters along their final
    # axis so both operators can be applied together
    self.Gf, self.GHGf = sl.GradientFilters(S.ndim, axes, self.axshp,
                                            dtype=self.dtype)
    self.GAf = np.concatenate((self.Gf, self.Af[..., np.newaxis]),
                              axis=self.Gf.ndim - 1)
def __init__(self, A, S, lmbda, opt=None, axes=(0,1), caxis=None):
    """
    Initialise a TVL1Deconv object with problem parameters.

    Parameters
    ----------
    A : array_like
      Filter kernel corresponding to operator :math:`H` above
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter
    opt : TVL1Deconv.Options object
      Algorithm options
    axes : tuple, optional (default (0,1))
      Axes on which TV regularisation is to be applied
    caxis : int or None, optional (default None)
      Axis on which channels of a multi-channel image are stacked.
      If None, TV regularisation is applied independently to each
      channel, otherwise Vector TV :cite:`blomgren-1998-color`
      regularisation is applied jointly to all channels.
    """

    if opt is None:
        opt = TVL1Deconv.Options()

    # Set dtype attribute based on S.dtype and opt['DataType']
    self.set_dtype(opt, S.dtype)

    self.axes = axes
    # saxes selects the axes over which the l2 norm of the gradient
    # vector is computed: the final (gradient-component) axis alone for
    # independent per-channel TV, plus the channel axis for Vector TV
    if caxis is None:
        self.saxes = (-1,)
    else:
        self.saxes = (caxis,-1)
    self.lmbda = self.dtype.type(lmbda)

    # Set penalty parameter (default value is derived from lmbda)
    self.set_attr('rho', opt['rho'], dval=(2.0*self.lmbda + 0.1),
                  dtype=self.dtype)

    # Auxiliary/dual variables have one extra trailing axis holding the
    # gradient components plus one slot for the data fidelity term
    yshape = S.shape + (len(axes)+1,)
    self.S = np.asarray(S, dtype=self.dtype)
    super(TVL1Deconv, self).__init__(S.size, yshape, yshape, S.dtype, opt)

    # Shape of the transform on the regularised axes
    self.axshp = [S.shape[k] for k in axes]
    # Pad kernel A to the dimensionality of S and precompute frequency
    # domain quantities used by the solver iterations
    self.A = sl.atleast_nd(S.ndim, A.astype(self.dtype))
    self.Af = sl.rfftn(self.A, self.axshp, axes=axes)
    self.Sf = sl.rfftn(self.S, axes=axes)
    # conj(Af)*Af and conj(Af)*Sf, i.e. A^H A and A^H S in the DFT domain
    self.AHAf = np.conj(self.Af)*self.Af
    self.AHSf = np.conj(self.Af)*self.Sf

    # Data fidelity and TV weight arrays
    self.Wdf = np.asarray(self.opt['DFidWeight'], dtype=self.dtype)
    self.Wtv = np.asarray(self.opt['TVWeight'], dtype=self.dtype)
    # If Wtv has the same ndim as S, append an axis so it broadcasts
    # against the gradient stack (which has an extra trailing axis)
    if hasattr(self.Wtv, 'ndim') and self.Wtv.ndim == S.ndim:
        self.Wtvna = self.Wtv[...,np.newaxis]
    else:
        self.Wtvna = self.Wtv

    # Construct gradient operators in the frequency domain, and stack
    # the kernel transform onto the gradient filters along their final
    # axis so both operators can be applied together
    self.Gf, self.GHGf = sl.GradientFilters(S.ndim, axes, self.axshp,
                                            dtype=self.dtype)
    self.GAf = np.concatenate((self.Gf, self.Af[...,np.newaxis]),
                              axis=self.Gf.ndim-1)

    # Increment `runtime` to reflect object initialisation
    # time. The timer object is reset to avoid double-counting of
    # elapsed time if a similar increment is applied in a derived
    # class __init__.
    self.runtime += self.timer.elapsed(reset=True)
def __init__(self, A, S, lmbda, opt=None, axes=(0, 1)):
    r"""
    Initialise a TVL2Deconv object with problem parameters.

    Parameters
    ----------
    A : array_like
      Filter kernel (see :math:`\mathbf{h}` above)
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter
    opt : TVL2Deconv.Options object
      Algorithm options
    axes : tuple or list
      Axes on which TV regularisation is to be applied
    """

    if opt is None:
        opt = TVL2Deconv.Options()

    # Set dtype attribute based on S.dtype and opt['DataType']
    self.set_dtype(opt, S.dtype)

    self.S = np.asarray(S, dtype=self.dtype)
    self.axes = axes
    self.lmbda = self.dtype.type(lmbda)

    # Set penalty parameter (default value is derived from lmbda)
    self.set_attr('rho', opt['rho'], dval=(2.0 * self.lmbda + 0.1),
                  dtype=self.dtype)

    # Auxiliary/dual variables have one extra trailing axis holding the
    # gradient components, one per regularised axis
    yshape = S.shape + (len(axes), )
    super(TVL2Deconv, self).__init__(S.size, yshape, yshape, S.dtype, opt)

    # Shape of the transform on the regularised axes
    self.axshp = [S.shape[k] for k in axes]
    # Pad kernel A to the dimensionality of S and precompute frequency
    # domain quantities used by the solver iterations
    self.A = sl.atleast_nd(S.ndim, A.astype(self.dtype))
    self.Af = sl.rfftn(self.A, self.axshp, axes=axes)
    self.Sf = sl.rfftn(self.S, axes=axes)
    # conj(Af)*Af and conj(Af)*Sf, i.e. A^H A and A^H S in the DFT domain
    self.AHAf = np.conj(self.Af) * self.Af
    self.AHSf = np.conj(self.Af) * self.Sf

    # TV weight array; if it has the same ndim as S, append an axis so
    # it broadcasts against the gradient stack (which has an extra axis)
    self.Wtv = np.asarray(self.opt['TVWeight'], dtype=self.dtype)
    if hasattr(self.Wtv, 'ndim') and self.Wtv.ndim == S.ndim:
        self.Wtvna = self.Wtv[..., np.newaxis]
    else:
        self.Wtvna = self.Wtv

    # Construct gradient operators in frequency domain
    self.Gf, self.GHGf = sl.GradientFilters(S.ndim, axes, self.axshp,
                                            dtype=self.dtype)

    # Increment `runtime` to reflect object initialisation
    # time. The timer object is reset to avoid double-counting of
    # elapsed time if a similar increment is applied in a derived
    # class __init__.
    self.runtime += self.timer.elapsed(reset=True)
def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
    """
    Initialise a ConvBPDNRecTV object with problem parameters.

    |

    **Call graph**

    .. image:: _static/jonga/cbpdnrtv_init.svg
       :width: 20%
       :target: _static/jonga/cbpdnrtv_init.svg

    |

    Parameters
    ----------
    D : array_like
      Dictionary matrix
    S : array_like
      Signal vector or matrix
    lmbda : float
      Regularisation parameter (l1)
    mu : float
      Regularisation parameter (gradient)
    opt : :class:`ConvBPDNRecTV.Options` object
      Algorithm options
    dimK : 0, 1, or None, optional (default None)
      Number of dimensions in input signal corresponding to multiple
      independent signals
    dimN : int, optional (default 2)
      Number of spatial dimensions
    """

    if opt is None:
        opt = ConvBPDNRecTV.Options()

    # Infer problem dimensions and set relevant attributes of self
    self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

    # Call parent class __init__. NB: np.prod replaces the np.product
    # alias, which was deprecated in NumPy 1.25 and removed in 2.0
    Nx = np.prod(self.cri.shpX)
    # The split variable Y stacks the coefficient maps with one extra
    # block of Cd channels per spatial axis, appended on the filter axis
    yshape = list(self.cri.shpX)
    yshape[self.cri.axisM] += len(self.cri.axisN) * self.cri.Cd
    super(ConvBPDNRecTV, self).__init__(Nx, yshape, yshape, S.dtype, opt)

    # Set l1 term scaling and weight array (reshaped to broadcast over
    # the standard coefficient array layout)
    self.lmbda = self.dtype.type(lmbda)
    self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
    self.Wl1 = self.Wl1.reshape(cr.l1Wshape(self.Wl1, self.cri))

    # Set TV term scaling and weight array; a non-scalar TV weight is
    # reshaped with leading singleton axes so it broadcasts over the
    # standard (spatial + channel + signal + filter) array layout
    self.mu = self.dtype.type(mu)
    if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
        self.Wtv = np.asarray(
            opt['TVWeight'].reshape((1, ) * (dimN + 2) +
                                    opt['TVWeight'].shape),
            dtype=self.dtype)
    else:
        # Wtv is a scalar: no need to change shape
        self.Wtv = self.dtype.type(opt['TVWeight'])

    # Set penalty parameter (default value is derived from lmbda)
    self.set_attr('rho', opt['rho'], dval=(50.0 * self.lmbda + 1.0),
                  dtype=self.dtype)

    # Set rho_xi attribute
    self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,
                  dtype=self.dtype)

    # Reshape D and S to standard layout
    self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
    self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

    # Compute signal in DFT domain
    self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

    # Gradient operators in the frequency domain (GHGf is not retained
    # as an attribute in this implementation)
    self.Gf, GHGf = sl.GradientFilters(self.cri.dimN + 3, self.cri.axisN,
                                       self.cri.Nv, dtype=self.dtype)

    # Initialise byte-aligned arrays for pyfftw
    self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
    # rfftn halves the last transformed axis (real-input symmetry)
    xfshp = list(self.cri.shpX)
    xfshp[dimN - 1] = xfshp[dimN - 1] // 2 + 1
    self.Xf = sl.pyfftw_empty_aligned(xfshp,
                                      dtype=sl.complex_dtype(self.dtype))

    self.setdict()