Example #1
    def __init__(self, D0, S0, lmbda=None, opt=None, dimK=1, dimN=2):
        if opt is None:
            opt = OnlineSliceDictLearn.Options()
        self.opt = opt

        self.set_dtype(opt, S0.dtype)

        self.cri = cr.CSC_ConvRepIndexing(D0, S0, dimK, dimN)

        self.isc = self.config_itstats()

        self.itstat = []
        self.j = 0

        # config for real application
        self.set_attr('lmbda', lmbda, dtype=self.dtype)
        self.set_attr('boundary', opt['Boundary'], dval='circulant_back')

        self.Sval = np.asarray(S0.reshape(self.cri.shpS), dtype=self.dtype)
        # [N, C, H, W]
        self.Sval = self.Sval.squeeze(-1).transpose(3, 2, 0, 1)
        self.Sval_slice = self.im2slices(self.Sval)

        self.dsz = D0.shape
        D0 = Pcn(D0, opt['CCMOD', 'ZeroMean'])
        self.D = np.asarray(D0.reshape(self.cri.shpD), dtype=self.dtype)
        # [Cin, Hc, Wc, Cout]; channel first
        self.D = self.D.squeeze(-2).transpose(2, 0, 1, 3)
        self.D = self.D.reshape(-1, self.D.shape[-1])

        if self.opt['Verbose'] and self.opt['StatusHeader']:
            self.isc.printheader()
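The comments above describe converting SPORCO's blob layout to a channel-first layout. Below is a minimal standalone sketch of those reshapes and transposes on dummy data (sizes are arbitrary; `cnvrep` is assumed to be `sporco.cnvrep`, as in the tests further below):

# Minimal sketch of the blob -> channel-first conversions used above.
# Sizes are illustrative; cnvrep is assumed to be sporco.cnvrep.
import numpy as np
from sporco import cnvrep

D0 = np.random.randn(8, 8, 3, 16)        # (Hc, Wc, C, M)
S0 = np.random.randn(32, 32, 3, 4)       # (H, W, C, K)
cri = cnvrep.CSC_ConvRepIndexing(D0, S0, dimK=1, dimN=2)

S = S0.reshape(cri.shpS)                 # blob layout (H, W, C, K, 1)
S = S.squeeze(-1).transpose(3, 2, 0, 1)  # -> (K, C, H, W)

D = D0.reshape(cri.shpD)                 # blob layout (Hc, Wc, C, 1, M)
D = D.squeeze(-2).transpose(2, 0, 1, 3)  # -> (C, Hc, Wc, M)
D = D.reshape(-1, D.shape[-1])           # -> (C*Hc*Wc, M)
print(S.shape, D.shape)                  # (4, 3, 32, 32) (192, 16)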
Example #2
    def __init__(self, D0, S0, lmbda=None, opt=None, dimK=None, dimN=2):
        if opt is None:
            opt = OnlineDictLearnDenseSurrogate.Options()
        assert isinstance(opt, OnlineDictLearnDenseSurrogate.Options)
        self.opt = opt

        self.set_dtype(opt, S0.dtype)

        self.cri = cr.CSC_ConvRepIndexing(D0, S0, dimK=dimK, dimN=dimN)

        self.isc = self.config_itstats()
        self.itstat = []
        self.j = 0

        self.set_attr('lmbda', lmbda, dtype=self.dtype)

        D0 = Pcn(D0, opt['CCMOD', 'ZeroMean'])

        self.D = np.asarray(D0.reshape(self.cri.shpD), dtype=self.dtype)
        self.S0 = np.asarray(S0.reshape(self.cri.shpS), dtype=self.dtype)
        self.At = self.dtype.type(0.)
        self.Bt = self.dtype.type(0.)

        self.lmbda = self.dtype.type(lmbda)
        self.Lmbda = self.dtype.type(0.)
        self.p = self.dtype.type(self.opt['OCDL', 'p'])

        if self.opt['Verbose'] and self.opt['StatusHeader']:
            self.isc.printheader()
Example #3
    def __init__(self, D, S, lmbda=None, opt=None, dimK=None, dimN=2):
        if opt is None:
            opt = ConvBPDNSliceFISTA.Options()

        if not hasattr(self, 'cri'):
            self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

        self.set_dtype(opt, S.dtype)
        self.lmbda = self.dtype.type(lmbda)
        # set boundary condition
        self.set_attr('boundary',
                      opt['Boundary'],
                      dval='circulant_back',
                      dtype=None)
        self.setdict(D)

        self.S = np.asarray(S.reshape(self.cri.shpS), dtype=S.dtype)
        self.S = self.S.squeeze(-1).transpose((3, 2, 0, 1))
        self.S_slice = self.im2slices(self.S)
        xshape = (self.S_slice.shape[0], self.D.shape[1],
                  self.S_slice.shape[-1])
        Nx = np.prod(xshape)
        super().__init__(Nx, xshape, S.dtype, opt)
        if self.opt['BackTrack', 'Enabled']:
            self.L /= self.lmbda
        self.Y = self.X.copy()
        self.residual = -self.S_slice.copy()
Example #4
def coef_dic_update_L2_L0(parameter_gamma, parameter_rho_dic, parameter_mu, S0,
                          D, itr, thr, lay):

    S0 = 2 * S0
    S0 -= np.mean(S0, axis=(0, 1))
    S = S0

    cri = cr.CSC_ConvRepIndexing(D, S)
    dsz = D.shape

    X = the_minimum_l2_norm_solution(D, S, cri)
    parameter_sigma = setting_sigma(X)

    Xstep_iteration = itr[0]
    Dstep_iteration = itr[1]
    total_iteration = itr[2]

    bar_Lay = tqdm(total=len(parameter_sigma), desc=lay, leave=False)
    for l in parameter_sigma:
        bar_L = tqdm(total=total_iteration, desc='L', leave=False)
        for L in range(total_iteration):

            nabla_x = nabla_x_create(X, l)
            X = X - parameter_mu * nabla_x

            X = projection_to_solution_space_by_L2(D, X, S, parameter_gamma,
                                                   Xstep_iteration, thr, cri,
                                                   dsz)

            if (np.sum(X != 0) == 0):
                bar_L.update(total_iteration - L)
                break

            if ((l == parameter_sigma[0]) & (L == 0)):

                G = D
                H = np.zeros(D.shape)
            D, G, H = dictionary_learning_by_L2(X, S, dsz, G, H,
                                                parameter_rho_dic,
                                                Dstep_iteration, thr, cri)
            bar_L.update(1)
        bar_L.close()

        if (np.sum(X != 0) == 0):
            bar_Lay.update(len(parameter_sigma) - l)
            break

        bar_Lay.update(1)
    bar_Lay.close()

    #X = np.where((-0.5*parameter_sigma[-1] < X) & (X < parameter_sigma[-1]*0.5), 0, X)

    l0_norm = np.sum(X != 0)
    print("[" + lay + "] L0 norm: %d " % l0_norm)

    return D, X, l0_norm
Example #5
def feature_extraction_L1_L0(parameter_rho_coef, parameter_gamma, parameter_mu,
                             S0, D, itr, thr, lay):

    S0 = 2 * S0
    S0 -= np.mean(S0, axis=(0, 1))
    S = S0

    cri = cr.CSC_ConvRepIndexing(D, S)
    dsz = D.shape

    X = the_minimum_l2_norm_solution(D, S, cri)

    parameter_sigma = setting_sigma(X)

    Df = con.convert_to_Df(D, S, cri)

    Xstep_iteration = itr[0]
    total_iteration = itr[2]

    bar_Lay = tqdm(total=len(parameter_sigma), desc=lay, leave=False)
    for k in parameter_sigma:

        bar_L = tqdm(total=total_iteration, desc='L', leave=False)
        for L in range(total_iteration):

            nabla_x = nabla_x_create(X, k)
            X = X - parameter_mu * nabla_x

            if ((k == parameter_sigma[0]) & (L == 0)):
                Xf = con.convert_to_Xf(X, S, cri)
                Df = con.convert_to_Df(D, S, cri)
                XfDf = np.sum(Xf * Df, axis=cri.axisM)
                XD = con.convert_to_S(XfDf, cri)
                Y = XD - S
                U = np.zeros(S.shape)
            X, Y, U = projection_to_solution_space_by_L1(
                D, X, S, Y, U, parameter_rho_coef, parameter_gamma,
                Xstep_iteration, thr, cri, dsz)

            if (np.sum(X != 0) == 0):
                bar_L.update(total_iteration - L)
                break
            bar_L.update(1)
        bar_L.close()

        if (np.sum(X != 0) == 0):
            bar_Lay.update(len(parameter_sigma) - k)
            break

        bar_Lay.update(1)
    #X = np.where((-parameter_sigma[-1] < X) & (X < parameter_sigma[-1]), 0, X)
    bar_Lay.close()

    l0_norm = np.sum(X != 0)
    print("[" + lay + "] L0 norm: %d " % l0_norm)
    return X, l0_norm
Example #6
    def __init__(self, D0, S0, lmbda=None, opt=None, dimK=1, dimN=2):
        """Internally we use a 7-dim representation over blobs. This increases
        the spatial dimension of 2 to 4 to allow for extra dimensions for
        slices.

        -------------------------------------------------------------------
        blob     | spatial                                ,chn  ,sig  ,fil
        -------------------------------------------------------------------
        S        |  (H      ,  W      ,  1      ,  1      ,  C  ,  K  ,  1)
        D        |  (Hc     ,  Wc     ,  1      ,  1      ,  C  ,  1  ,  M)
        X        |  (H      ,  W      ,  1      ,  1      ,  1  ,  K  ,  M)
        Omega    |  (Hc     ,  Wc     ,  2Hc-1  ,  2Wc-1  ,  C  ,  1  ,  M)
        At       |  (2Hc-1  ,  2Wc-1  ,  1      ,  1      ,  1  ,  M  ,  M)
        Bt       |  (Hc     ,  Wc     ,  1      ,  1      ,  C  ,  1  ,  M)
        patches  |  (H      ,  W      ,  Hc     ,  Wc     ,  C  ,  K  ,  1)
        gamma    |  (H      ,  W      ,  2Hc-1  ,  2Wc-1  ,  1  ,  K  ,  M)
        -------------------------------------------------------------------

        Here the `signal` dimension of At is occupied by M, which comes from
        the stripe dictionary Omega.
        """
        if opt is None:
            opt = OnlineDictLearnSliceSurrogate.Options()
        assert isinstance(opt, OnlineDictLearnSliceSurrogate.Options)
        self.opt = opt

        self.set_dtype(opt, S0.dtype)

        # insert extra dims
        D0 = D0[:, :, np.newaxis, np.newaxis, ...]
        S0 = S0[:, :, np.newaxis, np.newaxis, ...]

        assert dimN == 2
        self.cri = cr.CSC_ConvRepIndexing(D0, S0, dimK=None, dimN=4)
        self.osz = list(copy.deepcopy(self.cri.shpD))
        self.osz[2], self.osz[3] = 2 * self.osz[0] - 1, 2 * self.osz[1] - 1

        self.isc = self.config_itstats()
        self.itstat = []
        self.j = 0

        self.set_attr('lmbda', lmbda, dtype=self.dtype)

        D0 = Pcn(D0, opt['CCMOD', 'ZeroMean'])

        self.D = np.asarray(D0.reshape(self.cri.shpD), dtype=self.dtype)
        self.S0 = np.asarray(S0.reshape(self.cri.shpS), dtype=self.dtype)
        self.At = self.dtype.type(0.)
        self.Bt = self.dtype.type(0.)

        self.lmbda = self.dtype.type(lmbda)
        self.Lmbda = self.dtype.type(0.)
        self.p = self.dtype.type(self.opt['OCDL', 'p'])

        if self.opt['Verbose'] and self.opt['StatusHeader']:
            self.isc.printheader()
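The table in the docstring above fixes the 7-dim blob shapes. Below is a minimal sketch (arbitrary sizes; `sporco.cnvrep` assumed) that inserts the two extra slice dimensions as in the constructor and checks the S, D and X rows of the table:

# Minimal sketch reproducing the S/D/X rows of the blob table above.
import numpy as np
from sporco import cnvrep

H, W, Hc, Wc, C, K, M = 32, 32, 8, 8, 3, 4, 16
D0 = np.random.randn(Hc, Wc, C, M)
S0 = np.random.randn(H, W, C, K)

# insert the two extra spatial dimensions, as in __init__ above
D0 = D0[:, :, np.newaxis, np.newaxis, ...]
S0 = S0[:, :, np.newaxis, np.newaxis, ...]

cri = cnvrep.CSC_ConvRepIndexing(D0, S0, dimK=None, dimN=4)
print(cri.shpS)  # expected (H, W, 1, 1, C, K, 1)
print(cri.shpD)  # expected (Hc, Wc, 1, 1, C, 1, M)
print(cri.shpX)  # expected (H, W, 1, 1, 1, K, M)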
Example #7
    def __init__(self, D, S, opt=None, dimK=None, dimN=2):
        """
        This class supports an arbitrary number of spatial dimensions,
        `dimN`, with a default of 2. The input dictionary `D` is either
        `dimN` + 1 dimensional, in which case each spatial component
        (image in the default case) is assumed to consist of a single
        channel, or `dimN` + 2 dimensional, in which case the final
        dimension is assumed to contain the channels (e.g. colour
        channels in the case of images). The input signal set `S` is
        either `dimN` dimensional (no channels, only one signal),
        `dimN` + 1 dimensional (either multiple channels or multiple
        signals), or `dimN` + 2 dimensional (multiple channels and
        multiple signals). Determination of problem dimensions is
        handled by :class:`.cnvrep.CSC_ConvRepIndexing`.


        Parameters
        ----------
        D : array_like
          Dictionary array
        S : array_like
          Signal array
        opt : :class:`GenericConvBPDN.Options` object
          Algorithm options
        dimK : 0, 1, or None, optional (default None)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial/temporal dimensions
        """

        # Set default options if none specified
        if opt is None:
            opt = ComplexGenericConvBPDN.Options()

        # Infer problem dimensions and set relevant attributes of self
        if not hasattr(self, 'cri'):
            self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

        # Call parent class __init__
        super(ComplexGenericConvBPDN, self).__init__(self.cri.shpX, S.dtype, opt)

        # Reshape D and S to standard layout
        self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
        self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

        # Compute signal in complex DFT domain
        self.Sf = fftn(self.S, None, self.cri.axisN)

        # Initialise byte-aligned arrays for pyfftw
        self.YU = empty_aligned(self.Y.shape, dtype=self.dtype)
        self.Xf = empty_aligned(self.Y.shape, dtype=self.dtype)

        self.setdict()
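The docstring above spells out the accepted dimensionalities of `D` and `S`. Below is a minimal sketch (arbitrary sizes; `sporco.cnvrep` assumed) covering the three `S` layouts for `dimN=2`, consistent with the tests in Examples #8, #9 and #12:

# Minimal sketch of the three S layouts described above (dimN=2).
import numpy as np
from sporco import cnvrep

D = np.random.randn(8, 8, 16)        # dimN+1 dims: single-channel dictionary

S1 = np.random.randn(32, 32)         # dimN dims: one channel, one signal
S2 = np.random.randn(32, 32, 4)      # dimN+1 dims: multiple signals here
S3 = np.random.randn(32, 32, 3, 4)   # dimN+2 dims: channels and signals

c1 = cnvrep.CSC_ConvRepIndexing(D, S1, dimK=0)
c2 = cnvrep.CSC_ConvRepIndexing(D, S2, dimK=1)
c3 = cnvrep.CSC_ConvRepIndexing(D, S3)
print(c1.C, c1.K)  # expected 1 1
print(c2.C, c2.K)  # expected 1 4
print(c3.C, c3.K)  # expected 3 4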
Example #8
 def test_02(self):
     N = 32
     M = 16
     L = 8
     K = 4
     D = np.random.randn(L, L, M)
     S = np.random.randn(N, N, K)
     cri = cnvrep.CSC_ConvRepIndexing(D, S, dimK=1)
     assert cri.M == M
     assert cri.K == K
     assert cri.Nv == (N, N)
Example #9
 def test_03(self):
     N = 32
     M = 16
     L = 8
     C = 3
     D = np.random.randn(L, L, M)
     S = np.random.randn(N, N, C)
     cri = cnvrep.CSC_ConvRepIndexing(D, S, dimK=0)
     assert cri.M == M
     assert cri.K == 1
     assert cri.C == 3
     assert cri.Nv == (N, N)
Example #10
    def __init__(self, D, S, lmbda=None, opt=None, dimK=None, dimN=2):
        r"""We use the same layout as ConvBPDN as input and output, but for
        internal computation we use a differnt layout.

        Internal Parameters
        -------------------
        X: [K, m, N]
          Convolutional representation of the input signal. m is the size
          of each dictionary atom, K is the batch size of input signals,
          and N is the number of slices extracted from each signal (usually
          the number of pixels in an image).
        Y: [K, n, N]
          Split variable with the constraint :math:`D_l x_i - y_i = 0`.
          n is the size of each slice.
        U: [K, n, N]
          Dual variable with the same size as Y.
        """
        if opt is None:
            opt = ConvBPDNSlice.Options()
        # Set dtype attribute based on S.dtype and opt['DataType']
        self.set_dtype(opt, S.dtype)
        if not hasattr(self, 'cri'):
            self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
        self.boundary = opt['Boundary']
        # Number of elements of sparse representation x is invariant to
        # slice/FFT solvers.
        Nx = np.prod(self.cri.shpX)
        # NOTE: To be compatible with im2slices/slices2im, where each slice
        # is organized as in_channels x patch_size x patch_size, the dictionary
        # is first transposed to [in_channels, patch_size, patch_size, out_channels],
        # and then reshaped to 2-D (N, M).
        self.setdict(D)
        # Externally the input signal should have a data layout as
        # S(N0, N1, ..., C, K).
        # First convert to common pytorch Variable layout.
        # [H, W, C, K, 1] -> [K, C, H, W]
        self.S = np.asarray(S.reshape(self.cri.shpS), dtype=S.dtype)
        self.S = self.S.squeeze(-1).transpose((3, 2, 0, 1))
        # [K, n, N]
        self.S_slice = self.im2slices(self.S)
        self.lmbda = lmbda
        # Set penalty parameter if not set
        self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),
                      dtype=self.dtype)
        super().__init__(Nx, self.S_slice.shape, self.S_slice.shape,
                         S.dtype, opt)
        self.X = np.zeros(
            (self.S_slice.shape[0], self.D.shape[-1], self.Y.shape[-1]),
            dtype=self.dtype
        )
        self.extra_timer = su.Timer(['xstep', 'ystep'])
Example #11
 def evaluate(self, S, X):
     """Optionally evaluate functional values."""
     if self.opt['AccurateDFid']:
         cri_s = cr.CSC_ConvRepIndexing(self.getdict(),
                                        S.squeeze(),
                                        dimK=None,
                                        dimN=2)
         Df = sl.rfftn(self.D.reshape(cri_s.shpD), cri_s.Nv, cri_s.axisN)
         Xf = sl.rfftn(X.reshape(cri_s.shpX), cri_s.Nv, cri_s.axisN)
         Sf = sl.rfftn(S.reshape(cri_s.shpS), cri_s.Nv, cri_s.axisN)
         Ef = sl.inner(Df, Xf, axis=cri_s.axisM) - Sf
         dfd = sl.rfl2norm2(Ef, S.shape, axis=cri_s.axisN) / 2.
         rl1 = np.sum(np.abs(X))
         evl = dict(DFid=dfd, RegL1=rl1, ObjFun=dfd + self.lmbda * rl1)
     else:
         evl = None
     return evl
Example #12
 def test_01(self):
     N = 32
     M = 16
     L = 8
     D = np.random.randn(L, L, M)
     S = np.random.randn(N, N)
     cri = cnvrep.CSC_ConvRepIndexing(D, S, dimK=0)
     assert cri.M == M
     assert cri.K == 1
     assert cri.Nv == (N, N)
     assert str(cri) != ''
     W = np.random.randn(N, N)
     assert cnvrep.l1Wshape(W, cri) == (N, N, 1, 1, 1)
     W = np.random.randn(N, N, M)
     assert cnvrep.l1Wshape(W, cri) == (N, N, 1, 1, M)
     W = np.random.randn(N, N, 1, 1, M)
     assert cnvrep.l1Wshape(W, cri) == (N, N, 1, 1, M)
Example #13
def feature_extraction_L2_L0(parameter_gamma, parameter_mu, S0, D, itr, thr,
                             lay):

    S0 = 2 * S0
    S0 -= np.mean(S0, axis=(0, 1))
    S = S0

    cri = cr.CSC_ConvRepIndexing(D, S)
    dsz = D.shape

    X = the_minimum_l2_norm_solution(D, S, cri)

    parameter_sigma = setting_sigma(X)

    Xstep_iteration = itr[0]
    total_iteration = itr[2]

    bar_Lay = tqdm(total=len(parameter_sigma), desc=lay, leave=False)
    for k in parameter_sigma:

        bar_L = tqdm(total=total_iteration, desc='L', leave=False)
        for L in range(total_iteration):

            nabla_x = nabla_x_create(X, k)
            X = X - parameter_mu * nabla_x

            X = projection_to_solution_space_by_L2(D, X, S, parameter_gamma,
                                                   Xstep_iteration, thr, cri,
                                                   dsz)
            if (np.sum(X != 0) == 0):
                bar_L.update(total_iteration - L)
                break
            bar_L.update(1)
        bar_L.close()

        if (np.sum(X != 0) == 0):
            bar_Lay.update(len(parameter_sigma) - k)
            break

        bar_Lay.update(1)
    bar_Lay.close()

    #X = np.where((-parameter_sigma[-1] < X) & (X < parameter_sigma[-1]), 0, X)
    l0_norm = np.sum(X != 0)
    print("[" + lay + "] L0 norm: %d " % l0_norm)
    return X, l0_norm
Example #14
    def __init__(self, D, B, S, lmbda, opt=None, dimK=None, dimN=2):
        """
        Parameters
        ----------
        D : array_like
          Convolutional dictionary array
        B : array_like
          Standard dictionary array
        S : array_like
          Signal array
        lmbda : float
          Regularisation parameter
        opt : :class:`ConvProdDictBPDN.Options` object
          Algorithm options
        dimK : 0, 1, or None, optional (default None)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial/temporal dimensions
        """

        # Set default options if none specified
        if opt is None:
            opt = ConvProdDictBPDN.Options()

        # Since D operates on X B^T, the number of channels in X is equal
        # to the number of columns in B rather than the number of channels
        # in S. Here we initialise the object representing the problem
        # dimensions and correct the shape of X as required.
        self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
        if self.cri.Cd > 1:
            raise ValueError('Only single-channel convolutional dictionaries'
                             ' are supported')
        shpX = list(self.cri.shpX)
        shpX[self.cri.axisC] = B.shape[1]
        self.cri.shpX = tuple(shpX)

        # Keep a record of the B dictionary
        self.set_dtype(opt, S.dtype)
        self.B = np.asarray(B, dtype=self.dtype)

        # Call parent constructor
        super(ConvProdDictBPDN, self).__init__(D, S, lmbda, opt, dimK, dimN)
Example #15
 def __init__(self, D, S, lmbda=None, opt=None, dimK=None, dimN=2):
     if opt is None:
         opt = ConvBPDNSliceTwoBlockCnstrnt.Options()
     # Set dtype attribute based on S.dtype and opt['DataType']
     self.set_dtype(opt, S.dtype)
     if not hasattr(self, 'cri'):
         self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
     self.lmbda = self.dtype.type(lmbda)
     # Set penalty parameter if not set
     self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),
                   dtype=self.dtype)
     # Set xi if not set
     self.set_attr('tau_xi', opt['AutoRho', 'RsdlTarget'],
                   dval=(1.0+18.3**(np.log10(self.lmbda)+1.0)),
                   dtype=self.dtype)
     # set boundary condition
     self.set_attr('boundary', opt['Boundary'], dval='circulant_back',
                   dtype=None)
     # set weight factor between two constraints
     self.set_attr('gamma', opt['Gamma'], dval=1., dtype=self.dtype)
     self.setdict(D)
     # Number of elements of sparse representation x is invariant to
     # slice/FFT solvers.
     Nx = np.prod(self.cri.shpX)
     # Externally the input signal should have a data layout as
     # S(N0, N1, ..., C, K).
     # First convert to common pytorch Variable layout.
     # [H, W, C, K, 1] -> [K, C, H, W]
     self.S = np.asarray(S.reshape(self.cri.shpS), dtype=S.dtype)
     self.S = self.S.squeeze(-1).transpose((3, 2, 0, 1))
     # [K, n, N]
     self.S_slice = self.im2slices(self.S)
     yshape = (self.S_slice.shape[0], self.D.shape[0] + self.D.shape[1],
               self.S_slice.shape[-1])
     super().__init__(Nx, yshape, 1, self.D.shape[0], S.dtype, opt)
     self.X = np.zeros_like(self._Y1, dtype=self.dtype)
     self.extra_timer = su.Timer(['xstep', 'ystep'])
Example #16
    def __init__(self, D, S, lmbda=None, opt=None, dimK=None, dimN=2):
        """
        Initialise a ConvBPDN object with problem parameters.

        This class supports an arbitrary number of spatial dimensions,
        `dimN`, with a default of 2. The input dictionary `D` is either
        `dimN` + 1 dimensional, in which case each spatial component
        (image in the default case) is assumed to consist of a single
        channel, or `dimN` + 2 dimensional, in which case the final
        dimension is assumed to contain the channels (e.g. colour
        channels in the case of images). The input signal set `S` is
        either `dimN` dimensional (no channels, only one signal), `dimN` + 1
        dimensional (either multiple channels or multiple signals), or
        `dimN` + 2 dimensional (multiple channels and multiple signals).
        Determination of problem dimensions is handled by
        :class:`.cnvrep.CSC_ConvRepIndexing`.


        |

        **Call graph**

        .. image:: _static/jonga/fista_cbpdn_init.svg
           :width: 20%
           :target: _static/jonga/fista_cbpdn_init.svg

        |


        Parameters
        ----------
        D : array_like
          Dictionary array
        S : array_like
          Signal array
        lmbda : float
          Regularisation parameter
        opt : :class:`ConvBPDN.Options` object
          Algorithm options
        dimK : 0, 1, or None, optional (default None)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial/temporal dimensions
        """

        # Set default options if none specified
        if opt is None:
            opt = ConvBPDN.Options()

        # Infer problem dimensions and set relevant attributes of self
        if not hasattr(self, 'cri'):
            self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

        # Set dtype attribute based on S.dtype and opt['DataType']
        self.set_dtype(opt, S.dtype)

        # Set default lambda value if not specified
        if lmbda is None:
            cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
            Df = sl.rfftn(D.reshape(cri.shpD), cri.Nv, axes=cri.axisN)
            Sf = sl.rfftn(S.reshape(cri.shpS), axes=cri.axisN)
            b = np.conj(Df) * Sf
            lmbda = 0.1 * abs(b).max()

        # Set l1 term scaling and weight array
        self.lmbda = self.dtype.type(lmbda)
        self.wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)

        # Call parent class __init__
        xshape = self.cri.shpX
        super(ConvBPDN, self).__init__(xshape, S.dtype, opt)
        if self.opt['BackTrack', 'Enabled']:
            self.L /= self.lmbda

        # Reshape D and S to standard layout
        self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
        self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

        # Compute signal in DFT domain
        self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

        # Create byte aligned arrays for FFT calls
        xfshp = list(self.X.shape)
        xfshp[dimN - 1] = xfshp[dimN - 1] // 2 + 1
        self.Xf = sl.pyfftw_empty_aligned(xfshp,
                                          dtype=sl.complex_dtype(self.dtype))

        # Initialise auxiliary variable Yf
        self.Yf = sl.pyfftw_empty_aligned(xfshp,
                                          dtype=sl.complex_dtype(self.dtype))

        self.Ryf = -self.Sf

        self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
        self.Yf = self.Xf
        self.store_prev()

        self.setdict()
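The default `lmbda` above is one tenth of the largest magnitude of the cross-correlation between dictionary and signal in the DFT domain. Below is a minimal sketch computing the same quantity with plain NumPy FFTs (arbitrary sizes; `sporco.cnvrep` assumed; the original uses `sl.rfftn`):

# Minimal sketch of the default-lambda rule used above:
# lmbda = 0.1 * max |conj(Dhat) * Shat|
import numpy as np
from sporco import cnvrep as cr

D = np.random.randn(8, 8, 16)
S = np.random.randn(64, 64)
cri = cr.CSC_ConvRepIndexing(D, S, dimK=0, dimN=2)

Df = np.fft.rfftn(D.reshape(cri.shpD), s=cri.Nv, axes=cri.axisN)
Sf = np.fft.rfftn(S.reshape(cri.shpS), axes=cri.axisN)
b = np.conj(Df) * Sf
lmbda = 0.1 * np.abs(b).max()
print(lmbda)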
Example #17
    def __init__(self, D, S, lmbda=None, W=None, opt=None, nproc=None,
                 ngrp=None, dimK=None, dimN=2):
        """
        Parameters
        ----------
        D : array_like
          Dictionary matrix
        S : array_like
          Signal vector or matrix
        lmbda : float
          Regularisation parameter
        W : array_like
          Mask array. The array shape must be such that the array is
          compatible for multiplication with input array S (see
          :func:`.cnvrep.mskWshape` for more details).
        opt : :class:`ParConvBPDN.Options` object
          Algorithm options
        nproc : int
          Number of processes
        ngrp : int
          Number of groups in partition of filter indices
        dimK : 0, 1, or None, optional (default None)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial dimensions
        """

        self.pool = None

        # Set default options if none specified
        if opt is None:
            opt = ParConvBPDN.Options()

        # Set dtype attribute based on S.dtype and opt['DataType']
        self.set_dtype(opt, S.dtype)

        # Set default lambda value if not specified
        if lmbda is None:
            cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
            Df = sl.rfftn(D.reshape(cri.shpD), cri.Nv, axes=cri.axisN)
            Sf = sl.rfftn(S.reshape(cri.shpS), axes=cri.axisN)
            b = np.conj(Df) * Sf
            lmbda = 0.1*abs(b).max()

        # Set l1 term scaling and weight array
        self.lmbda = self.dtype.type(lmbda)

        # Set penalty parameter
        self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),
                      dtype=self.dtype)
        self.set_attr('alpha', opt['alpha'], dval=1.0,
                      dtype=self.dtype)

        # Set rho_xi attribute (see Sec. VI.C of wohlberg-2015-adaptive)
        # if self.lmbda != 0.0:
        #     rho_xi = (1.0 + (18.3)**(np.log10(self.lmbda) + 1.0))
        # else:
        #     rho_xi = 1.0
        # self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=rho_xi,
        #               dtype=self.dtype)

        # Call parent class __init__
        super(ParConvBPDN, self).__init__(D, S, opt, dimK, dimN)

        if nproc is None:
            if ngrp is None:
                self.nproc = min(mp.cpu_count(), self.cri.M)
                self.ngrp = self.nproc
            else:
                self.nproc = min(mp.cpu_count(), ngrp, self.cri.M)
                self.ngrp = ngrp
        else:
            if ngrp is None:
                self.ngrp = nproc
                self.nproc = nproc
            else:
                self.ngrp = ngrp
                self.nproc = nproc

        if W is None:
            W = np.array([1.0], dtype=self.dtype)
        self.W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)),
                            dtype=self.dtype)
        self.wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
        self.wl1 = self.wl1.reshape(cr.l1Wshape(self.wl1, self.cri))

        self.xrrs = None

        # Initialise global variables
        # Conv Rep Indexing and parameter values for multiprocessing
        global mp_nproc
        mp_nproc = self.nproc
        global mp_ngrp
        mp_ngrp = self.ngrp
        global mp_Nv
        mp_Nv = self.cri.Nv
        global mp_axisN
        mp_axisN = tuple(i+1 for i in self.cri.axisN)
        global mp_C
        mp_C = self.cri.C
        global mp_Cd
        mp_Cd = self.cri.Cd
        global mp_axisC
        mp_axisC = self.cri.axisC+1
        global mp_axisM
        mp_axisM = 0
        global mp_NonNegCoef
        mp_NonNegCoef = self.opt['NonNegCoef']
        global mp_NoBndryCross
        mp_NoBndryCross = self.opt['NoBndryCross']
        global mp_Dshp
        mp_Dshp = self.D.shape

        # Parameters for optimization
        global mp_lmbda
        mp_lmbda = self.lmbda
        global mp_rho
        mp_rho = self.rho
        global mp_alpha
        mp_alpha = self.alpha
        global mp_rlx
        mp_rlx = self.rlx
        global mp_wl1
        init_mpraw('mp_wl1', np.moveaxis(self.wl1, self.cri.axisM, mp_axisM))

        # Matrices used in optimization
        global mp_S
        init_mpraw('mp_S', np.moveaxis(self.S*self.W**2, self.cri.axisM,
                                       mp_axisM))
        global mp_Df
        init_mpraw('mp_Df', np.moveaxis(self.Df, self.cri.axisM, mp_axisM))
        global mp_X
        init_mpraw('mp_X', np.moveaxis(self.Y, self.cri.axisM, mp_axisM))
        shp_X = list(mp_X.shape)
        global mp_Xnr
        mp_Xnr = mpraw_as_np(mp_X.shape, mp_X.dtype)
        global mp_Y0
        shp_Y0 = shp_X[:]
        shp_Y0[0] = self.ngrp
        shp_Y0[mp_axisC] = mp_C
        if self.opt['Y0'] is not None:
            init_mpraw('Y0', np.moveaxis(
                self.opt['Y0'].astype(self.dtype, copy=True),
                self.cri.axisM, mp_axisM))
        else:
            mp_Y0 = mpraw_as_np(shp_Y0, mp_X.dtype)
        global mp_Y0old
        mp_Y0old = mpraw_as_np(shp_Y0, mp_X.dtype)
        global mp_Y1
        if self.opt['Y1'] is not None:
            init_mpraw('Y1', np.moveaxis(
                self.opt['Y1'].astype(self.dtype, copy=True),
                self.cri.axisM, mp_axisM))
        else:
            mp_Y1 = mpraw_as_np(shp_X, mp_X.dtype)
        global mp_Y1old
        mp_Y1old = mpraw_as_np(shp_X, mp_X.dtype)
        global mp_U0
        if self.opt['U0'] is not None:
            init_mpraw('U0', np.moveaxis(
                self.opt['U0'].astype(self.dtype, copy=True),
                self.cri.axisM, mp_axisM))
        else:
            mp_U0 = mpraw_as_np(shp_Y0, mp_X.dtype)
        global mp_U1
        if self.opt['U1'] is not None:
            init_mpraw('U1', np.moveaxis(
                self.opt['U1'].astype(self.dtype, copy=True),
                self.cri.axisM, mp_axisM))
        else:
            mp_U1 = mpraw_as_np(shp_X, mp_X.dtype)
        global mp_DX
        mp_DX = mpraw_as_np(shp_Y0, mp_X.dtype)
        global mp_DXnr
        mp_DXnr = mpraw_as_np(shp_Y0, mp_X.dtype)

        # Variables used to solve the optimization efficiently
        global mp_inv_off_diag
        if self.W.ndim == self.cri.axisM+1:
            init_mpraw('mp_inv_off_diag', np.moveaxis(
                -self.W**2/(mp_rho*(mp_rho+self.W**2*mp_ngrp)),
                self.cri.axisM, mp_axisM))
        else:
            init_mpraw('mp_inv_off_diag',
                       -self.W**2/(mp_rho*(mp_rho+self.W**2*mp_ngrp)))
        global mp_grp
        mp_grp = [np.min(i) for i in
                  np.array_split(np.array(range(self.cri.M)),
                                 mp_ngrp)] + [self.cri.M, ]
        global mp_cache
        if self.opt['HighMemSolve'] and self.cri.Cd == 1:
            mp_cache = [sl.solvedbi_sm_c(mp_Df[k], np.conj(mp_Df[k]),
                                         mp_alpha**2, mp_axisM) for k in
                        np.array_split(np.array(range(self.cri.M)), self.ngrp)]
        else:
            mp_cache = [None for k in mp_grp]
        global mp_b
        shp_b = shp_Y0[:]
        shp_b[0] = 1
        mp_b = mpraw_as_np(shp_b, mp_X.dtype)

        # Residual and stopping criteria variables
        global mp_ry0
        mp_ry0 = mpraw_as_np((self.ngrp,), mp_X.dtype)
        global mp_ry1
        mp_ry1 = mpraw_as_np((self.ngrp,), mp_X.dtype)
        global mp_sy0
        mp_sy0 = mpraw_as_np((self.ngrp,), mp_X.dtype)
        global mp_sy1
        mp_sy1 = mpraw_as_np((self.ngrp,), mp_X.dtype)
        global mp_nrmAx
        mp_nrmAx = mpraw_as_np((self.ngrp,), mp_X.dtype)
        global mp_nrmBy
        mp_nrmBy = mpraw_as_np((self.ngrp,), mp_X.dtype)
        global mp_nrmu
        mp_nrmu = mpraw_as_np((self.ngrp,), mp_X.dtype)
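`mp_grp` above partitions the `M` filter indices into `ngrp` roughly equal contiguous groups and appends `M` as a final sentinel, so group `g` spans `mp_grp[g]:mp_grp[g+1]`. A minimal standalone sketch of that computation:

# Minimal sketch of the filter-index grouping used for mp_grp above.
import numpy as np

M, ngrp = 16, 3
grp = [np.min(i) for i in
       np.array_split(np.array(range(M)), ngrp)] + [M]
print(grp)  # group boundaries: 0, 6, 11, 16
for g in range(ngrp):
    print(g, list(range(grp[g], grp[g + 1])))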
Example #18
    def solve(self, S):
        self.cri = cr.CSC_ConvRepIndexing(self.getdict(), S,
                                          dimK=self.cri.dimK,
                                          dimN=self.cri.dimN)

        self.timer.start(['solve', 'solve_wo_eval'])

        # Initialize with CBPDN
        self.timer.start('xstep')
        copt = copy.deepcopy(self.opt['CBPDN'])
        if self.opt['OCDL', 'CUCBPDN']:
            X = np.stack([
                cucbpdn.cbpdn(self.getdict(), S[..., i].squeeze(),
                              self.lmbda, opt=copt) for i in range(S.shape[-1])
            ], axis=-2)
            X = np.asarray(X.reshape(self.cri.shpX), dtype=self.dtype)
        elif self.opt['OCDL', 'PARCBPDN']:
            popt = parcbpdn.ParConvBPDN.Options(dict(self.opt['CBPDN']))
            xstep = parcbpdn.ParConvBPDN(self.getdict(), S, self.lmbda,
                                         opt=popt,
                                         nproc=self.opt['OCDL', 'nproc'])
            X = xstep.solve()
            X = np.asarray(X.reshape(self.cri.shpX), dtype=self.dtype)
        else:
            xstep = cbpdn.ConvBPDN(self.getdict(), S, self.lmbda, opt=copt)
            xstep.solve()
            X = np.asarray(xstep.getcoef().reshape(self.cri.shpX),
                           dtype=self.dtype)
        self.timer.stop('xstep')

        # X = np.asarray(xstep.getcoef().reshape(self.cri.shpX), dtype=self.dtype)
        S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

        # update At and Bt
        # (H, W, 1, K, M) -> (H, W, Hc, Wc, 1, K, M)
        self.timer.start('hessian')
        Xe = self.extend_code(X)
        self.update_At(Xe)
        self.update_Bt(Xe, S)
        self.timer.stop('hessian')
        self.Lmbda = self.dtype.type(self.alpha*self.Lmbda+1)

        # update dictionary with FISTA
        fopt = copy.deepcopy(self.opt['CCMOD'])
        fopt['X0'] = self.D
        if self.opt['OCDL', 'DiminishingTol']:
            if self.opt['OCDL', 'MinTol'] is None:
                min_tol = 0.
            else:
                min_tol = self.opt['OCDL', 'MinTol']
            fopt['RelStopTol'] = max(
                self.dtype.type(self.opt['CCMOD', 'RelStopTol']/(1.+self.j)),
                min_tol
            )
        self.timer.start('dstep')
        dstep = SpatialFISTA(self.At, self.Bt, opt=fopt)
        dstep.solve()
        self.timer.stop('dstep')

        # set dictionary
        self.setdict(dstep.getmin())

        self.timer.stop('solve_wo_eval')
        evl = self.evaluate(S, X)
        self.timer.start('solve_wo_eval')

        t = self.timer.elapsed(self.opt['IterTimer'])
        if self.opt['OCDL', 'CUCBPDN']:
            # this requires a slight modification of dictlrn
            itst = self.isc.iterstats(self.j, t, None, dstep.itstat[-1], evl)
        else:
            itst = self.isc.iterstats(self.j, t, xstep.itstat[-1],
                                      dstep.itstat[-1], evl)
        self.itstat.append(itst)

        if self.opt['Verbose']:
            self.isc.printiterstats(itst)

        self.j += 1

        self.timer.stop(['solve', 'solve_wo_eval'])

        if 0:
            import matplotlib.pyplot as plt
            plt.imshow(su.tiledict(self.getdict().squeeze()))
            plt.show()

        return self.getdict()
Example #19
S = getimages().astype(np.float32)
# saveimg2D(np.transpose(S, (2, 0, 1)), 'S.png')
# exit()
print(S.shape)
print("%d images, each is %dx%d." % (S.shape[2], S.shape[0], S.shape[1]))
# Sl, Sh = util.tikhonov_filter(S, 5, 16)
Sl = np.zeros_like(S)
Smean = np.mean(S*2, axis=(0, 1))
print(Smean)
Sh = S*2 - Smean

# TODO: perform zero-padding explicitly (currently relying on implicit padding)
D = np.random.randn(12, 12, 256)
# D = np.random.rand(12, 12, 64)*2 - 1
cri = cnvrep.CSC_ConvRepIndexing(D, S)
Dr0 = np.asarray(D.reshape(cri.shpD), dtype=S.dtype)
Slr = np.asarray(Sl.reshape(cri.shpS), dtype=S.dtype)
Shr = np.asarray(Sh.reshape(cri.shpS), dtype=S.dtype)
Shf = sl.rfftn(Shr, s=cri.Nv, axes=cri.axisN) # implicitly zero-padding

crop_op = []
for l in Dr0.shape:
    crop_op.append(slice(0, l))
crop_op = tuple(crop_op)
Dr0 = cnvrep.getPcn(Dr0.shape, cri.Nv, cri.dimN, cri.dimCd, zm=False)(cnvrep.zpad(Dr0, cri.Nv))[crop_op]
# Dr = normalize(Dr, axis=cri.axisM)

# Xr = l2norm_minimize(cri, Dr0, Shr)
# Dr = mysolve(cri, Dr0, Xr, Shr, 1e-4, maxitr=50, debug_dir='./debug')
# # Dr = nakashizuka_solve(cri, Dr0, Xr, Shr, debug_dir='./debug')
Example #20
    def solve(self, S, W=None):
        """Solve for given signal S, optionally with mask W."""
        self.cri = cr.CSC_ConvRepIndexing(self.D.squeeze()[:, :, None, None,
                                                           ...],
                                          S[:, :, None, None, ...],
                                          dimK=None,
                                          dimN=4)

        self.timer.start(['solve', 'solve_wo_eval'])

        # Initialize with CBPDN
        self.timer.start('xstep')
        copt = copy.deepcopy(self.opt['CBPDN'])
        if self.opt['OCDL', 'CUCBPDN']:
            X = cucbpdn.cbpdn(self.getdict(),
                              S.squeeze(),
                              self.lmbda,
                              opt=copt)
            X = np.asarray(X.reshape(self.cri.shpX), dtype=self.dtype)
        elif self.opt['OCDL', 'PARCBPDN']:
            popt = parcbpdn.ParConvBPDN.Options(dict(self.opt['CBPDN']))
            xstep = parcbpdn.ParConvBPDN(self.getdict(),
                                         S,
                                         self.lmbda,
                                         opt=popt,
                                         nproc=self.opt['OCDL', 'nproc'])
            X = xstep.solve()
            X = np.asarray(X.reshape(self.cri.shpX), dtype=self.dtype)
        else:
            if W is None:
                xstep = cbpdn.ConvBPDN(self.getdict(), S, self.lmbda, opt=copt)
                xstep.solve()
                X = np.asarray(xstep.getcoef().reshape(self.cri.shpX),
                               dtype=self.dtype)
            else:
                xstep = cbpdn.AddMaskSim(cbpdn.ConvBPDN,
                                         self.getdict(),
                                         S,
                                         W,
                                         self.lmbda,
                                         opt=copt)
                X = xstep.solve()
                X = np.asarray(X.reshape(self.cri.shpX), dtype=self.dtype)
                # The additive component is removed from masked signal
                add_cpnt = reconstruct_additive_component(xstep)
                S -= add_cpnt.reshape(S.shape)

        self.timer.stop('xstep')

        # update At and Bt
        self.timer.start('hessian')
        patches = self.im2slices(S)
        self.update_At(X)
        self.update_Bt(X, patches)
        self.timer.stop('hessian')
        self.Lmbda = self.dtype.type(self.alpha * self.Lmbda + 1)

        # update dictionary with FISTA
        fopt = copy.deepcopy(self.opt['CCMOD'])
        fopt['X0'] = self.D
        if self.opt['OCDL', 'DiminishingTol']:
            fopt['RelStopTol'] = \
                self.dtype.type(self.opt['CCMOD', 'RelStopTol']/(1.+self.j))
        self.timer.start('dstep')
        dstep = StripeSliceFISTA(self.At, self.Bt, opt=fopt)
        dstep.solve()
        self.timer.stop('dstep')

        # set dictionary
        self.setdict(dstep.getmin())

        self.timer.stop('solve_wo_eval')
        evl = self.evaluate(S, X)
        self.timer.start('solve_wo_eval')

        t = self.timer.elapsed(self.opt['IterTimer'])
        if self.opt['OCDL', 'CUCBPDN']:
            # this requires a slight modification of dictlrn
            itst = self.isc.iterstats(self.j, t, None, dstep.itstat[-1], evl)
        else:
            itst = self.isc.iterstats(self.j, t, xstep.itstat[-1],
                                      dstep.itstat[-1], evl)
        self.itstat.append(itst)

        if self.opt['Verbose']:
            self.isc.printiterstats(itst)

        self.j += 1

        self.timer.stop(['solve', 'solve_wo_eval'])

        if 0:
            import matplotlib.pyplot as plt
            plt.imshow(su.tiledict(self.getdict().squeeze()))
            plt.show()

        return self.getdict()
Example #21
    def __init__(self,
                 D,
                 S,
                 R,
                 opt=None,
                 lmbda=None,
                 optx=None,
                 dimK=None,
                 dimN=2,
                 *args,
                 **kwargs):
        """
        Parameters
        ----------
        xstep : internal xstep object (e.g. xstep.ConvBPDN)
        D : array_like
          Dictionary array
        S : array_like
          Signal array
        R : array_like
          Rank array
        lmbda : list of float
          Regularisation parameter
        opt : list containing :class:`ConvBPDN.Options` object
          Algorithm options for each individual solver
        dimK : 0, 1, or None, optional (default None)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial/temporal dimensions
        *args
          Variable length list of arguments for constructor of internal
          xstep object (e.g. mu)
        **kwargs
          Keyword arguments for constructor of internal xstep object
        """

        if opt is None:
            opt = AKConvBPDN.Options()
        self.opt = opt

        # Infer outer problem dimensions
        self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

        # Parse mu
        if 'mu' in kwargs:
            mu = kwargs['mu']
        else:
            mu = [0] * self.cri.dimN

        # Parse lmbda and optx
        if lmbda is None: lmbda = [None] * self.cri.dimN
        if optx is None: optx = [None] * self.cri.dimN

        # Parse isc
        if 'isc' in kwargs:
            isc = kwargs['isc']
        else:
            isc = None

        # Store parameters
        self.lmbda = lmbda
        self.optx = optx
        self.mu = mu
        self.R = R

        # Reshape D and S to standard layout
        self.D = np.asarray(D.reshape(self.cri.shpD), dtype=S.dtype)
        self.S = np.asarray(S.reshape(self.cri.shpS), dtype=S.dtype)

        # Compute signal in DFT domain
        self.Sf = sl.fftn(self.S, None, self.cri.axisN)
        # print('Sf shape %s \n' % (self.Sf.shape,))
        # print('S shape %s \n' % (self.S.shape,))
        # print('shpS %s \n' % (self.cri.shpS,))

        # Signal uni-dim (kruskal)
        # self.Skf = np.reshape(self.Sf,[np.prod(np.array(self.Sf.shape)),1],order='F')

        # Decomposed Kruskal Initialization
        self.K = []
        self.Kf = []
        Nvf = []
        for i, Nvi in enumerate(self.cri.Nv):  # Ui
            Ki = np.random.randn(Nvi, np.sum(self.R))
            Kfi = sl.pyfftw_empty_aligned(Ki.shape, self.Sf.dtype)
            Kfi[:] = sl.fftn(Ki, None, [0])
            self.K.append(Ki)
            self.Kf.append(Kfi)
            Nvf.append(Kfi.shape[0])

        self.Nvf = tuple(Nvf)

        # Fourier dimensions
        self.NC = int(np.prod(self.Nvf) * self.cri.Cd)

        # dict FFT
        self.setdict()

        # Init KCSC solver (needs to be initialised inside AKConvBPDN because it requires convolvedict() and reshapesignal())
        self.xstep = []
        for l in range(self.cri.dimN):

            Wl = self.convolvedict(l)  # convolvedict
            cri_l = KCSC_ConvRepIndexing(self.cri, self.R, l)  # cri KCSC

            self.xstep.append(KConvBPDN(Wl, np.reshape(self.Sf,cri_l.shpS,order='C'), cri_l,\
                                self.S.dtype, self.lmbda[l], self.mu[l], self.optx[l]))

        # Init isc
        if isc is None:

            isc_lst = []  # itStats from block-solver
            isc_fields = []
            for i in range(self.cri.dimN):
                str_i = '_{0!s}'.format(i)

                isc_i = IterStatsConfig(isfld=[
                    'ObjFun' + str_i, 'PrimalRsdl' + str_i, 'DualRsdl' + str_i,
                    'Rho' + str_i
                ],
                                        isxmap={
                                            'ObjFun' + str_i: 'ObjFun',
                                            'PrimalRsdl' + str_i: 'PrimalRsdl',
                                            'DualRsdl' + str_i: 'DualRsdl',
                                            'Rho' + str_i: 'Rho'
                                        },
                                        evlmap={},
                                        hdrtxt=[
                                            'Fnc' + str_i, 'r' + str_i,
                                            's' + str_i,
                                            u('ρ' + str_i)
                                        ],
                                        hdrmap={
                                            'Fnc' + str_i: 'ObjFun' + str_i,
                                            'r' + str_i: 'PrimalRsdl' + str_i,
                                            's' + str_i: 'DualRsdl' + str_i,
                                            u('ρ' + str_i): 'Rho' + str_i
                                        })
                isc_fields += isc_i.IterationStats._fields

                isc_lst.append(isc_i)

            # isc_it = IterStatsConfig(       # global itStats  -> No, to be managed in dictlearn
            #     isfld=['Iter','Time'],
            #     isxmap={},
            #     evlmap={},
            #     hdrtxt=['Itn'],
            #     hdrmap={'Itn': 'Iter'}
            # )
            #
            # isc_fields += isc_it._fields

        self.isc_lst = isc_lst
        # self.isc_it = isc_it
        self.isc = collections.namedtuple('IterationStats', isc_fields)

        # Required because dictlrn.DictLearn assumes that all valid
        # xstep objects have an IterationStats attribute
        # self.IterationStats = self.xstep.IterationStats

        self.itstat = []
        self.j = 0
Example #22
    def __init__(self, D, S, lmbda=None, opt=None, dimK=None, dimN=2):
        """
        This class supports an arbitrary number of spatial dimensions,
        `dimN`, with a default of 2. The input dictionary `D` is either
        `dimN` + 1 dimensional, in which case each spatial component
        (image in the default case) is assumed to consist of a single
        channel, or `dimN` + 2 dimensional, in which case the final
        dimension is assumed to contain the channels (e.g. colour
        channels in the case of images). The input signal set `S` is
        either `dimN` dimensional (no channels, only one signal), `dimN`
        + 1 dimensional (either multiple channels or multiple signals),
        or `dimN` + 2 dimensional (multiple channels and multiple
        signals). Determination of problem dimensions is handled by
        :class:`.cnvrep.CSC_ConvRepIndexing`.


        |

        **Call graph**

        .. image:: ../_static/jonga/cbpdn_init.svg
           :width: 20%
           :target: ../_static/jonga/cbpdn_init.svg

        |


        Parameters
        ----------
        D : array_like
          Dictionary array
        S : array_like
          Signal array
        lmbda : float
          Regularisation parameter
        opt : :class:`ConvBPDN.Options` object
          Algorithm options
        dimK : 0, 1, or None, optional (default None)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial/temporal dimensions
        """

        # Set default options if none specified
        if opt is None:
            opt = ComplexConvBPDN.Options()

        # Set dtype attribute based on S.dtype and opt['DataType']
        self.set_dtype(opt, S.dtype)

        # Set default lambda value if not specified
        if lmbda is None:
            cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
            Df = fftn(D.reshape(cri.shpD), cri.Nv, axes=cri.axisN)
            Sf = fftn(S.reshape(cri.shpS), axes=cri.axisN)
            b = np.conj(Df) * Sf
            lmbda = 0.1 * abs(b).max()

        # Set l1 term scaling
        self.lmbda = self.dtype.type(lmbda)

        # Set penalty parameter
        self.set_attr('rho', opt['rho'], dval=(50.0 * self.lmbda + 1.0),
                      dtype=self.dtype)

        # Set rho_xi attribute (see Sec. VI.C of wohlberg-2015-adaptive)
        if self.lmbda != 0.0:
            # rho_xi = float((1.0 + (18.3)**(np.log10(self.lmbda) + 1.0)))
            rho_xi = (1.0 + (18.3) ** (np.log10(self.lmbda) + 1.0))
        else:
            rho_xi = 1.0
        self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=rho_xi,
                      dtype=self.dtype)

        # Call parent class __init__
        super(ComplexConvBPDN, self).__init__(D, S, opt, dimK, dimN)

        # Set l1 term weight array
        self.wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
        self.wl1 = self.wl1.reshape(cr.l1Wshape(self.wl1, self.cri))
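The `rho` and `rho_xi` defaults above are simple closed forms in `lmbda`: `rho = 50*lmbda + 1` and, for `lmbda > 0`, `rho_xi = 1 + 18.3**(log10(lmbda) + 1)` (see Sec. VI.C of wohlberg-2015-adaptive). A minimal sketch with a worked value:

# Minimal sketch of the default rho / rho_xi computations used above.
import numpy as np

def default_rho(lmbda):
    return 50.0 * lmbda + 1.0

def default_rho_xi(lmbda):
    if lmbda != 0.0:
        return 1.0 + 18.3 ** (np.log10(lmbda) + 1.0)
    return 1.0

print(default_rho(0.1))     # 6.0
print(default_rho_xi(0.1))  # 2.0, since log10(0.1) + 1 == 0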
Example #23
    def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
        """

        |

        **Call graph**

        .. image:: ../_static/jonga/cbpdnrtv_init.svg
           :width: 20%
           :target: ../_static/jonga/cbpdnrtv_init.svg

        |


        Parameters
        ----------
        D : array_like
          Dictionary matrix
        S : array_like
          Signal vector or matrix
        lmbda : float
          Regularisation parameter (l1)
        mu : float
          Regularisation parameter (gradient)
        opt : :class:`ConvBPDNRecTV.Options` object
          Algorithm options
        dimK : 0, 1, or None, optional (default None)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial dimensions
        """

        if opt is None:
            opt = ConvBPDNRecTV.Options()

        # Infer problem dimensions and set relevant attributes of self
        self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

        # Call parent class __init__
        Nx = np.prod(np.array(self.cri.shpX))
        yshape = list(self.cri.shpX)
        yshape[self.cri.axisM] += len(self.cri.axisN) * self.cri.Cd
        super(ConvBPDNRecTV, self).__init__(Nx, yshape, yshape, S.dtype, opt)

        # Set l1 term scaling and weight array
        self.lmbda = self.dtype.type(lmbda)
        self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
        self.Wl1 = self.Wl1.reshape(cr.l1Wshape(self.Wl1, self.cri))

        self.mu = self.dtype.type(mu)
        if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
            self.Wtv = np.asarray(
                opt['TVWeight'].reshape((1, ) * (dimN + 2) +
                                        opt['TVWeight'].shape),
                dtype=self.dtype)
        else:
            # Wtv is a scalar: no need to change shape
            self.Wtv = self.dtype.type(opt['TVWeight'])

        # Set penalty parameter
        self.set_attr('rho',
                      opt['rho'],
                      dval=(50.0 * self.lmbda + 1.0),
                      dtype=self.dtype)

        # Set rho_xi attribute
        self.set_attr('rho_xi',
                      opt['AutoRho', 'RsdlTarget'],
                      dval=1.0,
                      dtype=self.dtype)

        # Reshape D and S to standard layout
        self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
        self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

        # Compute signal in DFT domain
        self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

        self.Gf, GHGf = sl.gradient_filters(self.cri.dimN + 3,
                                            self.cri.axisN,
                                            self.cri.Nv,
                                            dtype=self.dtype)

        # Initialise byte-aligned arrays for pyfftw
        self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
        self.Xf = sl.pyfftw_rfftn_empty_aligned(self.cri.shpX, self.cri.axisN,
                                                self.dtype)

        self.setdict()