Ejemplo n.º 1
0
 def test_11(self):
     """ADMM ConvBPDN recovers a sparse random coding of a random
     dictionary, and its reconstruction matches the synthesised signal."""
     N, M, Nd = 63, 4, 8
     D = np.random.randn(Nd, Nd, M)
     X0 = np.zeros((N, N, M))
     mask = np.abs(np.random.randn(N, N, M)) > 3
     X0[mask] = np.random.randn(X0[mask].size)
     # Synthesise the signal in the DFT domain
     Df = sl.fftn(D, (N, N), (0, 1))
     X0f = sl.fftn(X0, None, (0, 1))
     S = np.sum(sl.ifftn(Df * X0f, None, (0, 1)).real, axis=2)
     lmbda, rho = 1e-4, 1e-1
     opt = cbpdn.ConvBPDN.Options(
         {'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-3,
          'rho': rho, 'AutoRho': {'Enabled': False}})
     b = cbpdn.ConvBPDN(D, S, lmbda, opt)
     b.solve()
     assert sl.rrs(X0, b.Y.squeeze()) < 5e-5
     assert sl.rrs(S, b.reconstruct().squeeze()) < 1e-4
Ejemplo n.º 2
0
 def test_02(self):
     """CG-based ConvCnstrMOD recovers a known zero-mean, normalised
     dictionary from a random sparse coding."""
     N, M, Nd = 32, 4, 5
     D0 = cr.normalise(cr.zeromean(
         np.random.randn(Nd, Nd, M), (Nd, Nd, M), dimN=2), dimN=2)
     X = np.zeros((N, N, M))
     mask = np.abs(np.random.randn(N, N, M)) > 3
     X[mask] = np.random.randn(X[mask].size)
     # Synthesise the signal in the DFT domain
     D0f = sl.fftn(D0, (N, N), (0, 1))
     Xf = sl.fftn(X, None, (0, 1))
     S = np.sum(sl.ifftn(D0f * Xf, None, (0, 1)).real, axis=2)
     opt = ccmod.ConvCnstrMOD_CG.Options(
         {'Verbose': False, 'MaxMainIter': 500, 'LinSolveCheck': True,
          'ZeroMean': True, 'RelStopTol': 1e-5, 'rho': 1e-1,
          'AutoRho': {'Enabled': False}, 'CG': {'StopTol': 1e-5}})
     Xr = X.reshape(X.shape[0:2] + (1, 1) + X.shape[2:])
     Sr = S.reshape(S.shape + (1,))
     c = ccmod.ConvCnstrMOD_CG(Xr, Sr, D0.shape, opt)
     c.solve()
     D1 = cr.bcrop(c.Y, D0.shape).squeeze()
     assert sl.rrs(D0, D1) < 1e-4
     assert np.array(c.getitstat().XSlvRelRes).max() < 1e-3
Ejemplo n.º 3
0
    def test_13(self):
        """FISTA ConvCnstrMOD with backtracking recovers the generating
        dictionary and drives the residual below tolerance."""
        N, M, Nd = 64, 4, 8
        D0 = cr.normalise(cr.zeromean(
            np.random.randn(Nd, Nd, M), (Nd, Nd, M), dimN=2), dimN=2)
        X = np.zeros((N, N, M))
        mask = np.abs(np.random.randn(N, N, M)) > 3
        X[mask] = np.random.randn(X[mask].size)
        # Synthesise the signal in the DFT domain
        D0f = sl.fftn(D0, (N, N), (0, 1))
        Xf = sl.fftn(X, None, (0, 1))
        S = np.sum(sl.ifftn(D0f * Xf, None, (0, 1)).real, axis=2)
        opt = ccmod.ConvCnstrMOD.Options(
            {'Verbose': False, 'MaxMainIter': 3000, 'ZeroMean': True,
             'RelStopTol': 0., 'L': 0.5, 'BackTrack': {'Enabled': True}})
        Xr = X.reshape(X.shape[0:2] + (1, 1) + X.shape[2:])
        Sr = S.reshape(S.shape + (1,))
        c = ccmod.ConvCnstrMOD(Xr, Sr, D0.shape, opt)
        c.solve()
        D1 = cr.bcrop(c.X, D0.shape).squeeze()

        assert sl.rrs(D0, D1) < 1e-4
        assert np.array(c.getitstat().Rsdl)[-1] < 1e-5
Ejemplo n.º 4
0
 def test_02(self):
     # Dictionary update (CG solver): a random sparse coding of a known
     # zero-mean, normalised dictionary should be recovered.
     N = 32
     M = 4
     Nd = 5
     D0 = cr.normalise(
         cr.zeromean(np.random.randn(Nd, Nd, M), (Nd, Nd, M), dimN=2),
         dimN=2)
     X = np.zeros((N, N, M))
     xr = np.random.randn(N, N, M)
     xp = np.abs(xr) > 3
     X[xp] = np.random.randn(X[xp].size)
     Sf = sl.fftn(D0, (N, N), (0, 1)) * sl.fftn(X, None, (0, 1))
     S = np.sum(sl.ifftn(Sf, None, (0, 1)).real, axis=2)
     rho = 1e-1
     options = {'Verbose': False, 'MaxMainIter': 500,
                'LinSolveCheck': True, 'ZeroMean': True,
                'RelStopTol': 1e-5, 'rho': rho,
                'AutoRho': {'Enabled': False}, 'CG': {'StopTol': 1e-5}}
     opt = ccmod.ConvCnstrMOD_CG.Options(options)
     Xr = X.reshape(X.shape[0:2] + (1, 1) + X.shape[2:])
     Sr = S.reshape(S.shape + (1,))
     c = ccmod.ConvCnstrMOD_CG(Xr, Sr, D0.shape, opt)
     c.solve()
     D1 = cr.bcrop(c.Y, D0.shape).squeeze()
     assert sl.rrs(D0, D1) < 1e-4
     assert np.array(c.getitstat().XSlvRelRes).max() < 1e-3
Ejemplo n.º 5
0
 def test_10(self):
     """FISTA ConvBPDN with a fixed step size (no backtracking) recovers
     a sparse random coding and reconstructs the signal."""
     N, M, Nd = 64, 4, 8
     D = np.random.randn(Nd, Nd, M)
     X0 = np.zeros((N, N, M))
     mask = np.abs(np.random.randn(N, N, M)) > 3
     X0[mask] = np.random.randn(X0[mask].size)
     # Synthesise the signal in the DFT domain
     Df = sl.fftn(D, (N, N), (0, 1))
     X0f = sl.fftn(X0, None, (0, 1))
     S = np.sum(sl.ifftn(Df * X0f, None, (0, 1)).real, axis=2)
     opt = cbpdn.ConvBPDN.Options(
         {'Verbose': False, 'MaxMainIter': 2000, 'RelStopTol': 1e-5,
          'L': 1e3, 'BackTrack': {'Enabled': False}})
     b = cbpdn.ConvBPDN(D, S, 1e-2, opt)
     b.solve()
     assert sl.rrs(X0, b.X.squeeze()) < 5e-4
     assert sl.rrs(S, b.reconstruct().squeeze()) < 2e-4
Ejemplo n.º 6
0
def tikhonov_filter(s, lmbda, npd=16):
    r"""Lowpass filter based on Tikhonov regularization.

    Split image(s) into low and high frequency components: the lowpass
    part is the lowpass-filtered image and the highpass part is its
    difference with the input. The filter is equivalent to Tikhonov
    regularization with `lmbda` as the regularization parameter and a
    discrete gradient as the operator in the regularization term, i.e.
    the lowpass component solves

    .. math::
      \mathrm{argmin}_\mathbf{x} \; (1/2) \left\|\mathbf{x} - \mathbf{s}
      \right\|_2^2 + (\lambda / 2) \sum_i \| G_i \mathbf{x} \|_2^2 \;\;,

    where :math:`\mathbf{s}` is the input, :math:`\lambda` the
    regularization parameter, and :math:`G_i` the discrete gradient
    along axis :math:`i`. The highpass component is then
    :math:`\mathbf{s} - \mathbf{x}`.

    Parameters
    ----------
    s : array_like
      Input image or array of images.
    lmbda : float
      Regularization parameter controlling lowpass filtering.
    npd : int, optional (default=16)
      Number of samples to pad at image boundaries.

    Returns
    -------
    sl : array_like
      Lowpass image or array of images.
    sh : array_like
      Highpass image or array of images.
    """

    # DFTs of the vertical/horizontal difference kernels on the padded grid
    padded = (s.shape[0] + 2 * npd, s.shape[1] + 2 * npd)
    Grf = sla.fftn(np.array([[-1.0], [1.0]]), padded, (0, 1))
    Gcf = sla.fftn(np.array([[-1.0, 1.0]]), padded, (0, 1))
    # Per-frequency denominator of the Tikhonov solution
    A = 1.0 + lmbda * np.conj(Grf) * Grf + lmbda * np.conj(Gcf) * Gcf
    if s.ndim > 2:
        # Broadcast the 2D filter over any trailing (e.g. channel) axes
        A = A[(slice(None),) * 2 + (np.newaxis,) * (s.ndim - 2)]
    # Symmetric padding reduces circular-filtering boundary artefacts
    pad = ((npd, npd),) * 2 + ((0, 0),) * (s.ndim - 2)
    sp = np.pad(s, pad, 'symmetric')
    slp = np.real(sla.ifftn(sla.fftn(sp, axes=(0, 1)) / A, axes=(0, 1)))
    low = slp[npd:(slp.shape[0] - npd), npd:(slp.shape[1] - npd)]
    high = s - low
    return low.astype(s.dtype), high.astype(s.dtype)
Ejemplo n.º 7
0
def tikhonov_filter(s, lmbda, npd=16):
    r"""Lowpass filter based on Tikhonov regularization.

    Return the lowpass-filtered image(s) together with the highpass
    residual. The lowpass component is the solution to

    .. math::
      \mathrm{argmin}_\mathbf{x} \; (1/2) \left\|\mathbf{x} - \mathbf{s}
      \right\|_2^2 + (\lambda / 2) \sum_i \| G_i \mathbf{x} \|_2^2 \;\;,

    with :math:`\mathbf{s}` the input image, :math:`\lambda` the
    regularization parameter, and :math:`G_i` a discrete gradient
    operator along image axis :math:`i`; the highpass component is
    :math:`\mathbf{s} - \mathbf{x}`.

    Parameters
    ----------
    s : array_like
      Input image or array of images.
    lmbda : float
      Regularization parameter controlling lowpass filtering.
    npd : int, optional (default=16)
      Number of samples to pad at image boundaries.

    Returns
    -------
    sl : array_like
      Lowpass image or array of images.
    sh : array_like
      Highpass image or array of images.
    """

    nr = s.shape[0] + 2 * npd
    nc = s.shape[1] + 2 * npd
    # Row and column difference kernels in the DFT domain
    grad_r = np.array([-1.0, 1.0])[:, np.newaxis]
    grad_c = np.array([-1.0, 1.0])[np.newaxis, :]
    Gr = sla.fftn(grad_r, (nr, nc), (0, 1))
    Gc = sla.fftn(grad_c, (nr, nc), (0, 1))
    A = 1.0 + lmbda * np.conj(Gr) * Gr + lmbda * np.conj(Gc) * Gc
    if s.ndim > 2:
        # Apply the same 2D filter to every trailing-axis slice
        A = A[(slice(None),) * 2 + (np.newaxis,) * (s.ndim - 2)]
    sp = np.pad(s, ((npd, npd),) * 2 + ((0, 0),) * (s.ndim - 2),
                'symmetric')
    slp = np.real(sla.ifftn(sla.fftn(sp, axes=(0, 1)) / A, axes=(0, 1)))
    low = slp[npd:(slp.shape[0] - npd), npd:(slp.shape[1] - npd)]
    return low.astype(s.dtype), (s - low).astype(s.dtype)
Ejemplo n.º 8
0
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.

        Solves, independently per spatial frequency, the linear system of
        the x-update using the precomputed Cholesky factors ``self.c``.
        (Removed: a leftover debug ``print`` that wrote to stdout on every
        call, and dead commented-out code.)
        """

        self.YU[:] = self.Y - self.U

        # Right-hand side of the linear system in the DFT domain
        b = (self.WSf +
             self.rho * sl.fftn(self.YU, None, self.cri.axisn).squeeze())

        # One Cholesky solve per spatial frequency.
        # NOTE(review): this assumes a single-channel dictionary
        # (cri.Cd == 1); the multi-channel case was never implemented.
        for i in range(self.cri.n):
            self.Xf[:, 0, i] = linalg.cho_solve(self.c[i], b[:, i],
                                                check_finite=True)

        # NOTE(review): forward transform above is fftn but the inverse is
        # irfftn — confirm Xf has the conjugate-symmetric layout irfftn
        # expects.
        self.X = sl.irfftn(self.Xf, [self.cri.n], self.cri.axisn)
Ejemplo n.º 9
0
    def setdict(self, D=None):
        """Set dictionary array and refresh its DFT-domain representations.

        (Removed: six leftover debug ``print`` statements and dead
        commented-out code.)

        Parameters
        ----------
        D : array_like, optional (default None)
          New dictionary array; if None, the current ``self.D`` is kept
          and only the derived quantities are recomputed.
        """

        if D is not None:
            self.D = np.asarray(D, dtype=self.dtype)

        # Dictionary in the DFT domain over the Fourier grid self.Nvf
        self.Df = sl.fftn(self.D, self.Nvf, self.cri.axisN)

        # Flatten the DFT-domain dictionary to an (NC, M) matrix and apply
        # the Kruskal weights, yielding Df_mat with shape (NC, R*M).
        NC = self.NC
        M = self.cri.M
        self.Df_mat = np.dot(np.reshape(self.Df, [NC, M], order='F'),
                             self.getweights().transpose())
Ejemplo n.º 10
0
def tikhonov_filter(s, lmbda, npd=16):
    """Lowpass/highpass decomposition via Tikhonov regularization.

    The lowpass component is the Tikhonov-regularised solution, with a
    discrete gradient operator in the penalty term and `lmbda` as the
    regularization parameter; the highpass component is the residual
    ``s - lowpass``.

    Parameters
    ----------
    s : array_like
      Input image or array of images.
    lmbda : float
      Regularization parameter controlling lowpass filtering.
    npd : int, optional (default=16)
      Number of samples to pad at image boundaries.

    Returns
    -------
    sl : array_like
      Lowpass image or array of images.
    sh : array_like
      Highpass image or array of images.
    """

    padded = (s.shape[0] + 2 * npd, s.shape[1] + 2 * npd)
    # DFTs of the vertical and horizontal difference kernels
    Gr = sla.fftn(np.array([[-1.0], [1.0]]), padded, (0, 1))
    Gc = sla.fftn(np.array([[-1.0, 1.0]]), padded, (0, 1))
    A = 1.0 + lmbda * np.conj(Gr) * Gr + lmbda * np.conj(Gc) * Gc
    if s.ndim > 2:
        # Broadcast the 2D filter over trailing axes
        A = A[(slice(None),) * 2 + (np.newaxis,) * (s.ndim - 2)]
    sp = np.pad(s, ((npd, npd),) * 2 + ((0, 0),) * (s.ndim - 2),
                'symmetric')
    slp = np.real(sla.ifftn(sla.fftn(sp, axes=(0, 1)) / A, axes=(0, 1)))
    low = slp[npd:(slp.shape[0] - npd), npd:(slp.shape[1] - npd)]
    high = s - low
    return low.astype(s.dtype), high.astype(s.dtype)
Ejemplo n.º 11
0
 def test_11(self):
     # Sparse recovery with ConvBPDN (FISTA, fixed step, no backtracking).
     N = 63
     M = 4
     Nd = 8
     D = np.random.randn(Nd, Nd, M)
     X0 = np.zeros((N, N, M))
     xr = np.random.randn(N, N, M)
     xp = np.abs(xr) > 3
     X0[xp] = np.random.randn(X0[xp].size)
     Sf = sl.fftn(D, (N, N), (0, 1)) * sl.fftn(X0, None, (0, 1))
     S = np.sum(sl.ifftn(Sf, None, (0, 1)).real, axis=2)
     lmbda = 1e-2
     L = 1e3
     opt = cbpdn.ConvBPDN.Options(
         {'Verbose': False, 'MaxMainIter': 2000, 'RelStopTol': 1e-9,
          'L': L, 'BackTrack': {'Enabled': False}})
     b = cbpdn.ConvBPDN(D, S, lmbda, opt)
     b.solve()
     X1 = b.X.squeeze()
     assert sl.rrs(X0, X1) < 5e-4
     Sr = b.reconstruct().squeeze()
     assert sl.rrs(S, Sr) < 2e-4
Ejemplo n.º 12
0
 def test_09(self):
     """Parallel ConvBPDN solver recovers a sparse random coding and its
     reconstruction matches the synthesised signal."""
     N, M, Nd = 64, 4, 8
     D = np.random.randn(Nd, Nd, M)
     X0 = np.zeros((N, N, M))
     mask = np.abs(np.random.randn(N, N, M)) > 3
     X0[mask] = np.random.randn(X0[mask].size)
     # Synthesise the signal in the DFT domain
     Df = sl.fftn(D, (N, N), (0, 1))
     X0f = sl.fftn(X0, None, (0, 1))
     S = np.sum(sl.ifftn(Df * X0f, None, (0, 1)).real, axis=2)
     opt = parcbpdn.ParConvBPDN.Options(
         {'Verbose': False, 'MaxMainIter': 1000, 'RelStopTol': 1e-3,
          'rho': 3e-3, 'alpha': 6, 'AutoRho': {'Enabled': False}})
     b = parcbpdn.ParConvBPDN(D, S, 1e-4, opt=opt)
     b.solve()
     assert sl.rrs(X0, b.Y.squeeze()) < 5e-5
     assert sl.rrs(S, b.reconstruct().squeeze()) < 1e-4
Ejemplo n.º 13
0
    def test_13(self):
        # Dictionary update via FISTA with backtracking enabled; the
        # learned dictionary must match the generating one.
        N = 64
        M = 4
        Nd = 8
        D0 = cr.normalise(
            cr.zeromean(np.random.randn(Nd, Nd, M), (Nd, Nd, M), dimN=2),
            dimN=2)
        X = np.zeros((N, N, M))
        xr = np.random.randn(N, N, M)
        xp = np.abs(xr) > 3
        X[xp] = np.random.randn(X[xp].size)
        Sf = sl.fftn(D0, (N, N), (0, 1)) * sl.fftn(X, None, (0, 1))
        S = np.sum(sl.ifftn(Sf, None, (0, 1)).real, axis=2)
        L = 0.5
        opt = ccmod.ConvCnstrMOD.Options({'Verbose': False,
                                          'MaxMainIter': 3000,
                                          'ZeroMean': True,
                                          'RelStopTol': 0., 'L': L,
                                          'BackTrack': {'Enabled': True}})
        Xr = X.reshape(X.shape[0:2] + (1, 1) + X.shape[2:])
        Sr = S.reshape(S.shape + (1,))
        c = ccmod.ConvCnstrMOD(Xr, Sr, D0.shape, opt)
        c.solve()
        D1 = cr.bcrop(c.X, D0.shape).squeeze()

        assert sl.rrs(D0, D1) < 1e-4
        assert np.array(c.getitstat().Rsdl)[-1] < 1e-5
Ejemplo n.º 14
0
    def solve(self):
        """Run the alternating optimisation: on each outer iteration,
        solve the inner KConvBPDN sub-problem along every decomposition
        dimension, then back-transform the Kruskal factors to the spatial
        domain.
        """

        # Main optimisation iterations
        for self.j in range(self.j, self.j + self.opt['MaxMainIter']):

            # Per-iteration stats accumulator. Fixed: previously declared
            # once outside this loop, so it kept growing and the
            # namedtuple cast below would fail after the first iteration.
            itst = []

            for l in range(self.cri.dimN):

                # Pre x-step: project the dictionary onto dimension l and
                # hand it to the corresponding inner solver
                Wl = self.convolvedict(l)
                self.xstep[l].setdictf(Wl)

                # Solve the KCSC sub-problem for this dimension
                self.xstep[l].solve()

                # Post x-step: refresh the DFT-domain Kruskal factor
                Kl = np.moveaxis(self.xstep[l].getcoef().squeeze(),
                                 [0, 1], [1, 0])
                self.Kf[l] = sl.fftn(Kl, None, [0])

                # Last inner-solver stats record, or an all-zero record if
                # it produced none. Fixed: self.xstep is a list of
                # per-dimension solvers (indexed [l] everywhere else), so
                # the bare attribute access raised AttributeError.
                if self.xstep[l].itstat:
                    xitstat = self.xstep[l].itstat[-1]
                else:
                    xitstat = self.xstep[l].IterationStats(
                        *([0.0] *
                          len(self.xstep[l].IterationStats._fields)))

                # Accumulate
                itst += self.isc_lst[l].iterstats(self.j, 0, xitstat, 0)

            # Cast to global itstats and store
            self.itstat.append(self.isc(*itst))

        # Back-transform each Kruskal factor to the spatial domain.
        # NOTE(review): the forward transform above is fftn but the
        # inverse here is irfftn — confirm the conjugate-symmetric layout
        # assumption holds.
        for l in range(self.cri.dimN):
            self.K[l] = sl.irfftn(self.Kf[l], self.cri.Nv[l], [0])

        self.j += 1
Ejemplo n.º 15
0
 def test_19(self):
     # Parseval check: fl2norm2 of the DFT equals the squared l2 norm.
     x = np.random.randn(16, 8)
     xf = linalg.fftn(x, axes=(0,))
     n1 = np.linalg.norm(x) ** 2
     assert np.abs(n1 - linalg.fl2norm2(xf, axis=(0,))) < 1e-12
Ejemplo n.º 16
0
 def test_19(self):
     """DFT-domain squared l2 norm matches the spatial-domain one."""
     x = np.random.randn(16, 8)
     spatial = np.linalg.norm(x) ** 2
     frequency = linalg.fl2norm2(linalg.fftn(x, axes=(0,)), axis=(0,))
     assert np.abs(spatial - frequency) < 1e-12
Ejemplo n.º 17
0
def T_ConvFISTA_precompute(Gram,
                           Achapy,
                           Z_init,
                           L,
                           lbd,
                           beta,
                           maxit,
                           tol=1e-5,
                           verbose=False):
    """Minimisation of the sub-block of Z via FISTA, using precomputed
    quantities.

    Fixes over the previous version: progress prints are gated behind
    ``verbose`` instead of always writing to stdout, the iteration
    counter is an int, and ``maxit <= 0`` no longer crashes on
    ``zip(*[])``.

    Parameters
    ----------
    Gram : array_like
      Gram matrix of the (vectorised, DFT-domain) linear operator.
    Achapy : array_like
      Vector A * y.
    Z_init : array_like, shape (K, N_i, R)
      Initialization of the activations.
    L : float
      Lipschitz constant (gradient step is 1/L).
    lbd : float
      l1 penalty weight (soft-threshold level is lbd / L).
    beta : float
      l2 (Tikhonov) penalty weight; both are hyperparameters.
    maxit : int
      Maximum number of iterations.
    tol : float, optional (default 1e-5)
      Stop when the largest absolute update of Z falls below this value.
    verbose : bool, optional (default False)
      Print per-iteration progress.

    Returns
    -------
    Zpred : ndarray
      Estimated activations.
    times : ndarray
      Elapsed time at each iteration.
    pobj : ndarray
      Convergence measure (max absolute update) at each iteration.
    """

    K, N_i, R = Z_init.shape

    pobj = []
    time0 = time.time()

    Zpred = Z_init.copy()
    Xfista = Z_init.copy()
    t = 1
    ite = 0  # iteration counter is integral, not float
    tol_it = tol + 1.
    while (tol_it > tol) and (ite < maxit):
        if verbose:
            print(ite)

        Zpred_old = Zpred.copy()

        # DFT of the activations along the sample axis
        Xfistachap = fftn(Xfista, axes=[1])

        # Vectorization (column-major to match the Gram layout)
        xfistachap = np.reshape(Xfistachap, (K, N_i * R), order='F').ravel()

        # Gradient of the smooth term (data fit + l2 penalty)
        gradf = (Gram.dot(xfistachap) - Achapy) + 2 * beta * xfistachap

        # Descent step
        xfistachap = np.array(xfistachap - gradf / L)

        # Matricization back to (K, N_i, R)
        Xfistachap = xfistachap.reshape(K, N_i * R).reshape((K, N_i, R),
                                                            order='F')

        # IDFT of the activations
        Xfista = np.real(ifftn(Xfistachap, axes=[1]))

        # Soft-thresholding (proximal step for the l1 penalty)
        Zpred = np.sign(Xfista) * np.fmax(abs(Xfista) - lbd / L, 0.)

        # Nesterov Momentum
        t0 = t
        t = (1. + np.sqrt(1. + 4. * t**2)) / 2.
        Xfista = Zpred + ((t0 - 1.) / t) * (Zpred - Zpred_old)

        # Stopping criterion: largest absolute change in Z
        tol_it = np.max(abs(Zpred_old - Zpred))

        ite += 1
        pobj.append((time.time() - time0, tol_it))

    if verbose:
        print('last iteration:', ite)
    if pobj:
        times, pobj = map(np.array, zip(*pobj))
    else:
        # No iteration ran (e.g. maxit <= 0): return empty histories
        # instead of crashing on zip(*[]).
        times, pobj = np.array([]), np.array([])
    return Zpred, times, pobj
Ejemplo n.º 18
0
    def __init__(self,
                 D,
                 S,
                 R,
                 opt=None,
                 lmbda=None,
                 optx=None,
                 dimK=None,
                 dimN=2,
                 *args,
                 **kwargs):
        """Initialise the alternating Kruskal ConvBPDN solver state.

        Sets up problem dimensions, DFT-domain signal, randomly
        initialised Kruskal factors, one inner KConvBPDN solver per
        spatial/temporal dimension, and the per-dimension iteration-stats
        configuration.

        Parameters
        ----------
        xstep : internal xstep object (e.g. xstep.ConvBPDN)
        D : array_like
          Dictionary array
        S : array_like
          Signal array
        R : array_like
          Rank array
        lmbda : list of float
          Regularisation parameter
        opt : list containing :class:`ConvBPDN.Options` object
          Algorithm options for each individual solver
        dimK : 0, 1, or None, optional (default None)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial/temporal dimensions
        *args
          Variable length list of arguments for constructor of internal
          xstep object (e.g. mu)
        **kwargs
          Keyword arguments for constructor of internal xstep object
        """

        if opt is None:
            opt = AKConvBPDN.Options()
        self.opt = opt

        # Infer outer problem dimensions
        self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

        # Parse mu: one l2 penalty weight per dimension (default 0)
        if 'mu' in kwargs:
            mu = kwargs['mu']
        else:
            mu = [0] * self.cri.dimN

        # Parse lmbda and optx: one entry per dimension (default None)
        if lmbda is None: lmbda = [None] * self.cri.dimN
        if optx is None: optx = [None] * self.cri.dimN

        # Parse isc
        if 'isc' in kwargs:
            isc = kwargs['isc']
        else:
            isc = None

        # Store parameters
        self.lmbda = lmbda
        self.optx = optx
        self.mu = mu
        self.R = R

        # Reshape D and S to standard layout
        self.D = np.asarray(D.reshape(self.cri.shpD), dtype=S.dtype)
        self.S = np.asarray(S.reshape(self.cri.shpS), dtype=S.dtype)

        # Compute signal in DFT domain
        self.Sf = sl.fftn(self.S, None, self.cri.axisN)
        # print('Sf shape %s \n' % (self.Sf.shape,))
        # print('S shape %s \n' % (self.S.shape,))
        # print('shpS %s \n' % (self.cri.shpS,))

        # Signal uni-dim (kruskal)
        # self.Skf = np.reshape(self.Sf,[np.prod(np.array(self.Sf.shape)),1],order='F')

        # Decomposed Kruskal Initialization: one random factor matrix per
        # dimension, kept in both spatial (K) and DFT (Kf) domains
        self.K = []
        self.Kf = []
        Nvf = []
        for i, Nvi in enumerate(self.cri.Nv):  # Ui
            Ki = np.random.randn(Nvi, np.sum(self.R))
            Kfi = sl.pyfftw_empty_aligned(Ki.shape, self.Sf.dtype)
            Kfi[:] = sl.fftn(Ki, None, [0])
            self.K.append(Ki)
            self.Kf.append(Kfi)
            Nvf.append(Kfi.shape[0])

        self.Nvf = tuple(Nvf)

        # Fourier dimensions: total number of frequency samples times the
        # number of dictionary channels
        self.NC = int(np.prod(self.Nvf) * self.cri.Cd)

        # dict FFT
        self.setdict()

        # Init KCSC solver (Needs to be initiated inside AKConvBPDN because requires convolvedict() and reshapesignal())
        # One inner solver per dimension, each seeing the dictionary
        # convolved with the other dimensions' factors.
        self.xstep = []
        for l in range(self.cri.dimN):

            Wl = self.convolvedict(l)  # convolvedict
            cri_l = KCSC_ConvRepIndexing(self.cri, self.R, l)  # cri KCSC

            self.xstep.append(KConvBPDN(Wl, np.reshape(self.Sf,cri_l.shpS,order='C'), cri_l,\
                                self.S.dtype, self.lmbda[l], self.mu[l], self.optx[l]))

        # Init isc
        # NOTE(review): if a caller passes isc via kwargs, isc_lst and
        # isc_fields are never assigned and the attribute assignments
        # below raise NameError — confirm whether an externally supplied
        # isc is actually supported.
        if isc is None:

            isc_lst = []  # itStats from block-solver
            isc_fields = []
            for i in range(self.cri.dimN):
                # Per-dimension stats get a '_i' suffix so all dimensions
                # can share one flat namedtuple
                str_i = '_{0!s}'.format(i)

                isc_i = IterStatsConfig(isfld=[
                    'ObjFun' + str_i, 'PrimalRsdl' + str_i, 'DualRsdl' + str_i,
                    'Rho' + str_i
                ],
                                        isxmap={
                                            'ObjFun' + str_i: 'ObjFun',
                                            'PrimalRsdl' + str_i: 'PrimalRsdl',
                                            'DualRsdl' + str_i: 'DualRsdl',
                                            'Rho' + str_i: 'Rho'
                                        },
                                        evlmap={},
                                        hdrtxt=[
                                            'Fnc' + str_i, 'r' + str_i,
                                            's' + str_i,
                                            u('ρ' + str_i)
                                        ],
                                        hdrmap={
                                            'Fnc' + str_i: 'ObjFun' + str_i,
                                            'r' + str_i: 'PrimalRsdl' + str_i,
                                            's' + str_i: 'DualRsdl' + str_i,
                                            u('ρ' + str_i): 'Rho' + str_i
                                        })
                isc_fields += isc_i.IterationStats._fields

                isc_lst.append(isc_i)

            # isc_it = IterStatsConfig(       # global itStats  -> No, to be managed in dictlearn
            #     isfld=['Iter','Time'],
            #     isxmap={},
            #     evlmap={},
            #     hdrtxt=['Itn'],
            #     hdrmap={'Itn': 'Iter'}
            # )
            #
            # isc_fields += isc_it._fields

        self.isc_lst = isc_lst
        # self.isc_it = isc_it
        self.isc = collections.namedtuple('IterationStats', isc_fields)

        # Required because dictlrn.DictLearn assumes that all valid
        # xstep objects have an IterationStats attribute
        # self.IterationStats = self.xstep.IterationStats

        # Iteration history and outer iteration counter
        self.itstat = []
        self.j = 0