Example #1
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        Zf = sl.rfftn(self.YU, None, self.cri.axisN)
        Z0f = self.block_sep0(Zf)
        Z1f = self.block_sep1(Zf)

        DZ0f = np.conj(self.Df) * Z0f
        DZ0fBQ = sl.dot(self.B.dot(self.Q).T, DZ0f, axis=self.cri.axisC)
        Z1fQ = sl.dot(self.Q.T, Z1f, axis=self.cri.axisC)
        b = DZ0fBQ + Z1fQ

        Xh = sl.solvedbd_sm(self.gDf, (self.mu / self.rho) * self.GHGf + 1.0,
                            b, self.c, axis=self.cri.axisM)
        self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf,
                                               axis=self.cri.axisM)
            DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = self.rho * (DDXfBB + self.Xf) + \
                 self.mu * self.GHGf * self.Xf
            b = self.rho * (sl.dot(self.B.T, DZ0f, axis=self.cri.axisC)
                            + Z1f)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
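
The call to sl.solvedbd_sm above solves a linear system whose coefficient matrix is a rank-one term plus a diagonal term, which admits a closed-form inverse via the Sherman-Morrison identity. The following is a minimal self-contained NumPy sketch of that identity (illustrative shapes only, not the SPORCO API):

import numpy as np

# Solve (a a^H + diag(d)) x = b without forming the matrix: by
# Sherman-Morrison, with A = diag(d), u = a, v = conj(a),
#   x = b/d - ((a^H (b/d)) / (1 + a^H (a/d))) * (a/d)
rng = np.random.default_rng(0)
M = 8
a = rng.standard_normal(M) + 1j * rng.standard_normal(M)
d = rng.random(M) + 1.0          # strictly positive diagonal
b = rng.standard_normal(M) + 1j * rng.standard_normal(M)

bd, ad = b / d, a / d
x_sm = bd - (np.conj(a) @ bd) / (1.0 + np.conj(a) @ ad) * ad

# Reference: dense solve of the explicitly constructed system
x_ref = np.linalg.solve(np.outer(a, np.conj(a)) + np.diag(d), b)
assert np.linalg.norm(x_sm - x_ref) < 1e-10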
Example #2
    def setdict(self, D=None, B=None):
        """Set dictionary array."""

        if D is not None:
            self.D = np.asarray(D, dtype=self.dtype)
        if B is not None:
            self.B = np.asarray(B, dtype=self.dtype)

        if B is not None or not hasattr(self, 'Gamma'):
            self.Gamma, self.Q = np.linalg.eigh(self.B.T.dot(self.B))
            self.Gamma = np.abs(self.Gamma)

        if D is not None or not hasattr(self, 'Df'):
            self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
            self.DSf = np.conj(self.Df) * self.Sf
            self.DSfBQ = sl.dot(self.B.dot(self.Q).T, self.DSf,
                                axis=self.cri.axisC)

        # Fold square root of Gamma into the dictionary array to enable
        # use of the solvedbi_sm solver
        shpg = [1] * len(self.cri.shpD)
        shpg[self.cri.axisC] = self.Gamma.shape[0]
        Gamma2 = np.sqrt(self.Gamma).reshape(shpg)
        self.gDf = Gamma2 * self.Df

        if self.opt['HighMemSolve']:
            self.c = sl.solvedbi_sm_c(self.gDf, np.conj(self.gDf), self.rho,
                                      self.cri.axisM)
        else:
            self.c = None
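
The eigendecomposition computed in setdict is what makes the per-channel reduction possible: since B^T B is symmetric positive semi-definite, np.linalg.eigh factorises it as Q diag(Gamma) Q^T with orthonormal Q, and sqrt(Gamma) can then be folded into the dictionary as above. A small self-contained check of this property (illustrative, independent of SPORCO):

import numpy as np

rng = np.random.default_rng(0)
B = rng.standard_normal((6, 4))
Gamma, Q = np.linalg.eigh(B.T.dot(B))

# Q diagonalises B^T B exactly (up to roundoff) ...
assert np.linalg.norm(Q.T.dot(B.T.dot(B)).dot(Q) - np.diag(Gamma)) < 1e-12

# ... and the eigenvalues of a Gram matrix are non-negative, so the
# np.abs applied before np.sqrt in setdict only guards against roundoff.
assert np.all(Gamma > -1e-12)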
Example #3
 def test_25(self):
     a = np.random.randn(7, 8)
     b = np.random.randn(3, 8, 4, 12)
     c1 = np.zeros((3, 7, 4, 12))
     for i0 in range(c1.shape[0]):
         for i1 in range(c1.shape[3]):
             c1[i0, ..., i1] = a.dot(b[i0, ..., i1])
     c2 = linalg.dot(a, b, axis=1)
     assert np.linalg.norm(c1 - c2) < 2e-14
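
The looped reference computation in this test can also be written as a single einsum contraction, which makes the axis semantics explicit (a sketch assuming, as the test does, that the second dimension of a is contracted against axis=1 of b):

import numpy as np

a = np.random.randn(7, 8)
b = np.random.randn(3, 8, 4, 12)

# c[k, i, l, m] = sum_j a[i, j] * b[k, j, l, m]
c_einsum = np.einsum('ij,kjlm->kilm', a, b)

c_loop = np.zeros((3, 7, 4, 12))
for i0 in range(3):
    for i1 in range(12):
        c_loop[i0, ..., i1] = a.dot(b[i0, ..., i1])
assert np.linalg.norm(c_einsum - c_loop) < 1e-13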
Example #4
 def test_24(self):
     a = np.random.randn(7, 8)
     b = np.random.randn(3, 4, 8, 12)
     c1 = np.zeros((3, 4, 7, 12))
     for i0 in range(c1.shape[0]):
         for i1 in range(c1.shape[1]):
             c1[i0, i1] = a.dot(b[i0, i1])
     c2 = linalg.dot(a, b)
     assert np.linalg.norm(c1 - c2) < 2e-14
Example #5
    def reconstruct(self, X=None):
        """Reconstruct representation."""

        if X is None:
            X = self.X
        Xf = sl.rfftn(X, None, self.cri.axisN)
        Sf = sl.dot(self.B, np.sum(self.Df * Xf, axis=self.cri.axisM),
                    axis=self.cri.axisC)
        return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
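
For reference, the operation performed by reconstruct is easy to replicate in plain NumPy for a 1-D toy problem: each coefficient map is multiplied by its filter in the DFT domain, the products are summed over the filter index, and B then mixes the result across channels (hypothetical shapes, sketch only):

import numpy as np

N, M, Mb, C = 16, 3, 4, 5           # signal length, filters, B columns, channels
rng = np.random.default_rng(0)
D = rng.standard_normal((N, M))     # M filters of length N
X = rng.standard_normal((N, M, Mb)) # coefficient maps
B = rng.standard_normal((C, Mb))    # standard (channel) dictionary

Df = np.fft.rfft(D, axis=0)
Xf = np.fft.rfft(X, axis=0)
DX = np.fft.irfft(np.sum(Df[:, :, None] * Xf, axis=1), N, axis=0)  # N x Mb
S = DX.dot(B.T)                     # N x C reconstruction (channels mixed by B)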
Example #6
 def test_23(self):
     a = np.random.randn(7, 8)
     b = np.random.randn(3, 8, 4, 12)
     c1 = np.zeros((3, 7, 4, 12))
     for i0 in range(c1.shape[0]):
         for i1 in range(c1.shape[3]):
             c1[i0, ..., i1] = a.dot(b[i0, ..., i1])
     c2 = linalg.dot(a, b, axis=1)
     assert np.linalg.norm(c1 - c2) < 2e-14
Example #7
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| D X B - S \|_2^2`.
        """

        DXBf = sl.dot(self.B, sl.inner(self.Df, self.obfn_fvarf(),
                                       axis=self.cri.axisM),
                       axis=self.cri.axisC)
        Ef = DXBf - self.Sf
        return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
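
obfn_dfd can evaluate the squared l2 norm directly in the DFT domain because, by Parseval's theorem, the norm of a real array is recoverable from its rfftn coefficients as long as the implicitly conjugate-symmetric half of the spectrum is counted twice; sl.rfl2norm2 encapsulates this correction. A self-contained sketch of the underlying identity (assuming an even length on the transformed axis):

import numpy as np

x = np.random.randn(8, 8)
xf = np.fft.rfftn(x)                # shape (8, 5): last axis is halved

# Coefficients with an implicit conjugate pair count twice; the DC and
# Nyquist columns of the halved axis are self-conjugate and count once.
w = 2.0 * np.ones(xf.shape[-1])
w[0] = 1.0
w[-1] = 1.0                         # Nyquist column (even axis length)

nrm_spatial = np.sum(x**2)
nrm_fourier = np.sum(w * np.abs(xf)**2) / x.size
assert abs(nrm_spatial - nrm_fourier) < 1e-10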
Example #8
 def test_22(self):
     a = np.random.randn(7, 8)
     b = np.random.randn(3, 4, 8, 12)
     c1 = np.zeros((3, 4, 7, 12))
     for i0 in range(c1.shape[0]):
         for i1 in range(c1.shape[1]):
             c1[i0, i1] = a.dot(b[i0, i1])
     c2 = linalg.dot(a, b)
     assert np.linalg.norm(c1 - c2) < 2e-14
Example #9
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(
            sl.dot(self.B, sl.inner(self.Df, Xf, axis=self.cri.axisM),
                   axis=self.cri.axisC), self.cri.Nv, self.cri.axisN)
Example #10
    def cnst_A0T(self, Y0):
        r"""Compute :math:`A_0^T \mathbf{y}_0` component of
        :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
        """

        # This calculation involves non-negligible computational cost. It
        # should be possible to disable relevant diagnostic information
        # (dual residual) to avoid this cost.
        Y0f = sl.rfftn(Y0, None, self.cri.axisN)
        return sl.irfftn(
            sl.dot(self.B.T, np.conj(self.Df) * Y0f, axis=self.cri.axisC),
                   self.cri.Nv, self.cri.axisN)
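
cnst_A0T is the adjoint of cnst_A0: convolution is pointwise multiplication by Df in the DFT domain, and its adjoint (correlation) is multiplication by np.conj(Df), with B entering simply as its transpose. A 1-D NumPy sketch of the adjoint identity <A0 x, y> = <x, A0T y> (B omitted for brevity):

import numpy as np

N = 16
rng = np.random.default_rng(0)
d = rng.standard_normal(N)
x = rng.standard_normal(N)
y = rng.standard_normal(N)
Df = np.fft.rfft(d)

Ax = np.fft.irfft(Df * np.fft.rfft(x), N)            # circular convolution
ATy = np.fft.irfft(np.conj(Df) * np.fft.rfft(y), N)  # circular correlation
assert abs(np.dot(Ax, y) - np.dot(x, ATy)) < 1e-10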
Example #11
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        Zf = rfftn(self.YU, None, self.cri.axisN)
        ZfQ = dot(self.Q.T, Zf, axis=self.cri.axisC)
        b = self.DSfBQ + self.rho * ZfQ

        Xh = solvedbi_sm(self.gDf, self.rho, b, self.c, axis=self.cri.axisM)
        self.Xf[:] = dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * inner(
                self.Df, self.Xf, axis=self.cri.axisM)
            DDXfBB = dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = DDXfBB + self.rho * self.Xf
            b = dot(self.B.T, self.DSf, axis=self.cri.axisC) + \
                self.rho * Zf
            self.xrrs = rrs(ax, b)
        else:
            self.xrrs = None
Example #12
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        Zf = sl.rfftn(self.YU, None, self.cri.axisN)
        ZfQ = sl.dot(self.Q.T, Zf, axis=self.cri.axisC)
        b = self.DSfBQ + self.rho * ZfQ

        Xh = sl.solvedbi_sm(self.gDf, self.rho, b, self.c,
                            axis=self.cri.axisM)
        self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf,
                                               axis=self.cri.axisM)
            DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = DDXfBB + self.rho * self.Xf
            b = sl.dot(self.B.T, self.DSf, axis=self.cri.axisC) + \
                self.rho * Zf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
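
The LinSolveCheck branch in both xstep variants reports a relative residual for the frequency-domain linear solve. The quantity is just ||Ax - b|| / ||b||; a sketch with an ordinary dense solve (the rrs below is a stand-in for sporco.linalg.rrs):

import numpy as np

def rrs(ax, b):
    """Relative residual ||ax - b|| / ||b|| of a linear solve."""
    return np.linalg.norm(ax - b) / np.linalg.norm(b)

A = np.random.randn(5, 5) + 5.0 * np.eye(5)  # well-conditioned test matrix
b = np.random.randn(5)
x = np.linalg.solve(A, b)
assert rrs(A.dot(x), b) < 1e-12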
Example #13
id = select_device_by_load()
info = gpu_info()
if info:
    print('Running on GPU %d (%s)\n' % (id, info[id].name))

b = pdcsc.ConvProdDictBPDN(np2cp(D), np2cp(B), np2cp(shc), lmbda, opt, dimK=0)
X = cp2np(b.solve())
print("ConvProdDictBPDN solve time: %.2fs" % b.timer.elapsed('solve'))


"""
Compute partial and full reconstructions from sparse representation $X$ with respect to convolutional dictionary $D$ and standard dictionary $B$. The partial reconstructions are $DX$ and $XB$, and the full reconstruction is $DXB$.
"""

DX = sl.fftconv(D[..., np.newaxis, np.newaxis, :], X)
XB = sl.dot(B, X, axis=2)
shr = cp2np(b.reconstruct().squeeze())
imgr = slc + shr
print("Reconstruction PSNR: %.2fdB\n" % sm.psnr(img, imgr))


"""
Display original and reconstructed images.
"""

gamma = lambda x, g: np.sign(x) * (np.abs(x)**g)

fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(14, 14))
plot.imview(img, title='Original image', ax=ax[0, 0], fig=fig)
plot.imview(slc, title='Lowpass component', ax=ax[0, 1], fig=fig)
plot.imview(imgr, title='Reconstructed image', ax=ax[1, 0], fig=fig)
Example #14
 def test_23(self):
     a = np.random.randn(7, 8)
     b = np.random.randn(8, 12)
     c1 = a.dot(b)
     c2 = linalg.dot(a, b)
     assert np.linalg.norm(c1 - c2) < 1e-14
Example #15
    def __init__(self, D, B, S, lmbda, mu, W=None, opt=None, dimK=0,
                 dimN=2):
        """
        Parameters
        ----------
        D : array_like
          Dictionary matrix
        B : array_like
          Standard dictionary array
        S : array_like
          Signal vector or matrix
        lmbda : float
          Regularisation parameter (l1)
        mu : float
          Regularisation parameter (gradient)
        W : array_like
          Mask array. The array shape must be such that the array is
          compatible for multiplication with input array S (see
          :func:`.cnvrep.mskWshape` for more details).
        opt : :class:`ConvProdDictL1L1Grd.Options` object
          Algorithm options
        dimK : 0, 1, optional (default 0)
          Number of dimensions in input signal corresponding to multiple
          independent signals
        dimN : int, optional (default 2)
          Number of spatial dimensions
        """

        # Set default options if none specified
        if opt is None:
            opt = ConvProdDictL1L1Grd.Options()

        # Keep a record of the B dictionary
        self.set_dtype(opt, S.dtype)
        self.B = np.asarray(B, dtype=self.dtype)

        # S is an N x C matrix, D is an N x N M_D matrix, B is a C x M_B
        # matrix, and X is an N M x M_B matrix. The base class of this
        # class expects that X is N M x C (i.e. the same number of columns
        # as in S), so we pass its initialiser the product S B, which is
        # an N x M_B matrix, so that it initialises arrays with the correct
        # number of channels. This is the first of many nasty hacks in
        # this class!
        scidx = -2 if dimK == 1 else -1
        SB = sl.dot(B.T, S, axis=scidx)
        super(ConvProdDictL1L1Grd, self).__init__(
            D, SB, lmbda, mu, W=W, opt=opt, dimK=dimK, dimN=dimN)

        # Ensure that the dictionary is single channel
        if self.cri.Cd > 1:
            raise ValueError('Only single-channel convolutional dictionaries'
                             ' are supported')

        # We need to correct the shape of S due to the modified S passed to
        # the base class initialiser
        shpS = list(self.cri.shpS)
        shpS[self.cri.axisC] = S.shape[self.cri.axisC]
        self.cri.shpS = tuple(shpS)
        self.S = np.asarray(S.reshape(shpS), dtype=self.dtype)

        # We also need to correct the shapes of a number of other working
        # arrays because we have to change the mechanism for combining
        # the Y0 and Y1 blocks into a single array. In the base class
        # these arrays can just be concatenated on an appropriate axis,
        # but this is not possible here due to the different array
        # shapes. The solution is that the composite array is one
        # dimensional, with the component blocks being extracted via
        # one dimensional slicing and then reshaped to the appropriate
        # shapes.
        self.y0shp = self.cri.shpS
        self.y1shp = self.cri.shpX
        self.y0I = int(np.prod(np.array(self.y0shp[self.cri.axisC:])))
        self.y1I = int(np.prod(np.array(self.y1shp[self.cri.axisC:])))
        self.yshp = self.cri.shpX[0:self.cri.axisC] + (self.y0I + self.y1I,)
        self.Y = np.zeros(self.yshp, dtype=self.dtype)
        self.U = np.zeros(self.yshp, dtype=self.dtype)
        self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
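
The flattened two-block layout described in the comments above can be illustrated with a toy example (hypothetical shapes, not the class's real working arrays): the trailing dimensions of each block are raveled, the blocks are concatenated on the final axis, and each is recovered by 1-D slicing plus a reshape:

import numpy as np

lead = (4, 4)                     # shared leading (spatial) dimensions
y0tail, y1tail = (3,), (5, 2)     # trailing dimensions differ per block
y0I = int(np.prod(y0tail))
y1I = int(np.prod(y1tail))

Y0 = np.random.randn(*(lead + y0tail))
Y1 = np.random.randn(*(lead + y1tail))
Y = np.concatenate((Y0.reshape(lead + (y0I,)),
                    Y1.reshape(lead + (y1I,))), axis=-1)

# Recover the blocks by slicing the composite axis and reshaping
assert np.array_equal(Y[..., :y0I].reshape(Y0.shape), Y0)
assert np.array_equal(Y[..., y0I:].reshape(Y1.shape), Y1)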
Example #16
 def test_21(self):
     a = np.random.randn(7, 8)
     b = np.random.randn(8, 12)
     c1 = a.dot(b)
     c2 = linalg.dot(a, b)
     assert np.linalg.norm(c1 - c2) < 1e-14
Example #17
id = select_device_by_load()
info = gpu_info()
if info:
    print('Running on GPU %d (%s)\n' % (id, info[id].name))

b = pdcsc.ConvProdDictBPDN(np2cp(D), np2cp(B), np2cp(shc), lmbda, opt, dimK=0)
X = cp2np(b.solve())
print("ConvProdDictBPDN solve time: %.2fs" % b.timer.elapsed('solve'))


"""
Compute partial and full reconstructions from sparse representation $X$ with respect to convolutional dictionary $D$ and standard dictionary $B$. The partial reconstructions are $DX$ and $XB$, and the full reconstruction is $DXB$.
"""

DX = fft.fftconv(D[..., np.newaxis, np.newaxis, :], X, axes=(0, 1))
XB = linalg.dot(B, X, axis=2)
shr = cp2np(b.reconstruct().squeeze())
imgr = slc + shr
print("Reconstruction PSNR: %.2fdB\n" % metric.psnr(img, imgr))


"""
Display original and reconstructed images.
"""

gamma = lambda x, g: np.sign(x) * (np.abs(x)**g)

fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(14, 14))
plot.imview(img, title='Original image', ax=ax[0, 0], fig=fig)
plot.imview(slc, title='Lowpass component', ax=ax[0, 1], fig=fig)
plot.imview(imgr, title='Reconstructed image', ax=ax[1, 0], fig=fig)