Example #1
    def test_11(self):
        N = 63
        M = 4
        Nd = 8
        D = np.random.randn(Nd, Nd, M)
        X0 = np.zeros((N, N, M))
        xr = np.random.randn(N, N, M)
        xp = np.abs(xr) > 3
        X0[xp] = np.random.randn(X0[xp].size)
        S = np.sum(ifftn(fftn(D, (N, N), (0, 1)) *
                         fftn(X0, None, (0, 1)), None, (0, 1)).real, axis=2)
        lmbda = 1e-4
        rho = 1e-1
        opt = cbpdn.ConvBPDN.Options({'Verbose': False,
                                      'MaxMainIter': 500,
                                      'RelStopTol': 1e-3,
                                      'rho': rho,
                                      'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDN(D, S, lmbda, opt)
        b.solve()
        X1 = b.Y.squeeze()
        assert rrs(X0, X1) < 5e-5
        Sr = b.reconstruct().squeeze()
        assert rrs(S, Sr) < 1e-4
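
The S = np.sum(ifftn(fftn(D, (N, N), (0, 1)) * fftn(X0, None, (0, 1)), ...), axis=2) line above synthesises the test image as a sum of circular convolutions of each filter with its coefficient map, computed as pointwise products in the DFT domain. A minimal NumPy-only sketch of the same construction (shapes chosen here for illustration; the test itself uses the sporco FFT wrappers):

import numpy as np

N, M, Nd = 16, 3, 4
D = np.random.randn(Nd, Nd, M)    # M small filters
X = np.random.randn(N, N, M)      # M coefficient maps
# Zero-pad each filter to N x N via the shape argument of fftn, multiply the
# transforms elementwise and sum over the filter index to synthesise S.
S = np.sum(np.fft.ifftn(np.fft.fftn(D, (N, N), (0, 1)) *
                        np.fft.fftn(X, None, (0, 1)), None, (0, 1)).real,
           axis=2)
assert S.shape == (N, N)
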
Example #2
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U

        b = self.DSf + self.rho * fftn(self.YU, None, self.cri.axisN)
        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(self.Df, self.rho, b, self.c,
                                        self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(self.Df, self.rho, b, self.cri.axisM,
                                          self.cri.axisC)

        self.X = ifftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                          axis=self.cri.axisC)
            ax = DHop(Dop(self.Xf)) + self.rho * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
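
When self.cri.Cd == 1 the frequency-domain normal equations decouple into an independent "rank-one dyad plus scaled identity" system at each spatial frequency, which is the structure a Sherman-Morrison solver such as solvedbi_sm exploits. A minimal NumPy sketch of that per-frequency solve for a single bin (hypothetical values; not the sporco.linalg implementation):

import numpy as np

rho = 1e-1
M = 4
a = np.random.randn(M) + 1j * np.random.randn(M)   # filter DFT values at one bin
b = np.random.randn(M) + 1j * np.random.randn(M)   # right-hand side at one bin

# Sherman-Morrison: (rho I + a a^H)^{-1} b = b/rho - a (a^H b) / (rho (rho + a^H a))
x_sm = b / rho - a * (np.vdot(a, b) / (rho * (rho + np.vdot(a, a))))

# Direct solve of the same system for comparison
x_ref = np.linalg.solve(rho * np.eye(M) + np.outer(a, a.conj()), b)
assert np.allclose(x_sm, x_ref)
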
Example #3
    def test_11(self):
        N = 63
        M = 4
        Nd = 8
        D = np.random.randn(Nd, Nd, M)
        X0 = np.zeros((N, N, M))
        xr = np.random.randn(N, N, M)
        xp = np.abs(xr) > 3
        X0[xp] = np.random.randn(X0[xp].size)
        S = np.sum(ifftn(fftn(D, (N, N), (0, 1)) *
                         fftn(X0, None, (0, 1)), None, (0, 1)).real, axis=2)
        lmbda = 1e-2
        L = 1e3
        opt = cbpdn.ConvBPDN.Options({'Verbose': False,
                                      'MaxMainIter': 2000,
                                      'RelStopTol': 1e-9,
                                      'L': L,
                                      'Backtrack': BacktrackStandard()})
        b = cbpdn.ConvBPDN(D, S, lmbda, opt)
        b.solve()
        X1 = b.X.squeeze()
        assert sl.rrs(X0, X1) < 5e-4
        Sr = b.reconstruct().squeeze()
        assert sl.rrs(S, Sr) < 2e-4
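
Unlike Example #1, this variant is driven by a step parameter L and a BacktrackStandard line search rather than an ADMM penalty rho, so the core update is a proximal gradient step in which the ell-1 term is handled by soft thresholding with threshold lmbda / L. A minimal sketch of that prox step on a generic vector (the gradient below is a placeholder, not the actual cbpdn data-term gradient):

import numpy as np

def soft_threshold(v, t):
    """Elementwise soft thresholding: the prox operator of t * ||.||_1."""
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

L, lmbda = 1e3, 1e-2
x = np.random.randn(8)
grad = np.random.randn(8)              # placeholder data-term gradient
x_next = soft_threshold(x - grad / L, lmbda / L)
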
Example #4
    def test_18(self):
        N = 64
        M = 4
        Nd = 8
        D0 = cr.normalise(cr.zeromean(np.random.randn(Nd, Nd, M), (Nd, Nd, M),
                                      dimN=2),
                          dimN=2)
        X = np.zeros((N, N, M))
        xr = np.random.randn(N, N, M)
        xp = np.abs(xr) > 3
        X[xp] = np.random.randn(X[xp].size)
        S = np.sum(ifftn(
            fftn(D0, (N, N), (0, 1)) * fftn(X, None, (0, 1)), None,
            (0, 1)).real,
                   axis=2)
        L = 50.0
        opt = ccmod.ConvCnstrMOD.Options({
            'Verbose': False,
            'MaxMainIter': 3000,
            'ZeroMean': True,
            'RelStopTol': 0.,
            'L': L,
            'Monotone': True
        })
        Xr = X.reshape(X.shape[0:2] + (1, 1,) + X.shape[2:])
        Sr = S.reshape(S.shape + (1, ))
        c = ccmod.ConvCnstrMOD(Xr, Sr, D0.shape, opt)
        c.solve()
        D1 = cr.bcrop(c.X, D0.shape).squeeze()

        assert rrs(D0, D1) < 1e-4
        assert np.array(c.getitstat().Rsdl)[-1] < 1e-5
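
The dictionary D0 above is prepared by removing the mean of each Nd x Nd filter and rescaling it to unit norm, the same kind of constraint the 'ZeroMean' option enforces during the update. A minimal NumPy sketch of that initialisation (an illustration of the intent, not the sporco.cnvrep implementation):

import numpy as np

Nd, M = 8, 4
D0 = np.random.randn(Nd, Nd, M)
D0 = D0 - D0.mean(axis=(0, 1), keepdims=True)                   # zero mean per filter
D0 = D0 / np.sqrt(np.sum(D0 ** 2, axis=(0, 1), keepdims=True))  # unit norm per filter
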
Example #5
    def test_02(self):
        N = 32
        M = 4
        Nd = 5
        D0 = cr.normalise(cr.zeromean(
            np.random.randn(Nd, Nd, M), (Nd, Nd, M), dimN=2), dimN=2)
        X = np.zeros((N, N, M))
        xr = np.random.randn(N, N, M)
        xp = np.abs(xr) > 3
        X[xp] = np.random.randn(X[xp].size)
        S = np.sum(ifftn(fftn(D0, (N, N), (0, 1)) *
                         fftn(X, None, (0, 1)), None, (0, 1)).real, axis=2)
        rho = 1e-1
        opt = ccmod.ConvCnstrMOD_CG.Options({'Verbose': False,
                                             'MaxMainIter': 500,
                                             'LinSolveCheck': True,
                                             'ZeroMean': True,
                                             'RelStopTol': 1e-5,
                                             'rho': rho,
                                             'AutoRho': {'Enabled': False},
                                             'CG': {'StopTol': 1e-5}})
        Xr = X.reshape(X.shape[0:2] + (1, 1,) + X.shape[2:])
        Sr = S.reshape(S.shape + (1,))
        c = ccmod.ConvCnstrMOD_CG(Xr, Sr, D0.shape, opt)
        c.solve()
        D1 = cr.bcrop(c.Y, D0.shape).squeeze()
        assert rrs(D0, D1) < 1e-4
        assert np.array(c.getitstat().XSlvRelRes).max() < 1e-3
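
The _CG variant replaces the direct frequency-domain solve with an inner conjugate gradient iteration whose tolerance is set by 'CG': {'StopTol': 1e-5}, and LinSolveCheck / XSlvRelRes report how accurately that inner system was solved. A minimal sketch of a CG solve of a system of the same general form (Z^H Z + rho I) x = b, using scipy.sparse.linalg on a stand-in operator rather than the system ConvCnstrMOD_CG actually assembles:

import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

M, rho = 16, 1e-1
Z = np.random.randn(M, M)
b = np.random.randn(M)
# Matrix-free symmetric positive definite operator x -> Z^T Z x + rho x
A = LinearOperator((M, M), matvec=lambda x: Z.T @ (Z @ x) + rho * x)
x, info = cg(A, b)    # info == 0 indicates convergence
assert info == 0
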
Example #6
    def reconstruct(self, X=None):
        """Reconstruct representation."""

        if X is None:
            X = self.Y
        Xf = fftn(X, None, self.cri.axisN)
        Sf = np.sum(self.Df * Xf, axis=self.cri.axisM)
        return ifftn(Sf, self.cri.Nv, self.cri.axisN)
Example #7
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho * fftn(self.YU, None, self.cri.axisN)
        self.Xf[:] = sl.solvemdbi_ism(self.Zf, self.rho, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = ifftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
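
In this update step the per-frequency system contains one dyad per index on axisK rather than a single rank-one term, and a routine named solvemdbi_ism handles it by applying the Sherman-Morrison update iteratively. A minimal NumPy sketch of that idea for a single frequency bin, solving a generic (rho I + sum_k z_k z_k^H) x = b (not the sporco.linalg implementation):

import numpy as np

def solve_ism(Z, rho, b):
    """Solve (rho I + sum_k z_k z_k^H) x = b by iterated Sherman-Morrison,
    where the z_k are the columns of the M x K matrix Z."""
    M, K = Z.shape
    x = b / rho    # A_0^{-1} b with A_0 = rho I
    W = Z / rho    # A_0^{-1} applied to every column of Z
    for k in range(K):
        zk = Z[:, k]
        alpha = 1.0 + np.vdot(zk, W[:, k])
        x = x - W[:, k] * (np.vdot(zk, x) / alpha)
        # Fold the k-th rank-one correction into the remaining columns
        W = W - np.outer(W[:, k], (zk.conj() @ W) / alpha)
    return x

M, K, rho = 4, 3, 1e-1
Z = np.random.randn(M, K) + 1j * np.random.randn(M, K)
b = np.random.randn(M) + 1j * np.random.randn(M)
assert np.allclose(solve_ism(Z, rho, b),
                   np.linalg.solve(rho * np.eye(M) + Z @ Z.conj().T, b))
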
Example #8
    def reconstruct(self, D=None):
        """Reconstruct representation."""

        if D is None:
            Df = self.Xf
        else:
            Df = fftn(D, None, self.cri.axisN)

        Sf = np.sum(self.Zf * Df, axis=self.cri.axisM)
        return ifftn(Sf, self.cri.Nv, self.cri.axisN)
Example #9
    def xistep(self, i):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`
        component :math:`\mathbf{x}_i`.
        """

        self.YU[:] = self.Y - self.U[..., i]
        b = np.take(self.ZSf, [i], axis=self.cri.axisK) + \
            self.rho*fftn(self.YU, None, self.cri.axisN)

        self.Xf[..., i] = sl.solvedbi_sm(np.take(self.Zf, [i],
                                                 axis=self.cri.axisK),
                                         self.rho,
                                         b,
                                         axis=self.cri.axisM)
        self.X[..., i] = ifftn(self.Xf[..., i], self.cri.Nv, self.cri.axisN)
Example #10
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to block vector
        :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
        \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
        """

        # This branch on the number of channels reflects empirical
        # evidence that two slightly different implementations are
        # faster for multi-channel and single-channel data
        # respectively. This kludge is intended to be temporary.
        if self.cri.Cd > 1:
            for i in range(self.Nb):
                self.xistep(i)
        else:
            self.YU[:] = self.Y[..., np.newaxis] - self.U
            b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
                + self.rho*fftn(self.YU, None, self.cri.axisN)
            for i in range(self.Nb):
                self.Xf[..., i] = sl.solvedbi_sm(self.Zf[..., [i], :],
                                                 self.rho,
                                                 b[..., i],
                                                 axis=self.cri.axisM)
            self.X = ifftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
            YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
            b = ZSfs + self.rho * fftn(YU, None, self.cri.axisN)
            Xf = self.swapaxes(self.Xf)
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: np.conj(self.Zf) * x
            ax = np.sum(ZHop(Zop(Xf)) + self.rho * Xf,
                        axis=self.cri.axisK,
                        keepdims=True)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
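
The LinSolveCheck branch above records xrrs, the relative residual of the frequency-domain solve, i.e. how closely ax reproduces b. A minimal sketch of that check, assuming the usual definition ||ax - b|| / ||b|| (the exact sl.rrs implementation may differ in detail):

import numpy as np

def rel_res(ax, b):
    """Relative residual ||ax - b|| / ||b|| of a candidate solution."""
    return np.linalg.norm(ax - b) / np.linalg.norm(b)

A = np.random.randn(5, 5)
x = np.linalg.solve(A, np.ones(5))
assert rel_res(A @ x, np.ones(5)) < 1e-8   # an (almost) exact solution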