Example #1
def par_xstep(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of optimising
    :math:`\mathbf{x}`.

    Parameters
    ----------
    i : int
      Index of grouping to update

    """
    global mp_X
    global mp_DX
    YU0f = sl.rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
    YU1f = sl.rfftn(mp_Y1[mp_grp[i]:mp_grp[i+1]] -
                    1/mp_alpha*mp_U1[mp_grp[i]:mp_grp[i+1]], mp_Nv, mp_axisN)
    if mp_Cd == 1:
        b = np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]) * YU0f + mp_alpha**2*YU1f
        Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                            mp_cache[i], axis=mp_axisM)
    else:
        b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]), YU0f,
                     axis=mp_C) + mp_alpha**2*YU1f
        Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                              mp_axisM, mp_axisC)
    mp_X[mp_grp[i]:mp_grp[i+1]] = sl.irfftn(Xf, mp_Nv,
                                            mp_axisN)
    mp_DX[i] = sl.irfftn(sl.inner(mp_Df[mp_grp[i]:mp_grp[i+1]], Xf,
                                  mp_axisM), mp_Nv, mp_axisN)
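A note on the solver used above: solvedbi_sm is assumed to solve, independently for each frequency bin, a linear system whose matrix is a rank-one outer product plus a scaled identity (the constant is mp_alpha**2 here). A minimal NumPy sketch of such a Sherman-Morrison solve follows; the helper name sm_solve is hypothetical and this is not the library's implementation.

import numpy as np

def sm_solve(d, rho, b):
    """Solve (d d^H + rho I) x = b for one frequency bin via the
    Sherman-Morrison identity:
    inv(rho I + d d^H) = (I - d d^H / (rho + d^H d)) / rho."""
    dHb = np.vdot(d, b)                       # d^H b (scalar)
    return (b - d * (dHb / (rho + np.vdot(d, d)))) / rho

# Check against an explicit dense solve for one random bin
rng = np.random.default_rng(0)
M, rho = 8, 0.5
d = rng.standard_normal(M) + 1j * rng.standard_normal(M)
b = rng.standard_normal(M) + 1j * rng.standard_normal(M)
A = np.outer(d, d.conj()) + rho * np.eye(M)
assert np.allclose(sm_solve(d, rho, b), np.linalg.solve(A, b))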
Example #2
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to block vector
        :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
        \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
        """

        # This test reflects empirical evidence that two slightly
        # different implementations are faster for single or
        # multi-channel data. This kludge is intended to be temporary.
        if self.cri.Cd > 1:
            for i in range(self.Nb):
                self.xistep(i)
        else:
            self.YU[:] = self.Y[..., np.newaxis] - self.U
            b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
                + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
            for i in range(self.Nb):
                self.Xf[..., i] = sl.solvedbi_sm(
                    self.Zf[..., [i], :], self.rho, b[..., i],
                    axis=self.cri.axisM)
            self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
            YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
            b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN)
            Xf = self.swapaxes(self.Xf)
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: np.conj(self.Zf) * x
            ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK,
                        keepdims=True)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #3
    def reconstruct(self, D=None, X=None):
        """Reconstruct representation."""

        if D is None:
            D = self.getdict(crop=False)
        if X is None:
            X = self.getcoef()
        Df = sl.rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
        Xf = sl.rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
        DXf = sl.inner(Df, Xf, axis=self.xstep.cri.axisM)
        return sl.irfftn(DXf, self.xstep.cri.Nv, self.xstep.cri.axisN)
Example #4
def ccmodmd_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_D_Y0 - mp_D_U0[k]
    YU1 = mp_D_Y1[k] + mp_S[k] - mp_D_U1[k]
    b = sl.rfftn(YU0, None, mp_cri.axisN) + \
        np.conj(mp_Zf[k]) * sl.rfftn(YU1, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], 1.0, b, axis=mp_cri.axisM)
    mp_D_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(Xf, mp_Zf[k]), mp_cri.Nv, mp_cri.axisN)
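The docstring above reflects a design in which each worker function takes only a slice index and reads and writes module-level mp_* arrays. A hedged, self-contained sketch of that pattern using a process pool initializer is shown below; the names init_worker and xstep_slice and the toy array are illustrative only (the library itself uses shared-memory arrays rather than returning results).

import numpy as np
from multiprocessing import Pool

mp_A = None  # module-level "global" visible inside each worker process

def init_worker(A):
    # Runs once per worker; stashes the shared input in a global variable
    global mp_A
    mp_A = A

def xstep_slice(k):
    # Each task touches only slice k of the global array
    return k, float(mp_A[k].sum())

if __name__ == '__main__':
    A = np.arange(12.0).reshape(4, 3)
    with Pool(processes=2, initializer=init_worker, initargs=(A,)) as pool:
        results = dict(pool.map(xstep_slice, range(A.shape[0])))
    print(results)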
Example #5
    def setdict(self, D=None, B=None):
        """Set dictionary array."""

        if D is not None:
            self.D = np.asarray(D, dtype=self.dtype)
        if B is not None:
            self.B = np.asarray(B, dtype=self.dtype)

        if B is not None or not hasattr(self, 'Gamma'):
            self.Gamma, self.Q = np.linalg.eigh(self.B.T.dot(self.B))
            self.Gamma = np.abs(self.Gamma)

        if D is not None or not hasattr(self, 'Df'):
            self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)

        # Fold square root of Gamma into the dictionary array to enable
        # use of the solvedbi_sm solver
        shpg = [1] * len(self.cri.shpD)
        shpg[self.cri.axisC] = self.Gamma.shape[0]
        Gamma2 = np.sqrt(self.Gamma).reshape(shpg)
        self.gDf = Gamma2 * self.Df

        if self.opt['HighMemSolve']:
            self.c = sl.solvedbd_sm_c(
                self.gDf, np.conj(self.gDf),
                (self.mu / self.rho) * self.GHGf + 1.0, self.cri.axisM)
        else:
            self.c = None
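The "fold square root of Gamma into the dictionary" comment above relies on the eigendecomposition B^T B = Q diag(Gamma) Q^T, so that Q scaled by sqrt(Gamma) is a square-root factor of B^T B. A small NumPy check of that identity, independent of the class above:

import numpy as np

rng = np.random.default_rng(0)
B = rng.standard_normal((6, 4))

# Eigendecomposition of the symmetric positive semi-definite matrix B^T B
Gamma, Q = np.linalg.eigh(B.T @ B)
Gamma = np.abs(Gamma)        # guard against tiny negative eigenvalues

# Scaling the columns of Q by sqrt(Gamma) gives a factor R with R R^T = B^T B
R = Q * np.sqrt(Gamma)
assert np.allclose(R @ R.T, B.T @ B)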
Example #6
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)

        # The sum is over the extra axis indexing spatial gradient
        # operators G_i, *not* over axisM
        b = self.DSf + self.rho*(YUf[..., -1] + self.Wtv * np.sum(
            np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))

        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(
                self.Df, self.rho*self.GHGf + self.rho, b, self.c,
                self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(
                self.Df, self.rho*self.GHGf + self.rho, b, self.cri.axisM,
                self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                          axis=self.cri.axisC)
            ax = DHop(Dop(self.Xf)) + (self.rho*self.GHGf + self.rho)*self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
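The Gf and GHGf arrays used in the x step above are assumed to be DFT-domain gradient filters stacked along an extra trailing axis, with GHGf the sum of their squared magnitudes. A rough NumPy construction of such arrays for a 2D grid, purely illustrative and not the library's GradientFilters:

import numpy as np

Nv = (16, 16)                                    # spatial grid size

# First-difference filters for each spatial direction, zero-padded to the
# full grid so that their DFTs can multiply signal DFTs pointwise
filters = []
for axis in range(2):
    h = np.zeros(Nv)
    idx = [0, 0]
    h[tuple(idx)] = -1.0
    idx[axis] = 1
    h[tuple(idx)] = 1.0
    filters.append(h)

# Stack filter DFTs along a new trailing axis (one slot per operator G_i)
Gf = np.stack([np.fft.rfftn(h, s=Nv) for h in filters], axis=-1)

# GHGf collects sum_i |G_i|^2, applied pointwise in the DFT domain
GHGf = np.sum(np.conj(Gf) * Gf, axis=-1).real
print(Gf.shape, GHGf.shape)                      # (16, 9, 2) and (16, 9)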
Example #7
    def obfn_fvarf(self):
        """Variable to be evaluated in computing data fidelity term,
        depending on 'fEvalX' option value.
        """

        return self.Xf if self.opt['fEvalX'] else \
            sl.rfftn(self.Y, None, self.cri.axisN)
Example #8
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        Zf = sl.rfftn(self.YU, None, self.cri.axisN)
        Z0f = self.block_sep0(Zf)
        Z1f = self.block_sep1(Zf)

        DZ0f = np.conj(self.Df) * Z0f
        DZ0fBQ = sl.dot(self.B.dot(self.Q).T, DZ0f, axis=self.cri.axisC)
        Z1fQ = sl.dot(self.Q.T, Z1f, axis=self.cri.axisC)
        b = DZ0fBQ + Z1fQ

        Xh = sl.solvedbd_sm(self.gDf, (self.mu / self.rho) * self.GHGf + 1.0,
                            b, self.c, axis=self.cri.axisM)
        self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf,
                                               axis=self.cri.axisM)
            DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = self.rho * (DDXfBB + self.Xf) + \
                 self.mu * self.GHGf * self.Xf
            b = self.rho * (sl.dot(self.B.T, DZ0f, axis=self.cri.axisC)
                            + Z1f)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #9
def cbpdnmd_setdict():
    """Set the dictionary for the cbpdn stage. There are no parameters
    or return values because all inputs and outputs are from and to
    global variables.
    """

    # Set working dictionary for cbpdn step and compute DFT of dictionary D
    mp_Df[:] = sl.rfftn(mp_D_Y0, mp_cri.Nv, mp_cri.axisN)
Example #10
    def reconstruct(self, X=None):
        """Reconstruct representation."""

        if X is None:
            X = self.X
        Xf = sl.rfftn(X, None, self.cri.axisN)
        Sf = np.sum(self.Df * Xf, axis=self.cri.axisM)
        return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
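The reconstruction above forms the sum over m of d_m * x_m by multiplying dictionary and coefficient DFTs and summing over the filter axis. The sketch below checks that idea against a direct circular convolution for a single filter, using plain NumPy FFTs rather than the class machinery:

import numpy as np

rng = np.random.default_rng(0)
N = (8, 8)
d = np.zeros(N)
d[:3, :3] = rng.standard_normal((3, 3))    # small filter, zero-padded to N
x = rng.standard_normal(N)                 # coefficient map

# Frequency-domain product ...
dx_fft = np.fft.irfftn(np.fft.rfftn(d) * np.fft.rfftn(x), s=N)

# ... equals the circular (periodic) convolution computed directly
dx_direct = np.zeros(N)
for i in range(N[0]):
    for j in range(N[1]):
        dx_direct += d[i, j] * np.roll(np.roll(x, i, axis=0), j, axis=1)

assert np.allclose(dx_fft, dx_direct)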
Example #11
    def cnst_AT(self, X):
        r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is
        a component of ADMM problem constraint. In this case
        :math:`A^T \mathbf{x} = (G_r^T \;\; G_c^T) \mathbf{x}`.
        """

        Xf = sl.rfftn(X, axes=self.axes)
        return np.sum(sl.irfftn(np.conj(self.Gf)*Xf, self.axsz,
                                axes=self.axes), axis=self.Y.ndim-1)
Example #12
    def cnst_A1T(self, Y1):
        r"""Compute :math:`A_1^T \mathbf{y}_1` component of
        :math:`A^T \mathbf{y}`. In this case :math:`A_1^T \mathbf{y}_1 =
        (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`.
        """

        Y1f = sl.rfftn(Y1, None, axes=self.cri.axisN)
        return sl.irfftn(np.conj(self.GDf) * Y1f, self.cri.Nv,
                         self.cri.axisN)
Example #13
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
        """

        XF = sl.rfftn(self.obfn_fvar(), mp_Nv, mp_axisN)
        DX = np.moveaxis(
            sl.irfftn(sl.inner(mp_Df, XF, mp_axisM), mp_Nv, mp_axisN),
            mp_axisM, self.cri.axisM)
        return np.sum((self.W*(DX-self.S))**2)/2.0
Example #14
def ccmodmd_setcoef(k):
    """Set the coefficient maps for the ccmod stage. The only parameter is
    the slice index `k` and there are no return values; all inputs and
    outputs are from and to global variables.
    """

    # Set working coefficient maps for ccmod step and compute DFT of
    # coefficient maps Z
    mp_Zf[k] = sl.rfftn(mp_Z_Y1[k], mp_cri.Nv, mp_cri.axisN)
Example #15
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * sl.rfftn(YU0, None, mp_cri.axisN) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), sl.rfftn(YU0, None, mp_cri.axisN),
                      axis=mp_cri.axisC) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Example #16
    def cnst_A(self, X, Xf=None):
        r"""Compute :math:`A \mathbf{x}` component of ADMM problem
        constraint.  In this case :math:`A \mathbf{x} = (G_r^T \;\;
        G_c^T)^T \mathbf{x}`.
        """

        if Xf is None:
            Xf = sl.rfftn(X, axes=self.axes)
        return sl.irfftn(self.Gf*Xf[..., np.newaxis], self.axsz,
                         axes=self.axes)
Example #17
    def cnst_A0T(self, X):
        r"""Compute :math:`A_0^T \mathbf{x}` where :math:`A_0 \mathbf{x}`
        is a component of the ADMM problem constraint. In this case
        :math:`A_0^T \mathbf{x} = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots )
        \mathbf{x}`.
        """

        Xf = sl.rfftn(X, axes=self.cri.axisN)
        return self.Wtv[..., np.newaxis] * sl.irfftn(
            np.conj(self.Gf) * Xf[..., 0:-1], self.cri.Nv, axes=self.cri.axisN)
Example #18
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
        \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
        """

        if Xf is None:
            Xf = sl.rfftn(X, axes=self.cri.axisN)
        return self.Wtv[..., np.newaxis] * sl.irfftn(
            self.Gf * Xf[..., np.newaxis], self.cri.Nv, axes=self.cri.axisN)
Example #19
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(
            sl.dot(self.B, sl.inner(self.Df, Xf, axis=self.cri.axisM),
                   axis=self.cri.axisC), self.cri.Nv, self.cri.axisN)
Example #20
    def reconstruct(self, D=None):
        """Reconstruct representation."""

        if D is None:
            Df = self.Xf
        else:
            Df = sl.rfftn(D, None, self.cri.axisN)

        Sf = np.sum(self.Zf * Df, axis=self.cri.axisM)
        return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
Example #21
def ccmod_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_D_Y - mp_D_U[k]
    b = mp_ZSf[k] + mp_drho * sl.rfftn(YU, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], mp_drho, b, axis=mp_cri.axisM)
    mp_D_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example #22
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:] = sl.solvemdbi_ism(self.Zf, self.rho, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
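The x step above solves the normal equations of a quadratic minimisation in the DFT domain. In generic ADMM notation (sketched here to match the structure of the code, with Z the linear operator built from the coefficient DFTs; this is an assumption about the surrounding derivation, not a statement from the library documentation):

\mathbf{x}^{(k+1)} = \operatorname*{argmin}_{\mathbf{x}} \;
    \frac{1}{2} \| Z \mathbf{x} - \mathbf{s} \|_2^2 +
    \frac{\rho}{2} \| \mathbf{x} - \mathbf{y}^{(k)} + \mathbf{u}^{(k)} \|_2^2

% Setting the gradient to zero gives the system solved per frequency bin,
% whose right-hand side is the variable b in the code:
(Z^H Z + \rho I)\, \mathbf{x} =
    Z^H \mathbf{s} + \rho \left( \mathbf{y}^{(k)} - \mathbf{u}^{(k)} \right)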
Example #23
    def compute_residuals(self):
        """Compute residuals and stopping thresholds. The parent class
        method is overridden to ensure that the residual calculations
        include the additional variables introduced in the modification
        to the baseline algorithm.
        """

        # The full primal residual is straightforward to compute from
        # the primal residuals for the baseline algorithm and for the
        # additional variables
        r0 = self.rsdl_r(self.AXnr, self.Y)
        r1 = self.AX1nr - self.Y1 - self.S
        r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))

        # The full dual residual is more complicated to compute than the
        # full primal residual
        ATU = self.swapaxes(self.U) + sl.irfftn(
            np.conj(self.Zf) * sl.rfftn(self.U1, self.cri.Nv, self.cri.axisN),
            self.cri.Nv, self.cri.axisN)
        s = self.rho * np.linalg.norm(ATU)

        # The normalisation factor for the full primal residual is also not
        # straightforward
        nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 +
                      np.linalg.norm(self.AX1nr)**2)
        nY = np.sqrt(np.linalg.norm(self.Y)**2 +
                     np.linalg.norm(self.Y1)**2)
        rn = max(nAX, nY, np.linalg.norm(self.S))

        # The normalisation factor for the full dual residual is
        # straightforward to compute
        sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 +
                                np.linalg.norm(self.U1)**2)

        # Final residual values and stopping tolerances depend on
        # whether standard or normalised residuals are specified via the
        # options object
        if self.opt['AutoRho', 'StdResiduals']:
            epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
                rn*self.opt['RelStopTol']
            edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
                sn*self.opt['RelStopTol']
        else:
            if rn == 0.0:
                rn = 1.0
            if sn == 0.0:
                sn = 1.0
            r /= rn
            s /= sn
            epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
                self.opt['RelStopTol']
            edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
                self.opt['RelStopTol']

        return r, s, epri, edua
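The residuals and tolerances computed above follow the standard ADMM stopping criteria (Boyd et al.) for a constraint A x + B y = c; the class-specific normalisation is exactly what the code expresses, so the formulas below are only the generic template:

r^{(k)} = A x^{(k)} + B y^{(k)} - c, \qquad
s^{(k)} = \rho\, A^{\mathsf{T}} B \left( y^{(k)} - y^{(k-1)} \right)

\epsilon^{\mathrm{pri}} = \sqrt{p}\, \epsilon^{\mathrm{abs}} +
    \epsilon^{\mathrm{rel}} \max\{ \| A x^{(k)} \|_2, \| B y^{(k)} \|_2, \| c \|_2 \}

\epsilon^{\mathrm{dual}} = \sqrt{n}\, \epsilon^{\mathrm{abs}} +
    \epsilon^{\mathrm{rel}}\, \rho\, \| A^{\mathsf{T}} u^{(k)} \|_2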
Example #24
    def cnst_AT(self, X):
        r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is
        a component of ADMM problem constraint. In this case
        :math:`A^T \mathbf{x} = (G_r^T \;\; G_c^T) \mathbf{x}`.
        """

        Xf = sl.rfftn(X, axes=self.axes)
        return np.sum(sl.irfftn(np.conj(self.Gf) * Xf,
                                self.axsz,
                                axes=self.axes),
                      axis=self.Y.ndim - 1)
Example #25
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(
            sl.dot(self.B,
                   sl.inner(self.Df, Xf, axis=self.cri.axisM),
                   axis=self.cri.axisC), self.cri.Nv, self.cri.axisN)
Example #26
    def cnst_A(self, X, Xf=None):
        r"""Compute :math:`A \mathbf{x}` component of ADMM problem
        constraint.  In this case :math:`A \mathbf{x} = (G_r^T \;\;
        G_c^T)^T \mathbf{x}`.
        """

        if Xf is None:
            Xf = sl.rfftn(X, axes=self.axes)
        return sl.irfftn(self.Gf * Xf[..., np.newaxis],
                         self.axsz,
                         axes=self.axes)
Example #27
    def compute_residuals(self):
        """Compute residuals and stopping thresholds. The parent class
        method is overridden to ensure that the residual calculations
        include the additional variables introduced in the modification
        to the baseline algorithm.
        """

        # The full primal residual is straightforward to compute from
        # the primal residuals for the baseline algorithm and for the
        # additional variables
        r0 = self.rsdl_r(self.AXnr, self.Y)
        r1 = self.AX1nr - self.Y1 - self.S
        r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))

        # The full dual residual is more complicated to compute than the
        # full primal residual
        ATU = self.swapaxes(self.U) + sl.irfftn(
            np.conj(self.Zf) * sl.rfftn(self.U1, self.cri.Nv, self.cri.axisN),
            self.cri.Nv, self.cri.axisN)
        s = self.rho * np.linalg.norm(ATU)

        # The normalisation factor for the full primal residual is also not
        # straightforward
        nAX = np.sqrt(
            np.linalg.norm(self.AXnr)**2 + np.linalg.norm(self.AX1nr)**2)
        nY = np.sqrt(np.linalg.norm(self.Y)**2 + np.linalg.norm(self.Y1)**2)
        rn = max(nAX, nY, np.linalg.norm(self.S))

        # The normalisation factor for the full dual residual is
        # straightforward to compute
        sn = self.rho * np.sqrt(
            np.linalg.norm(self.U)**2 + np.linalg.norm(self.U1)**2)

        # Final residual values and stopping tolerances depend on
        # whether standard or normalised residuals are specified via the
        # options object
        if self.opt['AutoRho', 'StdResiduals']:
            epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
                rn*self.opt['RelStopTol']
            edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
                sn*self.opt['RelStopTol']
        else:
            if rn == 0.0:
                rn = 1.0
            if sn == 0.0:
                sn = 1.0
            r /= rn
            s /= sn
            epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
                self.opt['RelStopTol']
            edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
                self.opt['RelStopTol']

        return r, s, epri, edua
Example #28
    def cnst_A0T(self, Y0):
        r"""Compute :math:`A_0^T \mathbf{y}_0` component of
        :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
        """

        # This calculation involves non-negligible computational cost. It
        # should be possible to disable relevant diagnostic information
        # (dual residual) to avoid this cost.
        Y0f = sl.rfftn(Y0, None, self.cri.axisN)
        return sl.irfftn(sl.inner(np.conj(self.Zf), Y0f, axis=self.cri.axisK),
                         self.cri.Nv, self.cri.axisN)
Example #29
    def cnst_A1(self, X, Xf=None):
        r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
        constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
        \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
        """

        if Xf is None:
            Xf = sl.rfftn(X, axes=self.cri.axisN)
        return sl.irfftn(
            sl.inner(self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM),
            self.cri.Nv, self.cri.axisN)
Example #30
    def proximal_step(self, gradf=None):
        """ Compute proximal update (gradient descent + constraint).
        """

        if gradf is None:
            gradf = self.eval_gradf()

        Vf = self.Yf - (1. / self.L) * gradf
        V = sl.irfftn(Vf, self.cri.Nv, self.cri.axisN)

        self.X = self.eval_proxop(V)
        self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
Example #31
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        # This calculation involves non-negligible computational cost
        # when Xf is None (i.e. the function is not being applied to
        # self.X).
        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(sl.inner(self.Zf, Xf, axis=self.cri.axisM),
                         self.cri.Nv, self.cri.axisN)
Example #32
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        # This calculation involves non-negligible computational cost
        # when Xf is None (i.e. the function is not being applied to
        # self.X).
        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(sl.inner(self.Zf, Xf, axis=self.cri.axisM),
                         self.cri.Nv, self.cri.axisN)
Example #33
    def cnst_A0T(self, Y0):
        r"""Compute :math:`A_0^T \mathbf{y}_0` component of
        :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
        """

        # This calculation involves non-negligible computational cost. It
        # should be possible to disable relevant diagnostic information
        # (dual residual) to avoid this cost.
        Y0f = sl.rfftn(Y0, None, self.cri.axisN)
        return sl.irfftn(
            sl.dot(self.B.T, np.conj(self.Df) * Y0f, axis=self.cri.axisC),
            self.cri.Nv, self.cri.axisN)
Example #34
    def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):

        if opt is None:
            opt = ConvBPDNRecTV.Options()

        # Infer problem dimensions and set relevant attributes of self
        self.cri = cbpdn.ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)

        # Call parent class __init__
        Nx = np.prod(self.cri.shpX)
        yshape = list(self.cri.shpX)
        yshape[self.cri.axisM] += len(self.cri.axisN) * self.cri.Cd
        super(ConvBPDNRecTV, self).__init__(Nx, yshape, yshape, S.dtype, opt)

        # Set l1 term scaling and weight array
        self.lmbda = self.dtype.type(lmbda)
        self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
        self.mu = self.dtype.type(mu)
        if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
            self.Wtv = np.asarray(opt['TVWeight'].reshape((1,)*(dimN+2) +
                                  opt['TVWeight'].shape), dtype=self.dtype)
        else:
            # Wtv is a scalar: no need to change shape
            self.Wtv = self.dtype.type(opt['TVWeight'])

        # Set penalty parameter
        self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),
                      dtype=self.dtype)

        # Set rho_xi attribute
        self.set_attr('rho_xi', opt['AutoRho','RsdlTarget'], dval=1.0,
                      dtype=self.dtype)

        # Reshape D and S to standard layout
        self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
        self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)

        # Compute signal in DFT domain
        self.Sf = sl.rfftn(self.S, None, self.cri.axisN)

        self.Gf, GHGf = sl.GradientFilters(self.cri.dimN+3, self.cri.axisN,
                                           self.cri.Nv, dtype=self.dtype)

        # Initialise byte-aligned arrays for pyfftw
        self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
        xfshp = list(self.cri.shpX)
        xfshp[dimN-1] = xfshp[dimN-1]//2 + 1
        self.Xf = sl.pyfftw_empty_aligned(
            xfshp, dtype=sl.complex_dtype(self.dtype))

        self.setdict()
Example #35
    def setcoef(self, Z):
        """Set coefficient array."""

        # This method largely replicates the method from parent class
        # ConvCnstrMOD_Consensus that it overrides. The inherited
        # method is overridden to avoid the superfluous computation of
        # self.ZSf in that method, which is not required for the
        # modified algorithm with mask decoupling
        if self.cri.Cd == 1 and self.cri.C > 1:
            Z = Z.reshape(self.cri.Nv + (1,) + (self.cri.Cx*self.cri.K,) +
                          (self.cri.M,))
        self.Z = np.asarray(Z, dtype=self.dtype)
        self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
Example #36
    def xstep(self, S, W, lmbda, dimK):
        """Solve CSC problem for training data `S`."""

        if self.opt['CUDA_CBPDN']:
            Z = cucbpdn.cbpdnmsk(self.D.squeeze(), S[..., 0], W.squeeze(),
                                 lmbda, self.opt['CBPDN'])
            Z = Z.reshape(self.cri.Nv + (1, 1, self.cri.M,))
            self.Z[:] = np.asarray(Z, dtype=self.dtype)
            self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
            self.Sf = sl.rfftn(S.reshape(self.cri.shpS), self.cri.Nv,
                               self.cri.axisN)
            self.xstep_itstat = None
        else:
            # Create X update object (external representation is expected!)
            xstep = cbpdn.ConvBPDNMaskDcpl(self.D.squeeze(), S, lmbda, W,
                                           self.opt['CBPDN'], dimK=dimK,
                                           dimN=self.cri.dimN)
            xstep.solve()
            self.Sf = sl.rfftn(S.reshape(self.cri.shpS), self.cri.Nv,
                               self.cri.axisN)
            self.setcoef(xstep.getcoef())
            self.xstep_itstat = xstep.itstat[-1] if xstep.itstat else None
Example #37
    def xstep(self):
        """Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`."""

        b = self.AHSf + np.sum(np.conj(self.GAf) *
            sl.rfftn(self.Y-self.U, axes=self.axes), axis=self.Y.ndim-1)
        self.Xf = b / (self.AHAf + self.GHGf)
        self.X = sl.irfftn(self.Xf, None, axes=self.axes)

        if self.opt['LinSolveCheck']:
            ax = (self.AHAf + self.GHGf)*self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #38
    def setdict(self, D=None):
        """Set dictionary array."""

        if D is not None:
            self.D = np.asarray(D, dtype=self.dtype)
        self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)

        self.GDf = self.Gf * (self.Wtv * self.Df)[..., np.newaxis]

        # Compute D^H S
        self.DSf = np.conj(self.Df) * self.Sf
        if self.cri.Cd > 1:
            self.DSf = np.sum(self.DSf, axis=self.cri.axisC, keepdims=True)
Example #39
def cbpdn_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_Z_Y[k] - mp_Z_U[k]
    b = mp_DSf[k] + mp_xrho * sl.rfftn(YU, None, mp_cri.axisN)
    if mp_cri.Cd == 1:
        Xf = sl.solvedbi_sm(mp_Df, mp_xrho, b, axis=mp_cri.axisM)
    else:
        Xf = sl.solvemdbi_ism(mp_Df, mp_xrho, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example #40
def cbpdn_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_Z_Y[k] - mp_Z_U[k]
    b = mp_DSf[k] + mp_xrho * sl.rfftn(YU, None, mp_cri.axisN)
    if mp_cri.Cd == 1:
        Xf = sl.solvedbi_sm(mp_Df, mp_xrho, b, axis=mp_cri.axisM)
    else:
        Xf = sl.solvemdbi_ism(mp_Df, mp_xrho, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example #41
    def setdict(self, D=None):
        """Set dictionary array."""

        if D is not None:
            self.D = np.asarray(D, dtype=self.dtype)
        self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)

        self.GDf = self.Gf * (self.Wtv * self.Df)[..., np.newaxis]

        # Compute D^H S
        self.DSf = np.conj(self.Df) * self.Sf
        if self.cri.Cd > 1:
            self.DSf = np.sum(self.DSf, axis=self.cri.axisC, keepdims=True)
Example #42
    def xistep(self, i):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`
        component :math:`\mathbf{x}_i`.
        """

        self.YU[:] = self.Y - self.U[..., i]
        b = np.take(self.ZSf, [i], axis=self.cri.axisK) + \
            self.rho*sl.rfftn(self.YU, None, self.cri.axisN)

        self.Xf[..., i] = sl.solvedbi_sm(
            np.take(self.Zf, [i], axis=self.cri.axisK), self.rho, b,
            axis=self.cri.axisM)
        self.X[..., i] = sl.irfftn(self.Xf[..., i], self.cri.Nv,
                                   self.cri.axisN)
Example #43
    def setcoef(self, Z):
        """Set coefficient array."""

        # This method largely replicates the method from parent class
        # ConvCnstrMOD_Consensus that it overrides. The inherited
        # method is overridden to avoid the superfluous computation of
        # self.ZSf in that method, which is not required for the
        # modified algorithm with mask decoupling
        if self.cri.Cd == 1 and self.cri.C > 1:
            Z = Z.reshape(self.cri.Nv + (1, ) + (self.cri.Cx * self.cri.K, ) +
                          (self.cri.M, ))
        self.Z = np.asarray(Z, dtype=self.dtype)
        self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
Example #44
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.cgit = None
        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:], cgit = sl.solvemdbi_cg(self.Zf, self.rho, b,
                                           self.cri.axisM, self.cri.axisK,
                                           self.opt['CG', 'StopTol'],
                                           self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #45
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.cgit = None
        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho * sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:], cgit = sl.solvemdbi_cg(self.Zf, self.rho, b,
                                           self.cri.axisM, self.cri.axisK,
                                           self.opt['CG', 'StopTol'],
                                           self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #46
    def setcoef(self, Z):
        """Set coefficient array."""

        # If the dictionary has a single channel but the input (and
        # therefore also the coefficient map array) has multiple
        # channels, the channel index and multiple image index have
        # the same behaviour in the dictionary update equation: the
        # simplest way to handle this is to just reshape so that the
        # channels also appear on the multiple image index.
        if self.cri.Cd == 1 and self.cri.C > 1:
            Z = Z.reshape(self.cri.Nv + (1, ) + (self.cri.Cx * self.cri.K, ) +
                          (self.cri.M, ))
        self.Z[:] = np.asarray(Z, dtype=self.dtype)
        self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
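The reshape described in the comment above, moving channels onto the image index when the dictionary has a single channel, is just an axis merge. A toy NumPy illustration with made-up sizes:

import numpy as np

Nv, C, K, M = (8, 8), 3, 2, 5          # grid size, channels, images, filters
Z = np.arange(np.prod(Nv + (C, K, M)), dtype=float).reshape(Nv + (C, K, M))

# Single-channel dictionary, multi-channel input: fold the C channels onto
# the image index so the update sees C*K independent single-channel images
Zr = Z.reshape(Nv + (1, C * K, M))
print(Z.shape, '->', Zr.shape)          # (8, 8, 3, 2, 5) -> (8, 8, 1, 6, 5)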
Example #47
    def xistep(self, i):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`
        component :math:`\mathbf{x}_i`.
        """

        self.YU[:] = self.Y - self.U[..., i]
        b = np.take(self.ZSf, [i], axis=self.cri.axisK) + \
            self.rho*sl.rfftn(self.YU, None, self.cri.axisN)

        self.Xf[..., i] = sl.solvedbi_sm(
            np.take(self.Zf, [i], axis=self.cri.axisK), self.rho, b,
            axis=self.cri.axisM)
        self.X[..., i] = sl.irfftn(self.Xf[..., i], self.cri.Nv,
                                   self.cri.axisN)
Example #48
    def setdict(self, D=None):
        """Set dictionary array."""

        if D is not None:
            self.D = np.asarray(D, dtype=self.dtype)
        self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
        # Compute D^H S
        self.DSf = np.conj(self.Df) * self.Sf
        if self.cri.Cd > 1:
            self.DSf = np.sum(self.DSf, axis=self.cri.axisC, keepdims=True)
        if self.opt['HighMemSolve'] and self.cri.Cd == 1:
            self.c = sl.solvedbi_sm_c(self.Df, np.conj(self.Df),
                        self.rho*self.GHGf + self.rho, self.cri.axisM)
        else:
            self.c = None
Example #49
    def evaluate(self):
        """Evaluate functional value of previous iteration."""

        if self.opt['AccurateDFid']:
            if self.dmethod == 'fista':
                D = self.dstep.getdict(crop=False)
            else:
                D = self.dstep.var_y()
            if self.xmethod == 'fista':
                X = self.xstep.getcoef()
            else:
                X = self.xstep.var_y()
            Df = sl.rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
            Xf = sl.rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
            Sf = self.xstep.Sf
            Ef = sl.inner(Df, Xf, axis=self.xstep.cri.axisM) - Sf
            dfd = sl.rfl2norm2(
                Ef, self.xstep.S.shape, axis=self.xstep.cri.axisN) / 2.0
            rl1 = np.sum(np.abs(X))
            return dict(DFid=dfd,
                        RegL1=rl1,
                        ObjFun=dfd + self.xstep.lmbda * rl1)
        else:
            return None
Example #50
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to block vector
        :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
        \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
        """

        # This test reflects empirical evidence that two slightly
        # different implementations are faster for single or
        # multi-channel data. This kludge is intended to be temporary.
        if self.cri.Cd > 1:
            for i in range(self.Nb):
                self.xistep(i)
        else:
            self.YU[:] = self.Y[..., np.newaxis] - self.U
            b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
                + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
            for i in range(self.Nb):
                self.Xf[..., i] = sl.solvedbi_sm(self.Zf[..., [i], :],
                                                 self.rho,
                                                 b[..., i],
                                                 axis=self.cri.axisM)
            self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
            YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
            b = ZSfs + self.rho * sl.rfftn(YU, None, self.cri.axisN)
            Xf = self.swapaxes(self.Xf)
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: np.conj(self.Zf) * x
            ax = np.sum(ZHop(Zop(Xf)) + self.rho * Xf,
                        axis=self.cri.axisK,
                        keepdims=True)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #51
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #52
def cbpdn_setdict():
    """Set the dictionary for the cbpdn stage. There are no parameters
    or return values because all inputs and outputs are from and to
    global variables.
    """

    global mp_DSf
    # Set working dictionary for cbpdn step and compute DFT of dictionary
    # D and of D^T S
    mp_Df[:] = sl.rfftn(mp_D_Y, mp_cri.Nv, mp_cri.axisN)
    if mp_cri.Cd == 1:
        mp_DSf[:] = np.conj(mp_Df) * mp_Sf
    else:
        mp_DSf[:] = sl.inner(np.conj(mp_Df[np.newaxis, ...]),
                             mp_Sf,
                             axis=mp_cri.axisC + 1)
Example #53
    def obfn_f(self, Xf=None):
        r"""Compute data fidelity term :math:`(1/2) \| W (\sum_m
        \mathbf{d}_m * \mathbf{x}_{m} - \mathbf{s}) \|_2^2`.
        This is used for backtracking. Since the backtracking is
        computed in the DFT domain, it is important to preserve the
        DFT scaling.
        """

        if Xf is None:
            Xf = self.Xf

        Rf = self.eval_Rf(Xf)
        R = sl.irfftn(Rf, self.cri.Nv, self.cri.axisN)
        WRf = sl.rfftn(self.W * R, self.cri.Nv, self.cri.axisN)

        return 0.5 * np.linalg.norm(WRf.flatten(), 2)**2
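The remark about preserving DFT scaling reflects Parseval's relation: norms taken in the DFT domain differ from spatial-domain norms by a constant factor, so both sides of a backtracking comparison must use the same convention. A quick check with the full two-sided FFT, for which the relation is exact (the one-sided rfftn spectrum stores conjugate-symmetric bins only once, so it does not satisfy it directly):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((16, 16))
Xf = np.fft.fftn(x)

# Parseval: squared l2 norm in the DFT domain is x.size times the spatial one
assert np.isclose(np.linalg.norm(Xf)**2, x.size * np.linalg.norm(x)**2)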
Example #54
    def proximal_step(self, gradf=None):
        """Compute proximal update (gradient descent + constraint).
        Variables are mapped back and forth between input and
        frequency domains.
        """

        if gradf is None:
            gradf = self.eval_grad()

        self.Vf[:] = self.Yf - (1. / self.L) * gradf
        V = sl.irfftn(self.Vf, self.cri.Nv, self.cri.axisN)

        self.X[:] = self.eval_proxop(V)
        self.Xf = sl.rfftn(self.X, None, self.cri.axisN)

        return gradf
Example #55
    def xstep(self):
        """The xstep of the baseline consensus class from which this
        class is derived is re-used to implement the xstep of the
        modified algorithm by replacing ``self.ZSf``, which is constant
        in the baseline algorithm, with a quantity derived from the
        additional variables ``self.Y1`` and ``self.U1``. It is also
        necessary to set the penalty parameter to unity for the duration
        of the x step.
        """

        self.YU1[:] = self.Y1 - self.U1
        self.ZSf = np.conj(
            self.Zf) * (self.Sf + sl.rfftn(self.YU1, None, self.cri.axisN))
        rho = self.rho
        self.rho = 1.0
        super(ConvCnstrMODMaskDcpl_Consensus, self).xstep()
        self.rho = rho
Example #56
    def setcoef(self, A):
        """Set coefficient array."""

        # If the dictionary has a single channel but the input (and
        # therefore also the coefficient map array) has multiple
        # channels, the channel index and multiple image index have
        # the same behaviour in the dictionary update equation: the
        # simplest way to handle this is to just reshape so that the
        # channels also appear on the multiple image index.
        if self.cri.Cd == 1 and self.cri.C > 1:
            A = A.reshape(self.cri.Nv + (1, ) + (self.cri.Cx * self.cri.K, ) +
                          (self.cri.M, ))
        self.A = np.asarray(A, dtype=self.dtype)

        self.Af = sl.rfftn(self.A, self.cri.Nv, self.cri.axisN)
        # Compute X^H S
        self.ASf = np.sum(np.conj(self.Af) * self.Sf,
                          self.cri.axisK,
                          keepdims=True)
Example #57
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.cgit = None

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:], cgit = sl.solvemdbi_cg(self.Zf, 1.0, b,
                                self.cri.axisM, self.cri.axisK,
                                self.opt['CG', 'StopTol'],
                                self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #58
    def eval_grad(self):
        """Compute gradient in Fourier domain."""

        # Compute D X - S
        self.Ryf[:] = self.eval_Rf(self.Yf)

        # Map to spatial domain to multiply by mask
        Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
        # Multiply by mask
        self.WRy[:] = (self.W**2) * Ry
        # Map back to frequency domain
        WRyf = sl.rfftn(self.WRy, self.cri.Nv, self.cri.axisN)

        gradf = np.conj(self.Df) * WRyf

        # Multiple channel signal, multiple channel dictionary
        if self.cri.Cd > 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        return gradf
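The gradient above maps D X - S back to the signal domain to apply the mask and then forward again, i.e. it evaluates conj(Df) * FFT(W**2 * (D X - S)), the DFT-domain gradient of (1/2) || W (D X - S) ||_2^2. A self-contained single-filter NumPy sketch of the same round trip, with illustrative shapes and names rather than the class internals:

import numpy as np

rng = np.random.default_rng(0)
N = (8, 8)
d = np.zeros(N)
d[:3, :3] = rng.standard_normal((3, 3))       # filter, zero-padded to N
x = rng.standard_normal(N)                    # current coefficient map
sig = rng.standard_normal(N)                  # signal
W = (rng.random(N) > 0.25).astype(float)      # binary mask

Df, Xf = np.fft.rfftn(d), np.fft.rfftn(x)

# Residual D x - s, mapped to the signal domain so the mask can be applied
R = np.fft.irfftn(Df * Xf, s=N) - sig
WRf = np.fft.rfftn((W**2) * R)

# DFT-domain gradient of (1/2) || W (D x - s) ||_2^2 with respect to x
gradf = np.conj(Df) * WRf
grad = np.fft.irfftn(gradf, s=N)
print(grad.shape)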
Example #59
    def eval_gradf(self):
        """Compute gradient in Fourier domain."""

        # Compute X D - S
        self.Ryf = self.eval_Rf(self.Yf)

        # Map to spatial domain to multiply by mask
        Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
        # Multiply by mask
        WRy = (self.W**2) * Ry
        # Map back to frequency domain
        WRyf = sl.rfftn(WRy, self.cri.Nv, self.cri.axisN)

        gradf = sl.inner(np.conj(self.Zf), WRyf, axis=self.cri.axisK)

        # Multiple channel signal, single channel dictionary
        if self.cri.C > 1 and self.cri.Cd == 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        return gradf
Example #60
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        Zf = sl.rfftn(self.YU, None, self.cri.axisN)
        ZfQ = sl.dot(self.Q.T, Zf, axis=self.cri.axisC)
        b = self.DSfBQ + self.rho * ZfQ

        Xh = sl.solvedbi_sm(self.gDf, self.rho, b, self.c, axis=self.cri.axisM)
        self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * sl.inner(
                self.Df, self.Xf, axis=self.cri.axisM)
            DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = DDXfBB + self.rho * self.Xf
            b = sl.dot(self.B.T, self.DSf, axis=self.cri.axisC) + \
                self.rho * Zf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None