Example No. 1
def par_xstep(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of optimizing
    :math:`\mathbf{x}`.

    Parameters
    ----------
    i : int
      Index of grouping to update

    """
    global mp_X
    global mp_DX
    YU0f = sl.rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
    YU1f = sl.rfftn(
        mp_Y1[mp_grp[i]:mp_grp[i + 1]] -
        1 / mp_alpha * mp_U1[mp_grp[i]:mp_grp[i + 1]], mp_Nv, mp_axisN)
    if mp_Cd == 1:
        b = np.conj(mp_Df[mp_grp[i]:mp_grp[i + 1]]) * YU0f + mp_alpha**2 * YU1f
        Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i + 1]],
                            mp_alpha**2,
                            b,
                            mp_cache[i],
                            axis=mp_axisM)
    else:
        # Sum over the channel axis (cf. the multi-channel branch of
        # cbpdnmd_xstep below)
        b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i + 1]]), YU0f,
                     axis=mp_axisC) + mp_alpha**2 * YU1f
        Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i + 1]], mp_alpha**2, b,
                              mp_axisM, mp_axisC)
    mp_X[mp_grp[i]:mp_grp[i + 1]] = sl.irfftn(Xf, mp_Nv, mp_axisN)
    mp_DX[i] = sl.irfftn(
        sl.inner(mp_Df[mp_grp[i]:mp_grp[i + 1]], Xf, mp_axisM), mp_Nv,
        mp_axisN)
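
Both branches above reduce to independent linear systems at each frequency bin. In the single-channel branch the system matrix is a rank-one update of a scaled identity, which sl.solvedbi_sm inverts in closed form via the Sherman-Morrison identity. A minimal NumPy sketch of that solve at one frequency bin (illustration only, not SPORCO code; all names are hypothetical):

import numpy as np

rng = np.random.default_rng(0)
M, rho = 8, 0.5
d = rng.standard_normal(M) + 1j * rng.standard_normal(M)  # filter vector at one bin
b = rng.standard_normal(M) + 1j * rng.standard_normal(M)  # right-hand side

# Sherman-Morrison:
#   (rho I + d d^H)^{-1} b = b/rho - d (d^H b) / (rho (rho + d^H d))
x_sm = b / rho - d * (np.vdot(d, b) / (rho * (rho + np.vdot(d, d))))

# Dense reference solve for verification
x_ref = np.linalg.solve(rho * np.eye(M) + np.outer(d, d.conj()), b)
assert np.allclose(x_sm, x_ref)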
Example No. 2
def ccmodmd_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_D_Y0 - mp_D_U0[k]
    YU1 = mp_D_Y1[k] + mp_S[k] - mp_D_U1[k]
    b = spl.rfftn(YU0, None, mp_cri.axisN) + \
        np.conj(mp_Zf[k]) * spl.rfftn(YU1, None, mp_cri.axisN)
    Xf = spl.solvedbi_sm(mp_Zf[k], 1.0, b, axis=mp_cri.axisM)
    mp_D_X[k] = spl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = spl.irfftn(spl.inner(Xf, mp_Zf[k]), mp_cri.Nv, mp_cri.axisN)
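
These steps all rest on the convolution theorem: pointwise multiplication in the DFT domain (e.g. np.conj(mp_Zf[k]) * spl.rfftn(YU1, ...)) corresponds to circular convolution or correlation in the signal domain. A self-contained NumPy check of the identity (illustration only):

import numpy as np

rng = np.random.default_rng(0)
N = 16
d = rng.standard_normal(N)
x = rng.standard_normal(N)

# Circular convolution computed directly
dx_direct = np.array([np.sum(d * np.roll(x[::-1], k + 1)) for k in range(N)])

# The same result via pointwise multiplication in the DFT domain
dx_fft = np.fft.irfft(np.fft.rfft(d) * np.fft.rfft(x), N)
assert np.allclose(dx_direct, dx_fft)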
Example No. 3
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to block vector
        :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
        \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
        """

        # This test reflects empirical evidence that two slightly
        # different implementations are faster for single or
        # multi-channel data. This kludge is intended to be temporary.
        if self.cri.Cd > 1:
            for i in range(self.Nb):
                self.xistep(i)
        else:
            self.YU[:] = self.Y[..., np.newaxis] - self.U
            b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
                + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
            for i in range(self.Nb):
                self.Xf[..., i] = sl.solvedbi_sm(
                    self.Zf[..., [i], :], self.rho, b[..., i],
                    axis=self.cri.axisM)
            self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)


        if self.opt['LinSolveCheck']:
            ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
            YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
            b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN)
            Xf = self.swapaxes(self.Xf)
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: np.conj(self.Zf) * x
            ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK,
                        keepdims=True)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
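
The LinSolveCheck branch verifies the solver output by applying the full forward operator and measuring a relative residual. Assuming sl.rrs has the usual relative-residual form, the pattern looks like this stand-alone sketch (hypothetical re-implementation):

import numpy as np

def rrs(ax, b):
    """Relative residual ||ax - b|| / ||b|| (hypothetical re-implementation)."""
    nb = np.linalg.norm(b.ravel())
    return np.linalg.norm((ax - b).ravel()) / nb if nb > 0 else 0.0

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5)) + 5 * np.eye(5)
b = rng.standard_normal(5)
x = np.linalg.solve(A, b)
print(rrs(A @ x, b))  # ~1e-16 for an accurate solve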
Example No. 4
    def reconstruct(self, X=None):
        """Reconstruct representation."""

        if X is None:
            X = self.X
        Xf = sl.rfftn(X, None, self.cri.axisN)
        Sf = np.sum(self.Df * Xf, axis=self.cri.axisM)
        return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
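
The reconstruction is the sum over the filter index m of the circular convolutions d_m * x_m, evaluated as a pointwise product and sum in the DFT domain. A NumPy sketch of the same computation in 1-D, checked against direct circular convolution (illustration only):

import numpy as np

rng = np.random.default_rng(0)
N, M = 16, 4                      # signal length, number of filters
D = rng.standard_normal((N, M))   # filters, zero-padded to length N
X = rng.standard_normal((N, M))   # coefficient maps

Sf = np.sum(np.fft.rfft(D, axis=0) * np.fft.rfft(X, axis=0), axis=1)
S = np.fft.irfft(Sf, N)

# Direct evaluation of sum_m d_m (*) x_m for comparison
S_ref = np.zeros(N)
for m in range(M):
    for k in range(N):
        S_ref[k] += np.sum(D[:, m] * X[(k - np.arange(N)) % N, m])
assert np.allclose(S, S_ref)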
Example No. 5
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
        """
        XF = sl.rfftn(self.obfn_fvar(), mp_Nv, mp_axisN)
        DX = np.moveaxis(
            sl.irfftn(sl.inner(mp_Df, XF, mp_axisM), mp_Nv, mp_axisN),
            mp_axisM, self.cri.axisM)
        return np.sum((self.W * (DX - self.S))**2) / 2.0
Example No. 6
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W (\sum_m
        \mathbf{d}_m * \mathbf{x}_{m} - \mathbf{s}) \|_2^2`
        """

        Ef = self.eval_Rf(self.Xf)
        E = sl.irfftn(Ef, self.cri.Nv, self.cri.axisN)

        return (np.linalg.norm(self.W * E)**2) / 2.0
Example No. 7
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
        """

        Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
          - self.Sf
        return (np.linalg.norm(self.W * sl.irfftn(Ef, self.cri.Nv,
                                                  self.cri.axisN))**2) / 2.0
Example No. 8
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * spl.rfftn(YU0, None, mp_cri.axisN) + \
            spl.rfftn(YU1, None, mp_cri.axisN)
        Xf = spl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = spl.inner(np.conj(mp_Df), spl.rfftn(YU0, None, mp_cri.axisN),
                      axis=mp_cri.axisC) + \
            spl.rfftn(YU1, None, mp_cri.axisN)
        Xf = spl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = spl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = spl.irfftn(spl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Example No. 9
def ccmod_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_D_Y - mp_D_U[k]
    b = mp_ZSf[k] + mp_drho * spl.rfftn(YU, None, mp_cri.axisN)
    Xf = spl.solvedbi_sm(mp_Zf[k], mp_drho, b, axis=mp_cri.axisM)
    mp_D_X[k] = spl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example No. 10
    def reconstruct(self, D=None):
        """Reconstruct representation."""

        if D is None:
            Df = self.Xf
        else:
            Df = sl.rfftn(D, None, self.cri.axisN)

        Sf = np.sum(self.Zf * Df, axis=self.cri.axisM)
        return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
Example No. 11
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:] = sl.solvemdbi_ism(self.Zf, self.rho, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
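
Here the system matrix at each frequency bin is a rank-K update of a scaled identity (one rank-one term per index on axisK), so a single Sherman-Morrison step no longer suffices; sl.solvemdbi_ism applies the identity iteratively, once per rank-one term. A sketch of that iterated solve at a single frequency bin (hypothetical names, not SPORCO code):

import numpy as np

def solve_rank_k(Z, rho, b):
    """Solve (rho I + Z Z^H) x = b by K successive Sherman-Morrison updates."""
    x = b / rho                  # A_0^{-1} b  with  A_0 = rho I
    V = Z / rho                  # A_0^{-1} z_j  for every column j
    for k in range(Z.shape[1]):
        zk = Z[:, k]
        denom = 1.0 + np.vdot(zk, V[:, k])
        x = x - V[:, k] * (np.vdot(zk, x) / denom)
        # Update A_k^{-1} z_j for the columns not yet incorporated
        coef = (zk.conj() @ V[:, k + 1:]) / denom
        V[:, k + 1:] = V[:, k + 1:] - np.outer(V[:, k], coef)
    return x

rng = np.random.default_rng(0)
M, K, rho = 6, 3, 0.7
Z = rng.standard_normal((M, K)) + 1j * rng.standard_normal((M, K))
b = rng.standard_normal(M) + 1j * rng.standard_normal(M)
x = solve_rank_k(Z, rho, b)
assert np.allclose(x, np.linalg.solve(rho * np.eye(M) + Z @ Z.conj().T, b))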
Example No. 12
    def compute_residuals(self):
        """Compute residuals and stopping thresholds. The parent class
        method is overridden to ensure that the residual calculations
        include the additional variables introduced in the modification
        to the baseline algorithm.
        """

        # The full primal residual is straightforward to compute from
        # the primal residuals for the baseline algorithm and for the
        # additional variables
        r0 = self.rsdl_r(self.AXnr, self.Y)
        r1 = self.AX1nr - self.Y1 - self.S
        r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))

        # The full dual residual is more complicated to compute than the
        # full primal residual
        ATU = self.swapaxes(self.U) + sl.irfftn(
            np.conj(self.Zf) * sl.rfftn(self.U1, self.cri.Nv, self.cri.axisN),
            self.cri.Nv, self.cri.axisN)
        s = self.rho * np.linalg.norm(ATU)

        # The normalisation factor for the full primal residual is also not
        # straightforward
        nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 +
                      np.linalg.norm(self.AX1nr)**2)
        nY = np.sqrt(np.linalg.norm(self.Y)**2 +
                     np.linalg.norm(self.Y1)**2)
        rn = max(nAX, nY, np.linalg.norm(self.S))

        # The normalisation factor for the full dual residual is
        # straightforward to compute
        sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 +
                                np.linalg.norm(self.U1)**2)

        # Final residual values and stopping tolerances depend on
        # whether standard or normalised residuals are specified via the
        # options object
        if self.opt['AutoRho', 'StdResiduals']:
            epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
                rn*self.opt['RelStopTol']
            edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
                sn*self.opt['RelStopTol']
        else:
            if rn == 0.0:
                rn = 1.0
            if sn == 0.0:
                sn = 1.0
            r /= rn
            s /= sn
            epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
                self.opt['RelStopTol']
            edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
                self.opt['RelStopTol']

        return r, s, epri, edua
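
The threshold formulas follow the standard ADMM stopping criteria of Boyd et al. (2011, Sec. 3.3). Extracted into a stand-alone helper for clarity (hypothetical code, mirroring the StdResiduals branch above):

import numpy as np

def stopping_tols(p, n, rn, sn, abs_tol, rel_tol):
    """Hypothetical helper: absolute-plus-relative stopping thresholds,
    with rn = max(||Ax||, ||By||, ||c||) and sn = rho * ||A^T u||."""
    epri = np.sqrt(p) * abs_tol + rn * rel_tol
    edua = np.sqrt(n) * abs_tol + sn * rel_tol
    return epri, edua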
Example No. 13
    def reconstruct(self, D=None, X=None):
        """Reconstruct representation."""

        if D is None:
            D = self.getdict(crop=False)
        if X is None:
            X = self.getcoef()
        Df = sl.rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
        Xf = sl.rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
        DXf = sl.inner(Df, Xf, axis=self.xstep.cri.axisM)
        return sl.irfftn(DXf, self.xstep.cri.Nv, self.xstep.cri.axisN)
Example No. 14
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        # This calculation involves non-negligible computational cost
        # when Xf is None (i.e. the function is not being applied to
        # self.X).
        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(sl.inner(self.Zf, Xf, axis=self.cri.axisM),
                         self.cri.Nv, self.cri.axisN)
Example No. 15
    def cnst_A0T(self, Y0):
        r"""Compute :math:`A_0^T \mathbf{y}_0` component of
        :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
        """

        # This calculation involves non-negligible computational cost. It
        # should be possible to disable relevant diagnostic information
        # (dual residual) to avoid this cost.
        Y0f = sl.rfftn(Y0, None, self.cri.axisN)
        return sl.irfftn(sl.inner(np.conj(self.Zf), Y0f,
                                  axis=self.cri.axisK), self.cri.Nv,
                         self.cri.axisN)
Example No. 16
def cbpdn_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_Z_Y[k] - mp_Z_U[k]
    b = mp_DSf[k] + mp_xrho * spl.rfftn(YU, None, mp_cri.axisN)
    if mp_cri.Cd == 1:
        Xf = spl.solvedbi_sm(mp_Df, mp_xrho, b, axis=mp_cri.axisM)
    else:
        Xf = spl.solvemdbi_ism(mp_Df, mp_xrho, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = spl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example No. 17
    def xistep(self, i):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`
        component :math:`\mathbf{x}_i`.
        """

        self.YU[:] = self.Y - self.U[..., i]
        b = np.take(self.ZSf, [i], axis=self.cri.axisK) + \
            self.rho*sl.rfftn(self.YU, None, self.cri.axisN)

        self.Xf[..., i] = sl.solvedbi_sm(
            np.take(self.Zf, [i], axis=self.cri.axisK),
            self.rho, b, axis=self.cri.axisM)
        self.X[..., i] = sl.irfftn(self.Xf[..., i], self.cri.Nv,
                                   self.cri.axisN)
Example No. 18
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.cgit = None
        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:], cgit = sl.solvemdbi_cg(self.Zf, self.rho, b,
                                           self.cri.axisM, self.cri.axisK,
                                           self.opt['CG', 'StopTol'],
                                           self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
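
sl.solvemdbi_cg replaces the iterated Sherman-Morrison solve with conjugate gradient iterations on the same Hermitian positive definite system, trading exactness for cost controllable via the StopTol and MaxIter options. A minimal CG loop of the same general form (illustration only, not the SPORCO implementation):

import numpy as np

def cg(Aop, b, x0, tol=1e-5, maxit=100):
    """Conjugate gradient for Hermitian positive definite A given as a function."""
    x = x0.copy()
    r = b - Aop(x)
    p = r.copy()
    rs = np.vdot(r, r).real
    for it in range(maxit):
        Ap = Aop(p)
        alpha = rs / np.vdot(p, Ap).real
        x = x + alpha * p
        r = r - alpha * Ap
        rs_new = np.vdot(r, r).real
        if np.sqrt(rs_new) < tol * np.linalg.norm(b):
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x, it + 1

rng = np.random.default_rng(0)
Z = rng.standard_normal((6, 3))
A = Z @ Z.T + 0.5 * np.eye(6)      # Hermitian positive definite
b = rng.standard_normal(6)
x, nit = cg(lambda v: A @ v, b, np.zeros(6))
assert np.allclose(A @ x, b, atol=1e-4)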
Example No. 19
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example No. 20
    def obfn_f(self, Xf=None):
        r"""Compute data fidelity term :math:`(1/2) \| W (\sum_m
        \mathbf{d}_m * \mathbf{x}_{m} - \mathbf{s}) \|_2^2`.
        This is used for backtracking. Since the backtracking is
        computed in the DFT, it is important to preserve the
        DFT scaling.
        """

        if Xf is None:
            Xf = self.Xf

        Rf = self.eval_Rf(Xf)
        R = sl.irfftn(Rf, self.cri.Nv, self.cri.axisN)
        WRf = sl.rfftn(self.W * R, self.cri.Nv, self.cri.axisN)

        return 0.5 * np.linalg.norm(WRf.flatten(), 2)**2
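
Why the DFT scaling matters here: for the full complex DFT, Parseval's relation ties signal-domain and frequency-domain energies together, but the rfftn half-spectrum drops the conjugate-symmetric bins, so a plain sum of squared magnitudes there is not the signal-domain energy. Evaluating the objective consistently in one domain avoids mixing the two scalings. A quick NumPy check (illustration only):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(16)

# Parseval for the full DFT: signal energy equals spectral energy / N
assert np.allclose(np.sum(x**2), np.sum(np.abs(np.fft.fft(x))**2) / x.size)

# The rfft half-spectrum does not satisfy the same relation as written
print(np.sum(np.abs(np.fft.rfft(x))**2) / x.size, np.sum(x**2))  # these differ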
Example No. 21
    def proximal_step(self, gradf=None):
        """Compute proximal update (gradient descent + constraint).
        Variables are mapped back and forth between input and
        frequency domains.
        """

        if gradf is None:
            gradf = self.eval_grad()

        self.Vf[:] = self.Yf - (1. / self.L) * gradf
        V = sl.irfftn(self.Vf, self.cri.Nv, self.cri.axisN)

        self.X[:] = self.eval_proxop(V)
        self.Xf = sl.rfftn(self.X, None, self.cri.axisN)

        return gradf
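
The proximal step is the classic forward-backward pattern: a gradient step of length 1/L on the smooth term followed by the proximal operator of the non-smooth term. The same pattern on a toy l1-regularised least squares problem (ISTA; illustration only, not SPORCO code):

import numpy as np

def soft(v, t):
    """Soft thresholding: proximal operator of t * ||.||_1."""
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 10))
s = rng.standard_normal(20)
L = np.linalg.norm(A, 2)**2        # Lipschitz constant of the gradient
lmbda = 0.1
x = np.zeros(10)
for _ in range(200):
    grad = A.T @ (A @ x - s)       # gradient of (1/2)||A x - s||^2
    x = soft(x - grad / L, lmbda / L)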
Example No. 22
    def relax_AX(self):
        """The parent class method that this method overrides only
        implements the relaxation step for the variables of the baseline
        consensus algorithm. This method calls the overridden method and
        then implements the relaxation step for the additional variables
        required for the mask decoupling modification to the baseline
        algorithm.
        """

        super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
        self.AX1nr = sl.irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf),
                                        axis=self.cri.axisM),
                               self.cri.Nv, self.cri.axisN)
        if self.rlx == 1.0:
            self.AX1 = self.AX1nr
        else:
            alpha = self.rlx
            self.AX1 = alpha*self.AX1nr + (1-alpha)*(self.Y1 + self.S)
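
The final lines implement standard ADMM over-relaxation: A x^{k+1} is replaced by a convex combination with the constraint-side iterate before the Y and U updates (relaxation parameters around 1.5 to 1.8 are commonly recommended). Since the constraint block here is A_1 x - y_1 = s, the constraint-side term is Y1 + S. As a generic sketch (hypothetical helper):

def relax(AX, cnst_side, alpha):
    """Over-relaxed A x: alpha * AX + (1 - alpha) * cnst_side; alpha = 1 disables."""
    return AX if alpha == 1.0 else alpha * AX + (1 - alpha) * cnst_side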
Example No. 23
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.cgit = None

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:], cgit = sl.solvemdbi_cg(
            self.Zf, 1.0, b, self.cri.axisM, self.cri.axisK,
            self.opt['CG', 'StopTol'], self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example No. 24
    def eval_grad(self):
        """Compute gradient in Fourier domain."""

        # Compute D X - S
        self.Ryf[:] = self.eval_Rf(self.Yf)

        # Map to spatial domain to multiply by mask
        Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
        # Multiply by mask
        self.WRy[:] = (self.W**2) * Ry
        # Map back to frequency domain
        WRyf = sl.rfftn(self.WRy, self.cri.Nv, self.cri.axisN)

        gradf = np.conj(self.Df) * WRyf

        # Multiple channel signal, multiple channel dictionary
        if self.cri.Cd > 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        return gradf
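
The returned quantity is the standard gradient of f(x) = (1/2)||W(d * x - s)||_2^2 expressed in the DFT domain: the correlation of d with the masked residual. A 1-D finite-difference check of that expression (illustration only, not SPORCO code):

import numpy as np

rng = np.random.default_rng(0)
N = 16
d, x, s = (rng.standard_normal(N) for _ in range(3))
W = rng.uniform(0.5, 1.5, N)          # mask / weighting

def f(v):
    Dv = np.fft.irfft(np.fft.rfft(d) * np.fft.rfft(v), N)
    return 0.5 * np.sum((W * (Dv - s))**2)

# Gradient via the frequency-domain expression used above
Dx = np.fft.irfft(np.fft.rfft(d) * np.fft.rfft(x), N)
grad = np.fft.irfft(np.conj(np.fft.rfft(d)) * np.fft.rfft(W**2 * (Dx - s)), N)

# Central finite differences
eps = 1e-6
fd = np.empty(N)
for i in range(N):
    e = np.zeros(N)
    e[i] = eps
    fd[i] = (f(x + e) - f(x - e)) / (2 * eps)
assert np.allclose(grad, fd, atol=1e-5)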
Example No. 25
    def evaluate(self):
        """Evaluate functional value of previous iteration."""

        if self.opt['AccurateDFid']:
            DX = self.reconstruct()
            W = self.dstep.W
            S = self.dstep.S
        else:
            W = mp_W
            S = mp_S
            Xf = mp_Zf
            Df = mp_Df
            DX = spl.irfftn(
                spl.inner(Df[np.newaxis, ...], Xf,
                          axis=self.xstep.cri.axisM + 1),
                self.xstep.cri.Nv, np.array(self.xstep.cri.axisN) + 1)

        dfd = (np.linalg.norm(W * (DX - S))**2) / 2.0
        rl1 = np.sum(np.abs(self.getcoef()))
        obj = dfd + self.xstep.lmbda * rl1

        return (obj, dfd, rl1)