Example #1
def par_xstep(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of optimizing
    :math:`\mathbf{x}`.

    Parameters
    ----------
    i : int
      Index of grouping to update

    """
    global mp_X
    global mp_DX
    YU0f = sl.rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
    YU1f = sl.rfftn(mp_Y1[mp_grp[i]:mp_grp[i+1]] -
                    1/mp_alpha*mp_U1[mp_grp[i]:mp_grp[i+1]], mp_Nv, mp_axisN)
    if mp_Cd == 1:
        b = np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]) * YU0f + mp_alpha**2*YU1f
        Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                            mp_cache[i], axis=mp_axisM)
    else:
        b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]), YU0f,
                     axis=mp_C) + mp_alpha**2*YU1f
        Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                              mp_axisM, mp_axisC)
    mp_X[mp_grp[i]:mp_grp[i+1]] = sl.irfftn(Xf, mp_Nv,
                                            mp_axisN)
    mp_DX[i] = sl.irfftn(sl.inner(mp_Df[mp_grp[i]:mp_grp[i+1]], Xf,
                                  mp_axisM), mp_Nv, mp_axisN)
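Most of the xstep implementations in these examples follow the same pattern: map the working variables to the DFT domain with sl.rfftn, solve a diagonal block linear system via the Sherman-Morrison formula (sl.solvedbi_sm), and map the solution back with sl.irfftn. Below is a minimal self-contained sketch of that pattern with hypothetical shapes (it is not taken from any of the examples), assuming numpy and sporco.linalg are importable as np and sl:

import numpy as np
import sporco.linalg as sl

N, M, rho = 16, 4, 1.0
# Hypothetical dictionary and right-hand side, already in the DFT domain
Df = np.random.randn(N, N, M) + 1j * np.random.randn(N, N, M)
b = np.random.randn(N, N, M) + 1j * np.random.randn(N, N, M)
# Solve (D^H D + rho I) x = b independently at each frequency
Xf = sl.solvedbi_sm(Df, rho, b, axis=-1)
# Verify by applying the system operator, as the LinSolveCheck branches
# in the examples below do
ax = np.conj(Df) * sl.inner(Df, Xf, axis=-1) + rho * Xf
print(sl.rrs(ax, b))  # relative residual, should be near zero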
Example #2
def ccmodmd_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_D_Y0 - mp_D_U0[k]
    YU1 = mp_D_Y1[k] + mp_S[k] - mp_D_U1[k]
    b = sl.rfftn(YU0, None, mp_cri.axisN) + \
      np.conj(mp_Zf[k]) * sl.rfftn(YU1, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], 1.0, b, axis=mp_cri.axisM)
    mp_D_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(Xf, mp_Zf[k]), mp_cri.Nv, mp_cri.axisN)
Example #3
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        Zf = sl.rfftn(self.YU, None, self.cri.axisN)
        Z0f = self.block_sep0(Zf)
        Z1f = self.block_sep1(Zf)

        DZ0f = np.conj(self.Df) * Z0f
        DZ0fBQ = sl.dot(self.B.dot(self.Q).T, DZ0f, axis=self.cri.axisC)
        Z1fQ = sl.dot(self.Q.T, Z1f, axis=self.cri.axisC)
        b = DZ0fBQ + Z1fQ

        Xh = sl.solvedbd_sm(self.gDf, (self.mu / self.rho) * self.GHGf + 1.0,
                            b, self.c, axis=self.cri.axisM)
        self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf,
                                               axis=self.cri.axisM)
            DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = self.rho * (DDXfBB + self.Xf) + \
                 self.mu * self.GHGf * self.Xf
            b = self.rho * (sl.dot(self.B.T, DZ0f, axis=self.cri.axisC)
                            + Z1f)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #4
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)

        # The sum is over the extra axis indexing spatial gradient
        # operators G_i, *not* over axisM
        b = self.DSf + self.rho*(YUf[..., -1] + self.Wtv * np.sum(
            np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))

        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(
                self.Df, self.rho*self.GHGf + self.rho, b, self.c,
                self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(
                self.Df, self.rho*self.GHGf + self.rho, b, self.cri.axisM,
                self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                          axis=self.cri.axisC)
            ax = DHop(Dop(self.Xf)) + (self.rho*self.GHGf + self.rho)*self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
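The sl.rrs call in the LinSolveCheck branch above computes the relative residual of the solve; a sketch of an equivalent definition (hypothetical helper, not the library source):

import numpy as np

def rrs_equivalent(ax, b):
    """Relative residual ||b - ax||_2 / ||b||_2 of a linear solve."""
    return np.linalg.norm(b - ax) / np.linalg.norm(b)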
Example #5
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to block vector
        :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
        \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
        """

        # This test reflects empirical evidence that two slightly
        # different implementations are faster for single or
        # multi-channel data. This kludge is intended to be temporary.
        if self.cri.Cd > 1:
            for i in range(self.Nb):
                self.xistep(i)
        else:
            self.YU[:] = self.Y[..., np.newaxis] - self.U
            b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
                + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
            for i in range(self.Nb):
                self.Xf[..., i] = sl.solvedbi_sm(
                    self.Zf[..., [i], :], self.rho, b[..., i],
                    axis=self.cri.axisM)
            self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)


        if self.opt['LinSolveCheck']:
            ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
            YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
            b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN)
            Xf = self.swapaxes(self.Xf)
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: np.conj(self.Zf) * x
            ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK,
                        keepdims=True)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #6
    def reconstruct(self, X=None):
        """Reconstruct representation."""

        if X is None:
            X = self.X
        Xf = sl.rfftn(X, None, self.cri.axisN)
        Sf = np.sum(self.Df * Xf, axis=self.cri.axisM)
        return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
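The reconstruction above sums frequency-domain products over the filter axis, which is equivalent to summing circular convolutions of each filter with its coefficient map. A quick self-contained check of that equivalence with hypothetical shapes (filters and coefficient maps stacked on the last axis):

import numpy as np
import sporco.linalg as sl

N, M = 8, 3
D = np.random.randn(N, N, M)   # filters
X = np.random.randn(N, N, M)   # coefficient maps
Df = sl.rfftn(D, (N, N), (0, 1))
Xf = sl.rfftn(X, (N, N), (0, 1))
S1 = sl.irfftn(np.sum(Df * Xf, axis=-1), (N, N), (0, 1))
# Reference computed with the full complex FFT
S2 = np.real(np.fft.ifft2(np.sum(np.fft.fft2(D, axes=(0, 1)) *
                                 np.fft.fft2(X, axes=(0, 1)), axis=-1),
                          axes=(0, 1)))
print(np.allclose(S1, S2))  # True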
Example #7
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
        """

        Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
          - self.Sf
        return (np.linalg.norm(self.W * sl.irfftn(Ef, self.cri.Nv,
                                                  self.cri.axisN))**2) / 2.0
Example #8
    def cnst_AT(self, X):
        r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is
        a component of ADMM problem constraint. In this case
        :math:`A^T \mathbf{x} = (G_r^T \;\; G_c^T) \mathbf{x}`.
        """

        Xf = sl.rfftn(X, axes=self.axes)
        return np.sum(sl.irfftn(np.conj(self.Gf)*Xf, self.axsz,
                                axes=self.axes), axis=self.Y.ndim-1)
Example #9
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
        """
        XF = sl.rfftn(self.obfn_fvar(), mp_Nv, mp_axisN)
        DX = np.moveaxis(sl.irfftn(sl.inner(mp_Df, XF, mp_axisM),
                                   mp_Nv, mp_axisN), mp_axisM,
                         self.cri.axisM)
        return np.sum((self.W*(DX-self.S))**2)/2.0
Example #10
    def cnst_A1T(self, Y1):
        r"""Compute :math:`A_1^T \mathbf{y}_1` component of
        :math:`A^T \mathbf{y}`. In this case :math:`A_1^T \mathbf{y}_1 =
        (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`.
        """

        Y1f = sl.rfftn(Y1, None, axes=self.cri.axisN)
        return sl.irfftn(np.conj(self.GDf) * Y1f, self.cri.Nv,
                         self.cri.axisN)
Example #11
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m
        \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`
        """

        Ef = self.eval_Rf(self.Xf)
        E = sl.irfftn(Ef, self.cri.Nv, self.cri.axisN)

        return (np.linalg.norm(self.W * E)**2) / 2.0
Example #12
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * sl.rfftn(YU0, None, mp_cri.axisN) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), sl.rfftn(YU0, None, mp_cri.axisN),
                     axis=mp_cri.axisC) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Example #13
    def cnst_A(self, X, Xf=None):
        r"""Compute :math:`A \mathbf{x}` component of ADMM problem
        constraint.  In this case :math:`A \mathbf{x} = (G_r^T \;\;
        G_c^T)^T \mathbf{x}`.
        """

        if Xf is None:
            Xf = sl.rfftn(X, axes=self.axes)
        return sl.irfftn(self.Gf*Xf[..., np.newaxis], self.axsz,
                         axes=self.axes)
Example #14
    def cnst_A0T(self, X):
        r"""Compute :math:`A_0^T \mathbf{x}` where :math:`A_0 \mathbf{x}`
        is a component of the ADMM problem constraint. In this case
        :math:`A_0^T \mathbf{x} = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots )
        \mathbf{x}`.
        """

        Xf = sl.rfftn(X, axes=self.cri.axisN)
        return self.Wtv[..., np.newaxis] * sl.irfftn(
            np.conj(self.Gf) * Xf[..., 0:-1], self.cri.Nv, axes=self.cri.axisN)
Example #15
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
        \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
        """

        if Xf is None:
            Xf = sl.rfftn(X, axes=self.cri.axisN)
        return self.Wtv[..., np.newaxis] * sl.irfftn(
            self.Gf * Xf[..., np.newaxis], self.cri.Nv, axes=self.cri.axisN)
Example #16
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(
            sl.dot(self.B, sl.inner(self.Df, Xf, axis=self.cri.axisM),
                   axis=self.cri.axisC), self.cri.Nv, self.cri.axisN)
Example #17
def ccmod_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_D_Y - mp_D_U[k]
    b = mp_ZSf[k] + mp_drho * sl.rfftn(YU, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], mp_drho, b, axis=mp_cri.axisM)
    mp_D_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example #18
    def reconstruct(self, D=None):
        """Reconstruct representation."""

        if D is None:
            Df = self.Xf
        else:
            Df = sl.rfftn(D, None, self.cri.axisN)

        Sf = np.sum(self.Zf * Df, axis=self.cri.axisM)
        return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
Example #19
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:] = sl.solvemdbi_ism(self.Zf, self.rho, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
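sl.solvemdbi_ism solves, independently at each frequency, systems of the form (rho I + sum_k a_k a_k^H) x = b by iterated application of the Sherman-Morrison formula. A small sketch with hypothetical shapes (axis 1 indexing k, axis 2 indexing m), verified the same way the LinSolveCheck branches elsewhere in these examples do:

import numpy as np
import sporco.linalg as sl

N, K, M, rho = 8, 3, 4, 1.0
Zf = np.random.randn(N, K, M) + 1j * np.random.randn(N, K, M)
b = np.random.randn(N, 1, M) + 1j * np.random.randn(N, 1, M)
Xf = sl.solvemdbi_ism(Zf, rho, b, 2, 1)  # axisM=2, axisK=1
ax = np.sum(np.conj(Zf) * sl.inner(Zf, Xf, axis=2), axis=1,
            keepdims=True) + rho * Xf
print(sl.rrs(ax, b))  # relative residual, should be near zero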
Example #20
    def compute_residuals(self):
        """Compute residuals and stopping thresholds. The parent class
        method is overridden to ensure that the residual calculations
        include the additional variables introduced in the modification
        to the baseline algorithm.
        """

        # The full primal residual is straightforward to compute from
        # the primal residuals for the baseline algorithm and for the
        # additional variables
        r0 = self.rsdl_r(self.AXnr, self.Y)
        r1 = self.AX1nr - self.Y1 - self.S
        r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))

        # The full dual residual is more complicated to compute than the
        # full primal residual
        ATU = self.swapaxes(self.U) + sl.irfftn(
            np.conj(self.Zf) * sl.rfftn(self.U1, self.cri.Nv, self.cri.axisN),
            self.cri.Nv, self.cri.axisN)
        s = self.rho * np.linalg.norm(ATU)

        # The normalisation factor for the full primal residual is also not
        # straightforward
        nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 +
                      np.linalg.norm(self.AX1nr)**2)
        nY = np.sqrt(np.linalg.norm(self.Y)**2 +
                     np.linalg.norm(self.Y1)**2)
        rn = max(nAX, nY, np.linalg.norm(self.S))

        # The normalisation factor for the full dual residual is
        # straightforward to compute
        sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 +
                                np.linalg.norm(self.U1)**2)

        # Final residual values and stopping tolerances depend on
        # whether standard or normalised residuals are specified via the
        # options object
        if self.opt['AutoRho', 'StdResiduals']:
            epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
                rn*self.opt['RelStopTol']
            edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
                sn*self.opt['RelStopTol']
        else:
            if rn == 0.0:
                rn = 1.0
            if sn == 0.0:
                sn = 1.0
            r /= rn
            s /= sn
            epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
                self.opt['RelStopTol']
            edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
                self.opt['RelStopTol']

        return r, s, epri, edua
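For reference, the baseline quantities that this override extends are the standard ADMM residuals and stopping thresholds (Boyd et al., 2011): primal residual r = ||Ax + By - c||_2, dual residual s = rho * ||A^T B (y^(j) - y^(j-1))||_2, with tolerances e_pri = sqrt(Nc) * e_abs + e_rel * max{||Ax||_2, ||By||_2, ||c||_2} and e_dua = sqrt(Nx) * e_abs + e_rel * rho * ||A^T u||_2. The code above assembles the same quantities with the additional variable pair included.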
Example #21
    def reconstruct(self, D=None, X=None):
        """Reconstruct representation."""

        if D is None:
            D = self.getdict(crop=False)
        if X is None:
            X = self.getcoef()
        Df = sl.rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
        Xf = sl.rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
        DXf = sl.inner(Df, Xf, axis=self.xstep.cri.axisM)
        return sl.irfftn(DXf, self.xstep.cri.Nv, self.xstep.cri.axisN)
Example #22
    def cnst_A0T(self, Y0):
        r"""Compute :math:`A_0^T \mathbf{y}_0` component of
        :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
        """

        # This calculation involves non-negligible computational cost. It
        # should be possible to disable relevant diagnostic information
        # (dual residual) to avoid this cost.
        Y0f = sl.rfftn(Y0, None, self.cri.axisN)
        return sl.irfftn(
            sl.dot(self.B.T, np.conj(self.Df) * Y0f, axis=self.cri.axisC),
            self.cri.Nv, self.cri.axisN)
Example #23
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        # This calculation involves non-negligible computational cost
        # when Xf is None (i.e. the function is not being applied to
        # self.X).
        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(sl.inner(self.Zf, Xf, axis=self.cri.axisM),
                         self.cri.Nv, self.cri.axisN)
Example #24
def cbpdn_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_Z_Y[k] - mp_Z_U[k]
    b = mp_DSf[k] + mp_xrho * sl.rfftn(YU, None, mp_cri.axisN)
    if mp_cri.Cd == 1:
        Xf = sl.solvedbi_sm(mp_Df, mp_xrho, b, axis=mp_cri.axisM)
    else:
        Xf = sl.solvemdbi_ism(mp_Df, mp_xrho, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example #25
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.cgit = None
        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho * sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:], cgit = sl.solvemdbi_cg(self.Zf, self.rho, b,
                                           self.cri.axisM, self.cri.axisK,
                                           self.opt['CG', 'StopTol'],
                                           self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #26
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.cgit = None
        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:], cgit = sl.solvemdbi_cg(self.Zf, self.rho, b,
                                           self.cri.axisM, self.cri.axisK,
                                           self.opt['CG', 'StopTol'],
                                           self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #27
    def xstep(self):
        """Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`."""

        b = self.AHSf + self.rho * np.sum(
            np.conj(self.Gf) * sl.rfftn(self.Y - self.U, axes=self.axes),
            axis=self.Y.ndim - 1)
        self.Xf = b / (self.AHAf + self.rho * self.GHGf)
        self.X = sl.irfftn(self.Xf, None, axes=self.axes)

        if self.opt['LinSolveCheck']:
            ax = (self.AHAf + self.rho * self.GHGf) * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
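Since A and G here are both circular convolutions, the normal-equation matrix is diagonal in the DFT domain, so the solve is a pointwise division rather than a Sherman-Morrison step. A minimal 1-D sketch of the same computation with hypothetical signals:

import numpy as np

N, rho = 16, 0.5
a = np.random.randn(N)   # kernel of A
g = np.random.randn(N)   # kernel of G
s = np.random.randn(N)   # signal
y = np.random.randn(N)   # stand-in for Y - U
Af, Gf, Sf, Yf = np.fft.fft(a), np.fft.fft(g), np.fft.fft(s), np.fft.fft(y)
b = np.conj(Af) * Sf + rho * np.conj(Gf) * Yf
xf = b / (np.conj(Af) * Af + rho * np.conj(Gf) * Gf)
x = np.real(np.fft.ifft(xf))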
Example #28
    def xistep(self, i):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`
        component :math:`\mathbf{x}_i`.
        """

        self.YU[:] = self.Y - self.U[..., i]
        b = np.take(self.ZSf, [i], axis=self.cri.axisK) + \
            self.rho*sl.rfftn(self.YU, None, self.cri.axisN)

        self.Xf[..., i] = sl.solvedbi_sm(
            np.take(self.Zf, [i], axis=self.cri.axisK), self.rho, b,
            axis=self.cri.axisM)
        self.X[..., i] = sl.irfftn(self.Xf[..., i], self.cri.Nv,
                                   self.cri.axisN)
Example #29
    def proximal_step(self, gradf=None):
        """ Compute proximal update (gradient descent + constraint)

        """

        if gradf is None:
            gradf = self.eval_gradf()


        Vf = self.Yf - (1. / self.L) * gradf
        V = sl.irfftn(Vf, self.cri.Nv, self.cri.axisN)

        self.X = self.eval_proxop(V)
        self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
Example #30
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #31
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #32
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        b = self.AHSf + self.rho*np.sum(
            np.conj(self.Gf)*sl.rfftn(self.Y-self.U, axes=self.axes),
            axis=self.Y.ndim-1)
        self.Xf = b / (self.AHAf + self.rho*self.GHGf)
        self.X = sl.irfftn(self.Xf, self.axsz, axes=self.axes)

        if self.opt['LinSolveCheck']:
            ax = (self.AHAf + self.rho*self.GHGf)*self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #33
    def proximal_step(self, gradf=None):
        """Compute proximal update (gradient descent + constraint).
        Variables are mapped back and forth between input and
        frequency domains.
        """

        if gradf is None:
            gradf = self.eval_grad()

        self.Vf[:] = self.Yf - (1. / self.L) * gradf
        V = sl.irfftn(self.Vf, self.cri.Nv, self.cri.axisN)

        self.X[:] = self.eval_proxop(V)
        self.Xf = sl.rfftn(self.X, None, self.cri.axisN)

        return gradf
Example #34
    def proximal_step(self, gradf=None):
        """Compute proximal update (gradient descent + constraint).
        Variables are mapped back and forth between input and
        frequency domains.
        """

        if gradf is None:
            gradf = self.eval_grad()

        self.Vf[:] = self.Yf - (1. / self.L) * gradf
        V = sl.irfftn(self.Vf, self.cri.Nv, self.cri.axisN)

        self.X[:] = self.eval_proxop(V)
        self.Xf = sl.rfftn(self.X, None, self.cri.axisN)

        return gradf
Example #35
    def xistep(self, i):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`
        component :math:`\mathbf{x}_i`.
        """

        self.YU[:] = self.Y - self.U[..., i]
        b = np.take(self.ZSf, [i], axis=self.cri.axisK) + \
            self.rho*sl.rfftn(self.YU, None, self.cri.axisN)

        self.Xf[..., i] = sl.solvedbi_sm(np.take(self.Zf, [i],
                                                 axis=self.cri.axisK),
                                         self.rho,
                                         b,
                                         axis=self.cri.axisM)
        self.X[..., i] = sl.irfftn(self.Xf[..., i], self.cri.Nv,
                                   self.cri.axisN)
Example #36
    def obfn_f(self, Xf=None):
        r"""Compute data fidelity term :math:`(1/2) \| W (\sum_m
        \mathbf{d}_m * \mathbf{x}_{m} - \mathbf{s}) \|_2^2`.
        This is used for backtracking. Since the backtracking is
        computed in the DFT domain, it is important to preserve the
        DFT scaling.
        """

        if Xf is None:
            Xf = self.Xf

        Rf = self.eval_Rf(Xf)
        R = sl.irfftn(Rf, self.cri.Nv, self.cri.axisN)
        WRf = sl.rfftn(self.W * R, self.cri.Nv, self.cri.axisN)

        return 0.5 * np.linalg.norm(WRf.flatten(), 2)**2
Example #37
    def obfn_f(self, Xf=None):
        r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m
        \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`.
        This is used for backtracking. Since the backtracking is
        computed in the DFT domain, it is important to preserve the
        DFT scaling.
        """

        if Xf is None:
            Xf = self.Xf

        Rf = self.eval_Rf(Xf)
        R = sl.irfftn(Rf, self.cri.Nv, self.cri.axisN)
        WRf = sl.rfftn(self.W * R, self.cri.Nv, self.cri.axisN)

        return 0.5 * np.linalg.norm(WRf.flatten(), 2)**2
Example #38
    def relax_AX(self):
        """The parent class method that this method overrides only
        implements the relaxation step for the variables of the baseline
        consensus algorithm. This method calls the overridden method and
        then implements the relaxation step for the additional variables
        required for the mask decoupling modification to the baseline
        algorithm.
        """

        super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
        self.AX1nr = sl.irfftn(
            sl.inner(self.Zf, self.swapaxes(self.Xf), axis=self.cri.axisM),
            self.cri.Nv, self.cri.axisN)
        if self.rlx == 1.0:
            self.AX1 = self.AX1nr
        else:
            alpha = self.rlx
            self.AX1 = alpha * self.AX1nr + (1 - alpha) * (self.Y1 + self.S)
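The relaxation of the additional variable follows the standard ADMM over-relaxation scheme, in which A x^(j+1) is replaced by alpha * A x^(j+1) - (1 - alpha) * (B y^(j) - c) in the subsequent updates. Here the relevant constraint is A1 x = y1 + s, so the relaxed quantity becomes alpha * AX1nr + (1 - alpha) * (Y1 + S), which is exactly the expression in the final line above.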
Example #39
    def eval_gradf(self):
        """Compute gradient in Fourier domain."""

        # Compute X D - S
        self.Ryf = self.eval_Rf(self.Yf)

        # Map to spatial domain to multiply by mask
        Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
        # Multiply by mask
        WRy = (self.W**2) * Ry
        # Map back to frequency domain
        WRyf = sl.rfftn(WRy, self.cri.Nv, self.cri.axisN)

        gradf = sl.inner(np.conj(self.Zf), WRyf, axis=self.cri.axisK)

        # Multiple channel signal, single channel dictionary
        if self.cri.C > 1 and self.cri.Cd == 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        return gradf
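Both eval_gradf here and eval_grad in the next example compute the masked data fidelity gradient, which for a fidelity term (1/2) ||W (A v - s)||_2^2 is A^H W^2 (A v - s). The round trip through the spatial domain exists only because the mask W acts pointwise there; the correlation with the conjugated operator is then cheaper back in the DFT domain.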
Example #40
    def eval_grad(self):
        """Compute gradient in Fourier domain."""

        # Compute D X - S
        self.Ryf[:] = self.eval_Rf(self.Yf)

        # Map to spatial domain to multiply by mask
        Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
        # Multiply by mask
        self.WRy[:] = (self.W**2) * Ry
        # Map back to frequency domain
        WRyf = sl.rfftn(self.WRy, self.cri.Nv, self.cri.axisN)

        gradf = np.conj(self.Df) * WRyf

        # Multiple channel signal, multiple channel dictionary
        if self.cri.Cd > 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        return gradf
Example #41
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.cgit = None

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:], cgit = sl.solvemdbi_cg(self.Zf, 1.0, b,
                                           self.cri.axisM, self.cri.axisK,
                                           self.opt['CG', 'StopTol'],
                                           self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #42
def derivD_spdomain(cri, Xr, Sr, Df, Xf, dict_Nv):
    """Compute the gradient of the reconstruction error with respect to
    the dictionary in the spatial domain, by correlating the residual
    with circularly shifted copies of the coefficient maps."""

    # Residual D * X - S in the spatial domain
    B = sl.irfftn(sl.inner(Df, Xf, axis=cri.axisM), s=cri.Nv,
                  axes=cri.axisN) - Sr
    B = B[np.newaxis, np.newaxis]
    Xshifted = np.ones(dict_Nv + Xr.shape) * Xr

    # Axes 0, 1 of Xshifted index the filter support; axes I, J are the
    # spatial axes along which the coefficient maps are shifted
    I = 2
    J = 3

    for n1 in range(dict_Nv[0]):
        for n2 in range(dict_Nv[1]):
            Xshifted[n1][n2] = np.roll(Xshifted[n1][n2], (n1, n2),
                                       axis=(I, J))
    ret = np.sum(np.conj(B) * Xshifted, axis=(I, J, 2 + cri.axisK),
                 keepdims=True)
    return ret[:, :, 0, 0]
Example #43
    def solve(self):
        """Call the solve method of the inner KConvBPDN object and return the
        result.
        """

        itst = []

        # Main optimisation iterations
        for self.j in range(self.j, self.j + self.opt['MaxMainIter']):

            for l in range(self.cri.dimN):

                # Pre x-step
                Wl = self.convolvedict(l)  # convolvedict
                self.xstep[l].setdictf(Wl)  # setdictf

                # Solve KCSC
                self.xstep[l].solve()

                # Post x-step
                Kl = np.moveaxis(self.xstep[l].getcoef().squeeze(), [0, 1],
                                 [1, 0])
                self.Kf[l] = sl.fftn(Kl, None, [0])  # Update Kruskal

                # IterationStats
                xitstat = self.xstep[l].itstat[-1] if self.xstep[l].itstat \
                    else self.xstep[l].IterationStats(
                        *([0.0] * len(self.xstep[l].IterationStats._fields)))

                itst += self.isc_lst[l].iterstats(self.j, 0, xitstat,
                                                  0)  # Accumulate

            self.itstat.append(
                self.isc(*itst))  # Cast to global itstats and store

        # Decomposed ifftn
        for l in range(self.cri.dimN):
            self.K[l] = sl.irfftn(self.Kf[l], self.cri.Nv[l],
                                  [0])  # ifft transform

        self.j += 1
Example #44
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        Zf = sl.rfftn(self.YU, None, self.cri.axisN)
        ZfQ = sl.dot(self.Q.T, Zf, axis=self.cri.axisC)
        b = self.DSfBQ + self.rho * ZfQ

        Xh = sl.solvedbi_sm(self.gDf, self.rho, b, self.c, axis=self.cri.axisM)
        self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * sl.inner(
                self.Df, self.Xf, axis=self.cri.axisM)
            DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = DDXfBB + self.rho * self.Xf
            b = sl.dot(self.B.T, self.DSf, axis=self.cri.axisC) + \
                self.rho * Zf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #45
    def evaluate(self):
        """Evaluate functional value of previous iteration."""

        if self.opt['AccurateDFid']:
            DX = self.reconstruct()
            W = self.dstep.W
            S = self.dstep.S
        else:
            W = mp_W
            S = mp_S
            Xf = mp_Zf
            Df = mp_Df
            DX = sl.irfftn(
                sl.inner(Df[np.newaxis, ...], Xf,
                         axis=self.xstep.cri.axisM + 1),
                self.xstep.cri.Nv, np.array(self.xstep.cri.axisN) + 1)

        dfd = (np.linalg.norm(W * (DX - S))**2) / 2.0
        rl1 = np.sum(np.abs(self.getcoef()))
        obj = dfd + self.xstep.lmbda * rl1

        return (obj, dfd, rl1)
Example #46
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to block vector
        :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
        \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
        """

        # This test reflects empirical evidence that two slightly
        # different implementations are faster for single or
        # multi-channel data. This kludge is intended to be temporary.
        if self.cri.Cd > 1:
            for i in range(self.Nb):
                self.xistep(i)
        else:
            self.YU[:] = self.Y[..., np.newaxis] - self.U
            b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
                + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
            for i in range(self.Nb):
                self.Xf[..., i] = sl.solvedbi_sm(self.Zf[..., [i], :],
                                                 self.rho,
                                                 b[..., i],
                                                 axis=self.cri.axisM)
            self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
            YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
            b = ZSfs + self.rho * sl.rfftn(YU, None, self.cri.axisN)
            Xf = self.swapaxes(self.Xf)
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: np.conj(self.Zf) * x
            ax = np.sum(ZHop(Zop(Xf)) + self.rho * Xf,
                        axis=self.cri.axisK,
                        keepdims=True)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #47
    def dstep(self):
        """Compute dictionary update for training data of preceding
        :meth:`xstep`.
        """

        # Compute X D - S
        Ryf = sl.inner(self.Zf, self.Df, axis=self.cri.axisM) - self.Sf
        # Compute gradient
        gradf = sl.inner(np.conj(self.Zf), Ryf, axis=self.cri.axisK)

        # If multiple channel signal, single channel dictionary
        if self.cri.C > 1 and self.cri.Cd == 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        # Update gradient step
        self.eta = self.eta_a / (self.j + self.eta_b)

        # Compute gradient descent
        self.Gf[:] = self.Df - self.eta * gradf
        self.G = sl.irfftn(self.Gf, self.cri.Nv, self.cri.axisN)

        # Eval proximal operator
        self.Dprv[:] = self.D
        self.D[:] = self.Pcn(self.G)
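The step size in dstep follows the decaying schedule eta_j = eta_a / (j + eta_b), the classical Robbins-Monro-style choice for stochastic gradient methods: it satisfies sum_j eta_j = infinity while sum_j eta_j^2 < infinity, the usual conditions for convergence of online dictionary updates.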
Example #48
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        YUf0 = self.block_sep0(YUf)
        YUf1 = self.block_sep1(YUf)

        b = self.rho * np.sum(np.conj(self.GDf) * YUf1, axis=-1)
        if self.cri.Cd > 1:
            b = np.sum(b, axis=self.cri.axisC, keepdims=True)
        b += self.DSf + self.rho * YUf0

        # The structure of the linear system to be solved depends on the
        # number of channels in the signal and dictionary. Both branches are
        # the same in the single-channel signal case (the choice of handling
        # it via the 'else' branch is somewhat arbitrary).
        if self.cri.C > 1 and self.cri.Cd == 1:
            # Concatenate multiple GDf components on the final axis
            # of GDf (that indexes the number of gradient operators). For
            # multi-channel signals with a single-channel dictionary,
            # sl.solvemdbi_ism has to solve a linear system of rank dimN+1
            # (corresponding to the dictionary and a gradient operator per
            # spatial dimension)
            DfGDf = np.concatenate(
                [self.Df[..., np.newaxis]] +
                [np.sqrt(self.rho) * self.GDf[..., k, np.newaxis]
                 for k in range(self.GDf.shape[-1])], axis=-1)
            self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b[..., np.newaxis],
                                          self.cri.axisM, -1)[..., 0]
        else:
            # Concatenate multiple GDf components on axisC. For multi-channel
            # signals with a multi-channel dictionary, sl.solvemdbi_ism has
            # to solve a linear system of rank C.d (dimN+1) (corresponding to
            # the dictionary and a gradient operator per spatial dimension
            # for each channel) plus an identity.
            DfGDf = np.concatenate(
                [self.Df] +
                [np.sqrt(self.rho) * self.GDf[..., k]
                 for k in range(self.GDf.shape[-1])], axis=self.cri.axisC)
            self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b, self.cri.axisM,
                                          self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            if self.cri.C > 1 and self.cri.Cd == 1:
                Dop = lambda x: sl.inner(
                    DfGDf, x[..., np.newaxis], axis=self.cri.axisM)
                DHop = lambda x: sl.inner(np.conj(DfGDf), x, axis=-1)
                ax = DHop(Dop(self.Xf))[..., 0] + self.rho * self.Xf
            else:
                Dop = lambda x: sl.inner(DfGDf, x, axis=self.cri.axisM)
                DHop = lambda x: sl.inner(
                    np.conj(DfGDf), x, axis=self.cri.axisC)
                ax = DHop(Dop(self.Xf)) + self.rho * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #49
def mysolve(cri,
            Dr0,
            Xr,
            Sr,
            final_sigma,
            maxitr=40,
            param_mu=1,
            debug_dir=None):
    Dr = Dr0.copy()
    Xr = Xr.copy()
    Sr = Sr.copy()

    # Discrete Fourier transforms
    Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN)
    Sf = sl.rfftn(Sr, s=cri.Nv, axes=cri.axisN)
    Xf = sl.rfftn(Xr, s=cri.Nv, axes=cri.axisN)
    alpha = 1e0

    # Set up the sigma schedule
    first_sigma = Xr.max() * 4
    # Determine the constant c (c < 1) in the update sigma <- c * sigma
    c = (final_sigma / first_sigma)**(1 / (maxitr - 1))
    print("c = %.8f" % c)
    sigma_list = []
    sigma_list.append(first_sigma)
    for i in range(maxitr - 1):
        sigma_list.append(sigma_list[i] * c)
        print(sigma_list[-1])

    # Index slices specifying the region to which the dictionary is cropped
    crop_op = []
    for l in Dr.shape:
        crop_op.append(slice(0, l))
    crop_op = tuple(crop_op)
    print(crop_op)

    updcnt = 0
    for sigma in sigma_list:
        print("sigma = %.8f" % sigma)
        # print("l0norm: %f" % l0norm(Xr, sigma_list[-1]))
        # print('error1: ', l2norm(Sr - reconstruct(cri, Dr, Xr)))
        delta = Xr * np.exp(-(Xr * Xr) / (2 * sigma * sigma))
        # print("l2(Xr): %.6f, l2(delta): %.6f" % (l2norm(Xr), l2norm(delta)))
        Xr = Xr - param_mu * delta  # + np.random.randn(*Xr.shape)*sigma*1e-1
        Xf = sl.rfftn(Xr, cri.Nv, cri.axisN)
        # saveXhist(Xr, "./hist/%db.png" % reccnt)

        # Gradient descent on the dictionary
        B = sl.inner(Xf, Df, axis=cri.axisM) - Sf
        # Gradient of the objective (squared reconstruction error)
        derivDf = sl.inner(np.conj(Xf), B, axis=cri.axisK)

        def func(alpha):
            Df_ = Df - alpha * derivDf
            Dr_ = sl.irfftn(Df_, s=cri.Nv, axes=cri.axisN)[crop_op]
            Df_ = sl.rfftn(Dr_, s=cri.Nv, axes=cri.axisN)
            Sf_ = sl.inner(Df_, Xf, axis=cri.axisM)
            return l2norm(Sr - sl.irfftn(Sf_, s=cri.Nv, axes=cri.axisN))

        choice = np.array([func(alpha / 2),
                           func(alpha),
                           func(alpha * 2)]).argmin()
        alpha *= [0.5, 1, 2][choice]
        print("alpha: ", alpha)
        Df = Df - alpha * derivDf

        # Projection of the dictionary
        Dr = sl.irfftn(Df, s=cri.Nv, axes=cri.axisN)
        # Instantiate the projection operator (normalisation and zero-padding)
        Pcn = cnvrep.getPcn(Dr.shape, cri.Nv, cri.dimN, cri.dimCd, zm=False)
        Dr = Pcn(Dr)
        Dr = Dr[crop_op]
        print(l2norm(Dr.T[0]))

        # Projection of the coefficients
        Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN)
        b = sl.inner(Df, Xf, axis=cri.axisM) - Sf
        c = sl.inner(Df, np.conj(Df), axis=cri.axisM)
        Xf = Xf - np.conj(Df) / c * b
        Xr = sl.irfftn(Xf, s=cri.Nv, axes=cri.axisN)

        updcnt += 1

    plot.close()
    mplot.close()
    return Dr
Example #50
def func(alpha):
    Df_ = Df - alpha * derivDf
    Dr_ = sl.irfftn(Df_, s=cri.Nv, axes=cri.axisN)[crop_op]
    Df_ = sl.rfftn(Dr_, s=cri.Nv, axes=cri.axisN)
    Sf_ = sl.inner(Df_, Xf, axis=cri.axisM)
    return l2norm(Sr - sl.irfftn(Sf_, s=cri.Nv, axes=cri.axisN))
Example #51
def convert_to_X(Xf, cri):
    X = sl.irfftn(Xf, cri.Nv, cri.axisN).squeeze()
    return X
Example #52
def convert_to_S(Sf, cri):
    S = sl.irfftn(Sf, cri.Nv, cri.axisN).squeeze()
    return S
Example #53
def convert_to_D(Df, dsz, cri):
    D = sl.irfftn(Df, cri.Nv, cri.axisN)
    D = cr.bcrop(D, dsz, cri.dimN).squeeze()
    return D
Example #54
#                                   gray=True, idxexp=np.s_[:, 160:672])

img = mpimg.imread('barbara1.png')
np.random.seed(12345)
imgn = img + np.random.normal(0.0, 0.1, img.shape)

print("Noisy image PSNR:    %5.2f dB" % sm.psnr(img, imgn))
npd = 16
fltlmbd = 5.0
imgnl, imgnh = util.tikhonov_filter(imgn, fltlmbd, npd)
D = util.convdicts()['G:8x8x32']
D = D[:, :, 0:14]
# D = np.random.randn(8, 8, 14)
imgnpl, imgnph = util.tikhonov_filter(pad(imgn), fltlmbd, npd)
W = spl.irfftn(
    np.conj(spl.rfftn(D, imgnph.shape,
                      (0, 1))) * spl.rfftn(imgnph[..., np.newaxis], None,
                                           (0, 1)), imgnph.shape, (0, 1))
W = W**2
W = 1.0 / (np.maximum(np.abs(W), 1e-8))

lmbda = 1.5e-2
mu = 0.005
opt1 = cbpdn.ConvBPDN.Options({
    'Verbose': True,
    'MaxMainIter': 250,
    'HighMemSolve': True,
    'RelStopTol': 3e-3,
    'AuxVarObj': True,
    'L1Weight': W,
    'AutoRho': {
        'Enabled': False
    }
})
Example #55
def reconstruct(cri, Dr, Xr):
    Xf = sl.rfftn(Xr, s=cri.Nv, axes=cri.axisN)
    Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN)
    return sl.irfftn(sl.inner(Df, Xf, axis=cri.axisM),
                     s=cri.Nv,
                     axes=cri.axisN)
Example #56
def convDictLearn(cri,
                  Dr0,
                  dsz,
                  Sr,
                  final_sigma,
                  maxitr,
                  non_nega,
                  param_mu=1,
                  debug_dir=None):
    Dr = Dr0.copy()
    Sr = Sr.copy()

    # Initialise the coefficients with the minimum l2-norm solution
    Xr = l2norm_minimize(cri, Dr, Sr)

    # 2D discrete Fourier transforms
    Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN)
    Sf = sl.rfftn(Sr, s=cri.Nv, axes=cri.axisN)
    Xf = sl.rfftn(Xr, s=cri.Nv, axes=cri.axisN)
    alpha = 1e0

    # Set up the sigma schedule
    first_sigma = Xr.max() * 4
    # Determine the constant c (c < 1) in the update sigma <- c * sigma
    c = (final_sigma / first_sigma)**(1 / (maxitr - 1))
    print("c = %.8f" % c)
    sigma_list = []
    sigma_list.append(first_sigma)
    for i in range(maxitr - 1):
        sigma_list.append(sigma_list[i] * c)

    # Index slices specifying the region to which the dictionary is cropped
    crop_op = []
    for l in Dr.shape:
        crop_op.append(slice(0, l))
    crop_op = tuple(crop_op)

    # Instantiate the projection operator
    Pcn = cnvrep.getPcn(dsz, cri.Nv, cri.dimN, cri.dimCd, zm=False)

    updcnt = 0
    for sigma in sigma_list:
        print("sigma = %.8f" % sigma, end=" ")

        # Gradient descent on the coefficients
        delta = Xr * np.exp(-(Xr * Xr) / (2 * sigma * sigma))
        Xr = Xr - param_mu * delta
        Xf = sl.rfftn(Xr, cri.Nv, cri.axisN)
        print("l0norm = %i" % np.where(
            abs(Xr.transpose(3, 4, 2, 0, 1).squeeze()[0]) < final_sigma, 0,
            1).sum(),
              end=" ")

        # Gradient descent on the dictionary
        B = sl.inner(Xf, Df, axis=cri.axisM) - Sf
        derivDf = sl.inner(np.conj(Xf), B, axis=cri.axisK)

        def func(alpha):
            Df_ = Df - alpha * derivDf
            Dr_ = sl.irfftn(Df_, s=cri.Nv, axes=cri.axisN)[crop_op]
            Df_ = sl.rfftn(Dr_, s=cri.Nv, axes=cri.axisN)
            Sf_ = sl.inner(Df_, Xf, axis=cri.axisM)
            return myutil.l2norm(Sr - sl.irfftn(Sf_, s=cri.Nv, axes=cri.axisN))

        error_list = np.array([func(alpha / 2), func(alpha), func(alpha * 2)])
        choice = error_list.argmin()
        alpha *= [0.5, 1, 2][choice]
        print("alpha = %.8f" % alpha, end=" ")
        print("error = %5.5f" % error_list[choice], end=" ")
        Df = Df - alpha * derivDf

        # Projection of the dictionary: normalisation and zero-padding are
        # performed in a single step
        Dr = Pcn(sl.irfftn(Df, s=cri.Nv, axes=cri.axisN))[crop_op]

        # Projection of the coefficients
        Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN)
        b = sl.inner(Df, Xf, axis=cri.axisM) - Sf
        c = sl.inner(Df, np.conj(Df), axis=cri.axisM)
        Xf = Xf - np.conj(Df) / c * b
        Xr = sl.irfftn(Xf, s=cri.Nv, axes=cri.axisN)

        if non_nega:
            Xr = np.where(Xr < 0, 0, Xr)  # non-negativity constraint

        print("l0norm_projected = %i" % np.where(
            abs(Xr.transpose(3, 4, 2, 0, 1).squeeze()[0]) < final_sigma, 0,
            1).sum())
        updcnt += 1

    return Dr, Xr
Example #57
def to_spatial(cri, Af):
    return sl.irfftn(Af, s=cri.Nv, axes=cri.axisN)
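to_spatial inverts the corresponding sl.rfftn call, so a round trip reproduces the input. A quick check under assumed Nv and axisN values (the SimpleNamespace is a hypothetical stand-in for a full cnvrep indexing object):

import numpy as np
from types import SimpleNamespace
import sporco.linalg as sl

cri = SimpleNamespace(Nv=(8, 8), axisN=(0, 1))
A = np.random.randn(8, 8, 4)
Af = sl.rfftn(A, s=cri.Nv, axes=cri.axisN)
print(np.allclose(to_spatial(cri, Af), A))  # True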