Example #1
def par_xstep(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of optimizing
    :math:`\mathbf{x}`.

    Parameters
    ----------
    i : int
      Index of grouping to update

    """
    global mp_X
    global mp_DX
    YU0f = sl.rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
    YU1f = sl.rfftn(mp_Y1[mp_grp[i]:mp_grp[i+1]] -
                    1/mp_alpha*mp_U1[mp_grp[i]:mp_grp[i+1]], mp_Nv, mp_axisN)
    if mp_Cd == 1:
        b = np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]) * YU0f + mp_alpha**2*YU1f
        Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                            mp_cache[i], axis=mp_axisM)
    else:
        b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]), YU0f,
                     axis=mp_axisC) + mp_alpha**2*YU1f
        Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                              mp_axisM, mp_axisC)
    mp_X[mp_grp[i]:mp_grp[i+1]] = sl.irfftn(Xf, mp_Nv,
                                            mp_axisN)
    mp_DX[i] = sl.irfftn(sl.inner(mp_Df[mp_grp[i]:mp_grp[i+1]], Xf,
                                  mp_axisM), mp_Nv, mp_axisN)
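
Both branches reduce the x update to independent per-frequency linear solves. In the single-channel case, `sl.solvedbi_sm` solves a diagonal block system :math:`(\mathbf{a} \mathbf{a}^H + \rho I) \mathbf{x} = \mathbf{b}` at each frequency via the Sherman-Morrison formula (here with :math:`\rho = \alpha^2`). A minimal sketch of that solve, under the assumption that `ah` holds :math:`\mathbf{a}^H` with the filter axis given by `axis`, and omitting the cached denominator that sporco passes as `mp_cache[i]`:

import numpy as np

def solvedbi_sm_sketch(ah, rho, b, axis=-1):
    # Sketch only, not the sporco implementation: solve
    # (a a^H + rho I) x = b independently per frequency, where
    # ah = conj(a). Sherman-Morrison gives
    #   x = (b - a (a^H b) / (rho + a^H a)) / rho
    a = np.conj(ah)
    ahb = np.sum(ah * b, axis=axis, keepdims=True)        # a^H b
    aha = np.sum(np.abs(a)**2, axis=axis, keepdims=True)  # a^H a
    return (b - a * (ahb / (rho + aha))) / rho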
Example #2
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U

        b = self.DSf + self.rho * fftn(self.YU, None, self.cri.axisN)
        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(self.Df, self.rho, b, self.c,
                                        self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(self.Df, self.rho, b, self.cri.axisM,
                                          self.cri.axisC)

        self.X = ifftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                          axis=self.cri.axisC)
            ax = DHop(Dop(self.Xf)) + self.rho * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
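
The `LinSolveCheck` branch validates the solve by reapplying the system operator and measuring the relative residual of :math:`A \mathbf{x} = \mathbf{b}`. A hedged sketch of what `sl.rrs` computes:

import numpy as np

def rrs_sketch(ax, b):
    # Relative residual ||A x - b||_2 / ||b||_2 of a linear solve,
    # guarding against a zero right-hand side.
    nrm = np.linalg.norm(b.ravel())
    if nrm == 0.0:
        return 1.0
    return np.linalg.norm((ax - b).ravel()) / nrm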
Example #3
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)

        # The sum is over the extra axis indexing spatial gradient
        # operators G_i, *not* over axisM
        b = self.DSf + self.rho*(YUf[..., -1] + self.Wtv * np.sum(
            np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))

        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(
                self.Df, self.rho*self.GHGf + self.rho, b, self.c,
                self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(
                self.Df, self.rho*self.GHGf + self.rho, b, self.cri.axisM,
                self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                          axis=self.cri.axisC)
            ax = DHop(Dop(self.Xf)) + (self.rho*self.GHGf + self.rho)*self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
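
Here `self.Gf` stacks the frequency responses of the spatial gradient operators :math:`G_i` and `self.GHGf` is :math:`\sum_i G_i^H G_i`, which is diagonal in the frequency domain. A minimal sketch of how such arrays could be built for forward differences (a hypothetical helper, not the sporco construction, which stacks the operators on a trailing axis):

import numpy as np

def grad_filter_freq(shape):
    # Frequency responses of forward-difference operators along each
    # axis of `shape`, stacked on a leading axis, and sum_i |G_i|^2.
    g = np.zeros((len(shape),) + shape)
    for i in range(len(shape)):
        idx = [0] * len(shape)
        g[(i,) + tuple(idx)] = -1.0
        idx[i] = 1
        g[(i,) + tuple(idx)] = 1.0
    Gf = np.fft.fftn(g, axes=tuple(range(1, g.ndim)))
    GHGf = np.sum(np.conj(Gf) * Gf, axis=0).real
    return Gf, GHGf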
Example #5
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)

        # The sum is over the extra axis indexing spatial gradient
        # operators G_i, *not* over axisM
        b = self.DSf + self.rho * (YUf[..., -1] + self.Wtv * np.sum(
            np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))

        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(self.Df,
                                        self.rho * self.GHGf + self.rho, b,
                                        self.c, self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(self.Df,
                                          self.rho * self.GHGf + self.rho, b,
                                          self.cri.axisM, self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(
                    np.conj(self.Df), x, axis=self.cri.axisC)
            ax = DHop(Dop(
                self.Xf)) + (self.rho * self.GHGf + self.rho) * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #6
    def dstep(self, W):
        """Compute dictionary update for training data of preceding
        :meth:`xstep`.
        """

        # Compute residual X D - S in frequency domain
        Ryf = sl.inner(self.Zf, self.Df, axis=self.cri.axisM) - self.Sf
        # Transform to spatial domain, apply mask, and transform back to
        # frequency domain
        Ryf[:] = sl.rfftn(W * sl.irfftn(Ryf, self.cri.Nv, self.cri.axisN),
                          None, self.cri.axisN)
        # Compute gradient
        gradf = sl.inner(np.conj(self.Zf), Ryf, axis=self.cri.axisK)

        # If multiple channel signal, single channel dictionary
        if self.cri.C > 1 and self.cri.Cd == 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        # Update gradient step
        self.eta = self.eta_a / (self.j + self.eta_b)

        # Compute gradient descent
        self.Gf[:] = self.Df - self.eta * gradf
        self.G = sl.irfftn(self.Gf, self.cri.Nv, self.cri.axisN)

        # Eval proximal operator
        self.Dprv[:] = self.D
        self.D[:] = self.Pcn(self.G)
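
`self.Pcn` is the projection onto the dictionary constraint set, applied after each gradient step. A hedged sketch of one common choice (zero the filters outside their spatial support, then renormalise; the actual `Pcn` in sporco is configurable and built from the filter size specification):

import numpy as np

def pcn_sketch(G, support):
    # `support` is assumed to be a boolean mask of the allowed filter
    # support. Zero everything outside it, then scale each filter
    # (indexed by the last axis) to unit l2 norm.
    Gp = np.where(support, G, 0.0)
    axes = tuple(range(Gp.ndim - 1))
    nrm = np.sqrt(np.sum(Gp**2, axis=axes, keepdims=True))
    return Gp / np.maximum(nrm, 1e-12)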
Example #7
def convBPDN(cri,
             Dr0,
             Sr,
             final_sigma,
             maxitr,
             non_nega,
             param_mu=1,
             debug_dir=None):
    Dr = Dr0.copy()
    Sr = Sr.copy()

    # Initialise the coefficients with the minimum l2-norm solution
    Xr = l2norm_minimize(cri, Dr, Sr)

    # 2D discrete Fourier transform
    Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN)
    Sf = sl.rfftn(Sr, s=cri.Nv, axes=cri.axisN)
    Xf = sl.rfftn(Xr, s=cri.Nv, axes=cri.axisN)
    alpha = 1e0

    # Set the initial sigma value
    first_sigma = Xr.max() * 4
    # Constant c (c < 1) by which sigma is reduced at each step
    c = (final_sigma / first_sigma)**(1 / (maxitr - 1))
    print("c = %.8f" % c)
    sigma_list = []
    sigma_list.append(first_sigma)
    for i in range(maxitr - 1):
        sigma_list.append(sigma_list[i] * c)

    updcnt = 0
    for sigma in sigma_list:
        print("sigma = %.8f" % sigma)

        # Gradient descent step on the coefficients
        delta = Xr * np.exp(-(Xr * Xr) / (2 * sigma * sigma))
        Xr = Xr - param_mu * delta
        Xf = sl.rfftn(Xr, cri.Nv, cri.axisN)
        print("l0norm = %i" % np.where(
            abs(Xr.transpose(3, 4, 2, 0, 1).squeeze()[0]) < final_sigma, 0,
            1).sum(),
              end=" ")

        # Project the coefficients onto the constraint set D x = s
        Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN)
        b = sl.inner(Df, Xf, axis=cri.axisM) - Sf
        DDf = sl.inner(Df, np.conj(Df), axis=cri.axisM)  # per-frequency D D^H
        Xf = Xf - np.conj(Df) / DDf * b
        Xr = sl.irfftn(Xf, s=cri.Nv, axes=cri.axisN)
        if non_nega:
            Xr = np.where(Xr < 0, 0, Xr)  # non-negativity constraint
        print("l0norm = %i" % np.where(
            abs(Xr.transpose(3, 4, 2, 0, 1).squeeze()[0]) < final_sigma, 0,
            1).sum(),
              end=" ")

        # Xr = np.where(Xr < 1e-7, 0, Xr)
        updcnt += 1

    return Xr
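
The sigma schedule above is geometric: each value is the previous one scaled by :math:`c = (\sigma_{\mathrm{final}} / \sigma_{\mathrm{first}})^{1/(\mathrm{maxitr}-1)}`, running from `first_sigma` down to `final_sigma` in `maxitr` steps. The same list can be produced directly, equivalent up to floating-point rounding (the values below are placeholders):

import numpy as np

first_sigma, final_sigma, maxitr = 4.0, 1e-3, 50  # placeholder values
sigma_list = np.geomspace(first_sigma, final_sigma, maxitr)
assert np.isclose(sigma_list[0], first_sigma)
assert np.isclose(sigma_list[-1], final_sigma)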
Example #8
    def hessian_f(self, V):
        """Compute Hessian of :math:`f` applied to V."""

        hessfv = inner(self.Zf, V, axis=self.cri.axisM)
        hessfv = inner(np.conj(self.Zf), hessfv, axis=self.cri.axisK)

        # Multiple channel signal, single channel dictionary
        if self.cri.C > 1 and self.cri.Cd == 1:
            hessfv = np.sum(hessfv, axis=self.cri.axisC, keepdims=True)

        return hessfv
Example #9
def l2norm_minimize(cri, Dr, Sr):
    Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN) # implicitly zero-padding
    Sf = sl.rfftn(Sr, s=cri.Nv, axes=cri.axisN) # implicitly zero-padding
    Xf = np.conj(Df) / sl.inner(Df, np.conj(Df), axis=cri.axisM) * Sf
    Xr = sl.irfftn(Xf, s=cri.Nv, axes=cri.axisN)

    Sr_ = sl.irfftn(sl.inner(Df, Xf, axis=cri.axisM), s=cri.Nv, axes=cri.axisN)
    # print(l2norm(np.random.randn(*Xr.shape)))
    # print(l2norm(Xr))
    # print(l2norm(Sr - Sr_))
    po = np.stack((format_sig(Sr), format_sig(Sr_)), axis=1)
    saveimg2D(po, 'l2norm_minimization_test.png') # the right side is Sr_
    return Xr
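
This is the minimum :math:`\ell_2`-norm solution of the underdetermined frequency-domain system :math:`\hat{D} \hat{\mathbf{x}} = \hat{\mathbf{s}}`: the pseudoinverse gives :math:`\hat{\mathbf{x}} = \hat{D}^H (\hat{D} \hat{D}^H)^{-1} \hat{\mathbf{s}}`, and since :math:`\hat{D} \hat{D}^H = \sum_m |\hat{d}_m|^2` is a scalar at each frequency, the inverse reduces to the elementwise division in the code above.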
Example #10
    def xstep_check(self, b):
        r"""Check the minimisation of the Augmented Lagrangian with
        respect to :math:`\mathbf{x}` by method `xstep` defined in
        derived classes. This method should be called at the end of any
        `xstep` method.
        """

        if self.opt['LinSolveCheck']:
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: sl.inner(np.conj(self.Zf), x, axis=self.cri.axisK)
            ax = ZHop(Zop(self.Xf)) + self.rho * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #11
    def xstep_check(self, b):
        r"""Check the minimisation of the Augmented Lagrangian with
        respect to :math:`\mathbf{x}` by method `xstep` defined in
        derived classes. This method should be called at the end of any
        `xstep` method.
        """

        if self.opt['LinSolveCheck']:
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: sl.inner(np.conj(self.Zf), x,
                                      axis=self.cri.axisK)
            ax = ZHop(Zop(self.Xf)) + self.rho*self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #12
def l2norm_minimize(cri, Dr, Sr):
    Df = sl.rfftn(Dr, s=cri.Nv, axes=cri.axisN)  # implicitly zero-padding
    Sf = sl.rfftn(Sr, s=cri.Nv, axes=cri.axisN)  # implicitly zero-padding
    Xf = np.conj(Df) / sl.inner(Df, np.conj(Df), axis=cri.axisM) * Sf
    Xr = sl.irfftn(Xf, s=cri.Nv, axes=cri.axisN)

    return Xr
Example #13
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to block vector
        :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
        \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
        """

        # This test reflects empirical evidence that two slightly
        # different implementations are faster for single or
        # multi-channel data. This kludge is intended to be temporary.
        if self.cri.Cd > 1:
            for i in range(self.Nb):
                self.xistep(i)
        else:
            self.YU[:] = self.Y[..., np.newaxis] - self.U
            b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
                + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
            for i in range(self.Nb):
                self.Xf[..., i] = sl.solvedbi_sm(self.Zf[..., [i], :],
                                self.rho, b[..., i], axis=self.cri.axisM)
            self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)


        if self.opt['LinSolveCheck']:
            ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
            YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
            b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN)
            Xf = self.swapaxes(self.Xf)
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: np.conj(self.Zf) * x
            ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK,
                        keepdims=True)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #14
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`.
        """

        Ef = sl.inner(self.Df, self.Xf, axis=self.cri.axisM) - self.Sf
        return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
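
`sl.rfl2norm2` evaluates the squared :math:`\ell_2` norm of the spatial-domain residual directly from its `rfftn` coefficients via Parseval's theorem; because `rfftn` stores only the non-negative frequencies along the last transformed axis, most coefficients must be counted twice. A sketch under that assumption:

import numpy as np

def rfl2norm2_sketch(xf, xs_shape, axis=(0, 1)):
    # Squared l2 norm of the real array whose rfftn over `axis` is xf:
    # double every coefficient, then subtract the zero-frequency plane
    # and, for even lengths, the Nyquist plane, since those are not
    # duplicated in the rfftn layout.
    scl = 1.0 / np.prod([xs_shape[k] for k in axis])
    slc = (slice(None),) * axis[-1]
    nrm0 = np.linalg.norm(xf[slc + (0,)])
    nrm1 = np.linalg.norm(xf[slc + (-1,)]) \
        if xs_shape[axis[-1]] % 2 == 0 else 0.0
    return scl * (2.0 * np.linalg.norm(xf)**2 - nrm0**2 - nrm1**2)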
Example #15
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        Zf = sl.rfftn(self.YU, None, self.cri.axisN)
        Z0f = self.block_sep0(Zf)
        Z1f = self.block_sep1(Zf)

        DZ0f = np.conj(self.Df) * Z0f
        DZ0fBQ = sl.dot(self.B.dot(self.Q).T, DZ0f, axis=self.cri.axisC)
        Z1fQ = sl.dot(self.Q.T, Z1f, axis=self.cri.axisC)
        b = DZ0fBQ + Z1fQ

        Xh = sl.solvedbd_sm(self.gDf, (self.mu / self.rho) * self.GHGf + 1.0,
                            b, self.c, axis=self.cri.axisM)
        self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf,
                                               axis=self.cri.axisM)
            DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = self.rho * (DDXfBB + self.Xf) + \
                 self.mu * self.GHGf * self.Xf
            b = self.rho * (sl.dot(self.B.T, DZ0f, axis=self.cri.axisC)
                            + Z1f)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
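
`sl.solvedbd_sm` generalises the Sherman-Morrison solve from a scaled identity to a diagonal term, i.e. it solves :math:`(\mathbf{a} \mathbf{a}^H + \mathrm{diag}(\mathbf{d})) \mathbf{x} = \mathbf{b}` per frequency; the diagonal here is `(self.mu / self.rho) * self.GHGf + 1.0`. A hedged sketch omitting the cached denominator `self.c`:

import numpy as np

def solvedbd_sm_sketch(ah, d, b, axis=-1):
    # Sketch only: with ah = conj(a), solve (a a^H + diag(d)) x = b via
    #   x = b/d - (a/d) (a^H (b/d)) / (1 + a^H (a/d))
    a = np.conj(ah)
    bd = b / d
    ad = a / d
    ahbd = np.sum(ah * bd, axis=axis, keepdims=True)
    ahad = np.sum(ah * ad, axis=axis, keepdims=True)
    return bd - ad * (ahbd / (1.0 + ahad))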
Example #17
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to block vector
        :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
        \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
        """

        # This test reflects empirical evidence that two slightly
        # different implementations are faster for single or
        # multi-channel data. This kludge is intended to be temporary.
        if self.cri.Cd > 1:
            for i in range(self.Nb):
                self.xistep(i)
        else:
            self.YU[:] = self.Y[..., np.newaxis] - self.U
            b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
                + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
            for i in range(self.Nb):
                self.Xf[..., i] = sl.solvedbi_sm(
                    self.Zf[..., [i], :], self.rho, b[..., i],
                    axis=self.cri.axisM)
            self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)


        if self.opt['LinSolveCheck']:
            ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
            YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
            b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN)
            Xf = self.swapaxes(self.Xf)
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: np.conj(self.Zf) * x
            ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK,
                        keepdims=True)
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #18
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`.
        """

        Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
          - self.Sf
        return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
Example #19
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`.
        """

        Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
          - self.Sf
        return fl2norm2(Ef, axis=self.cri.axisN) / 2.0
Example #20
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| D X B - S \|_2^2`.
        """

        DXBf = sl.dot(self.B, sl.inner(self.Df, self.obfn_fvarf(),
                                       axis=self.cri.axisM),
                       axis=self.cri.axisC)
        Ef = DXBf - self.Sf
        return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
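
`sl.dot(B, X, axis)` applies the matrix `B` along a single axis of a multi-dimensional array, here mixing the channels of the reconstruction by :math:`B`. Roughly (a sketch, not the sporco implementation):

import numpy as np

def dot_sketch(a, x, axis=0):
    # Contract the columns of the matrix `a` with axis `axis` of `x`,
    # leaving every other axis untouched.
    xm = np.moveaxis(x, axis, 0)
    ym = np.tensordot(a, xm, axes=(1, 0))
    return np.moveaxis(ym, 0, axis)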
Example #21
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * fftn(YU0, None, mp_cri.axisN) + \
            fftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), fftn(YU0, None, mp_cri.axisN),
                     axis=mp_cri.axisC) + fftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = ifftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = ifftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
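
In both branches the update amounts to solving, at each frequency, :math:`(\hat{D}^H \hat{D} + I) \hat{\mathbf{x}} = \hat{D}^H (\hat{\mathbf{y}}_0 + \hat{\mathbf{s}} - \hat{\mathbf{u}}_0) + (\hat{\mathbf{y}}_1 - \hat{\mathbf{u}}_1)`; the `1.0` passed to the solvers is the identity scaling, and the two cases differ only in whether the rank-one Sherman-Morrison solve applies directly or must be iterated over the channel axis.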
Example #22
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
        """

        XF = sl.rfftn(self.obfn_fvar(), mp_Nv, mp_axisN)
        DX = np.moveaxis(sl.irfftn(sl.inner(mp_Df, XF, mp_axisM),
                                   mp_Nv, mp_axisN), mp_axisM,
                         self.cri.axisM)
        return np.sum((self.W * (DX - self.S))**2) / 2.0
Example #23
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
        """

        Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
          - self.Sf
        return (linalg.norm(
            self.W * sl.irfftn(Ef, self.cri.Nv, self.cri.axisN))**2) / 2.0
Example #25
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W (\sum_m
        \mathbf{d}_m * \mathbf{x}_{m} - \mathbf{s}) \|_2^2`
        """

        Ef = sl.inner(self.Df, self.Xf, axis=self.cri.axisM) - self.Sf
        E = sl.irfftn(Ef, self.cri.Nv, self.cri.axisN)

        return (np.linalg.norm(self.W * E)**2) / 2.0
Example #26
    def hessian_f(self, V):
        """Compute Hessian of :math:`f` applied to V."""

        hessfv = np.conj(self.Df) * inner(self.Df, V, axis=self.cri.axisM)
        # Multiple channel signal, multiple channel dictionary
        if self.cri.Cd > 1:
            hessfv = np.sum(hessfv, axis=self.cri.axisC, keepdims=True)

        return hessfv
Example #28
    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
        \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
        """

        Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
          - self.Sf
        return (np.linalg.norm(self.W * sl.irfftn(Ef, self.cri.Nv,
                                                  self.cri.axisN))**2) / 2.0
Example #29
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * sl.rfftn(YU0, None, mp_cri.axisN) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), sl.rfftn(YU0, None, mp_cri.axisN),
                      axis=mp_cri.axisC) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Example #30
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. There are no parameters
    or return values because all inputs and outputs are from and to
    global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * spl.rfftn(YU0, None, mp_cri.axisN) + \
            spl.rfftn(YU1, None, mp_cri.axisN)
        Xf = spl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = spl.inner(np.conj(mp_Df), spl.rfftn(YU0, None, mp_cri.axisN),
                         axis=mp_cri.axisC) + \
            spl.rfftn(YU1, None, mp_cri.axisN)
        Xf = spl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = spl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = spl.irfftn(spl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Example #31
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(
            sl.dot(self.B, sl.inner(self.Df, Xf, axis=self.cri.axisM),
                   axis=self.cri.axisC), self.cri.Nv, self.cri.axisN)
Example #33
    def reconstruct(self, D=None, X=None):
        """Reconstruct representation."""

        if D is None:
            D = self.getdict(crop=False)
        if X is None:
            X = self.getcoef()
        Df = sl.rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
        Xf = sl.rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
        DXf = sl.inner(Df, Xf, axis=self.xstep.cri.axisM)
        return sl.irfftn(DXf, self.xstep.cri.Nv, self.xstep.cri.axisN)
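
The frequency-domain product computed here corresponds to circular convolution of each filter with its coefficient map, summed over the filter axis. A small self-contained check of that equivalence (1D, single filter, hypothetical sizes):

import numpy as np

# Multiplication of DFT coefficients matches circular convolution.
n = 16
rng = np.random.default_rng(0)
d, x = rng.standard_normal(n), rng.standard_normal(n)
dx_freq = np.fft.irfft(np.fft.rfft(d) * np.fft.rfft(x), n)
dx_circ = np.array([np.sum(d * np.roll(x[::-1], k + 1)) for k in range(n)])
assert np.allclose(dx_freq, dx_circ)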
Example #34
    def cnst_A0T(self, Y0):
        r"""Compute :math:`A_0^T \mathbf{y}_0` component of
        :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
        """

        # This calculation involves non-negligible computational cost. It
        # should be possible to disable relevant diagnostic information
        # (dual residual) to avoid this cost.
        Y0f = sl.rfftn(Y0, None, self.cri.axisN)
        return sl.irfftn(sl.inner(np.conj(self.Zf), Y0f, axis=self.cri.axisK),
                         self.cri.Nv, self.cri.axisN)
Example #35
    def reconstruct(self, D=None, X=None):
        """Reconstruct representation."""

        if D is None:
            D = self.getdict(crop=False)
        if X is None:
            X = self.getcoef()
        Df = rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
        Xf = rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
        DXf = inner(Df, Xf, axis=self.xstep.cri.axisM)
        return irfftn(DXf, self.xstep.cri.Nv, self.xstep.cri.axisN)
Example #36
    def cnst_A1(self, X, Xf=None):
        r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
        constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
        \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
        """

        if Xf is None:
            Xf = sl.rfftn(X, axes=self.cri.axisN)
        return sl.irfftn(sl.inner(
            self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM), self.cri.Nv,
                         self.cri.axisN)
Example #37
    def reconstruct(self, D=None, X=None):
        """Reconstruct representation."""

        if D is None:
            D = self.dstep.var_y1()
        if X is None:
            X = self.xstep.var_y1()
        Df = sl.rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
        Xf = sl.rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
        DXf = sl.inner(Df, Xf, axis=self.xstep.cri.axisM)
        return sl.irfftn(DXf, self.xstep.cri.Nv, self.xstep.cri.axisN)
Example #38
    def cnst_A1(self, X, Xf=None):
        r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
        constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
        \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
        """

        if Xf is None:
            Xf = sl.rfftn(X, axes=self.cri.axisN)
        return sl.irfftn(
            sl.inner(self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM),
            self.cri.Nv, self.cri.axisN)
Example #39
    def cnst_A0(self, X, Xf=None):
        r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
        constraint.
        """

        # This calculation involves non-negligible computational cost
        # when Xf is None (i.e. the function is not being applied to
        # self.X).
        if Xf is None:
            Xf = sl.rfftn(X, None, self.cri.axisN)
        return sl.irfftn(sl.inner(self.Zf, Xf, axis=self.cri.axisM),
                         self.cri.Nv, self.cri.axisN)
Example #40
    def cnst_A0T(self, Y0):
        r"""Compute :math:`A_0^T \mathbf{y}_0` component of
        :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
        """

        # This calculation involves non-negligible computational cost. It
        # should be possible to disable relevant diagnostic information
        # (dual residual) to avoid this cost.
        Y0f = sl.rfftn(Y0, None, self.cri.axisN)
        return sl.irfftn(sl.inner(np.conj(self.Zf), Y0f,
                                  axis=self.cri.axisK), self.cri.Nv,
                         self.cri.axisN)
Example #42
def ccmodmd_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_D_Y0 - mp_D_U0[k]
    YU1 = mp_D_Y1[k] + mp_S[k] - mp_D_U1[k]
    b = sl.rfftn(YU0, None, mp_cri.axisN) + \
      np.conj(mp_Zf[k]) * sl.rfftn(YU1, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], 1.0, b, axis=mp_cri.axisM)
    mp_D_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(Xf, mp_Zf[k]), mp_cri.Nv, mp_cri.axisN)
Example #43
def ccmodmd_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_D_Y0 - mp_D_U0[k]
    YU1 = mp_D_Y1[k] + mp_S[k] - mp_D_U1[k]
    b = fftn(YU0, None, mp_cri.axisN) + \
        np.conj(mp_Zf[k]) * fftn(YU1, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], 1.0, b, axis=mp_cri.axisM)
    mp_D_X[k] = ifftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = ifftn(sl.inner(Xf, mp_Zf[k]), mp_cri.Nv, mp_cri.axisN)
Example #44
    def eval_grad(self):
        """Compute gradient in Fourier domain."""

        # Compute X D - S
        Ryf = self.eval_Rf(self.Yf)

        gradf = inner(np.conj(self.Zf), Ryf, axis=self.cri.axisK)

        # Multiple channel signal, single channel dictionary
        if self.cri.C > 1 and self.cri.Cd == 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        return gradf
Example #45
    def obfn_dfd(self):
        """Compute data fidelity term."""

        if self.opt['DatFidNoDC']:
            Sf = self.Sf.copy()
            if self.cri.dimN == 1:
                Sf[0] = 0
            else:
                Sf[0, 0] = 0
        else:
            Sf = self.Sf
        Ef = inner(self.Df, self.obfn_fvarf(), axis=self.cri.axisM) - Sf
        return rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
Example #46
    def eval_grad(self):
        """Compute gradient in Fourier domain."""

        # Compute X D - S
        Ryf = self.eval_Rf(self.Yf)

        gradf = sl.inner(np.conj(self.Zf), Ryf, axis=self.cri.axisK)

        # Multiple channel signal, single channel dictionary
        if self.cri.C > 1 and self.cri.Cd == 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        return gradf
Example #47
    def evaluate(self, S, X):
        """Optionally evaluate functional values."""

        if self.opt['AccurateDFid']:
            Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
            Xf = sl.rfftn(X, self.cri.Nv, self.cri.axisN)
            Sf = sl.rfftn(S, self.cri.Nv, self.cri.axisN)
            Ef = sl.inner(Df, Xf, axis=self.cri.axisM) - Sf
            dfd = sl.rfl2norm2(Ef, S.shape, axis=self.cri.axisN) / 2.
            rl1 = np.sum(np.abs(X))
            evl = dict(DFid=dfd, RegL1=rl1, ObjFun=dfd + self.lmbda*rl1)
        else:
            evl = None
        return evl
Example #48
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #49
def cbpdn_setdict():
    """Set the dictionary for the cbpdn stage. There are no parameters
    or return values because all inputs and outputs are from and to
    global variables.
    """

    global mp_DSf
    # Set working dictionary for cbpdn step and compute DFT of dictionary
    # D and of D^T S
    mp_Df[:] = sl.rfftn(mp_D_Y, mp_cri.Nv, mp_cri.axisN)
    if mp_cri.Cd == 1:
        mp_DSf[:] = np.conj(mp_Df) * mp_Sf
    else:
        mp_DSf[:] = sl.inner(np.conj(mp_Df[np.newaxis, ...]), mp_Sf,
                             axis=mp_cri.axisC+1)
Example #50
    def evaluate(self):
        """Evaluate functional value of previous iteration."""

        X = mp_Z_Y
        Xf = mp_Zf
        Df = mp_Df
        Sf = mp_Sf
        Ef = sl.inner(Df[np.newaxis, ...], Xf,
                      axis=self.xstep.cri.axisM+1) - Sf
        Ef = np.swapaxes(Ef, 0, self.xstep.cri.axisK+1)[0]
        dfd = sl.rfl2norm2(Ef, self.xstep.S.shape,
                           axis=self.xstep.cri.axisN)/2.0
        rl1 = np.sum(np.abs(X))
        obj = dfd + self.xstep.lmbda*rl1
        return (obj, dfd, rl1)
Example #51
    def setcoef(self, Z):
        """Set coefficient array."""

        # If the dictionary has a single channel but the input (and
        # therefore also the coefficient map array) has multiple
        # channels, the channel index and multiple image index have
        # the same behaviour in the dictionary update equation: the
        # simplest way to handle this is to just reshape so that the
        # channels also appear on the multiple image index.
        if self.cri.Cd == 1 and self.cri.C > 1:
            Z = Z.reshape(self.cri.Nv + (1,) + (self.cri.Cx*self.cri.K,) +
                          (self.cri.M,))
        self.Z = np.asarray(Z, dtype=self.dtype)

        self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
        # Compute X^H S
        self.ZSf = sl.inner(np.conj(self.Zf), self.Sf, self.cri.axisK)
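
The reshape folds the channel axis onto the multiple-image axis, as the comment explains. With hypothetical sizes the shapes work out as follows:

import numpy as np

Nv, C, K, M = (32, 32), 3, 4, 16              # hypothetical sizes
Z = np.zeros(Nv + (C, K, M))                  # (32, 32, 3, 4, 16)
Zr = Z.reshape(Nv + (1,) + (C * K,) + (M,))
print(Zr.shape)                               # (32, 32, 1, 12, 16)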
Example #52
    def relax_AX(self):
        """The parent class method that this method overrides only
        implements the relaxation step for the variables of the baseline
        consensus algorithm. This method calls the overridden method and
        then implements the relaxation step for the additional variables
        required for the mask decoupling modification to the baseline
        algorithm.
        """

        super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
        self.AX1nr = sl.irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf),
                                        axis=self.cri.axisM),
                               self.cri.Nv, self.cri.axisN)
        if self.rlx == 1.0:
            self.AX1 = self.AX1nr
        else:
            alpha = self.rlx
            self.AX1 = alpha*self.AX1nr + (1-alpha)*(self.Y1 + self.S)
Example #53
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.cgit = None

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:], cgit = sl.solvemdbi_cg(
            self.Zf, 1.0, b, self.cri.axisM, self.cri.axisK,
            self.opt['CG', 'StopTol'], self.opt['CG', 'MaxIter'], self.Xf)
        self.cgit = cgit
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example #54
    def eval_grad(self):
        """Compute gradient in Fourier domain."""

        # Compute X D - S
        self.Ryf[:] = self.eval_Rf(self.Yf)

        # Map to spatial domain to multiply by mask
        Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
        # Multiply by mask
        self.WRy[:] = (self.W**2) * Ry
        # Map back to frequency domain
        WRyf = sl.rfftn(self.WRy, self.cri.Nv, self.cri.axisN)

        gradf = sl.inner(np.conj(self.Zf), WRyf, axis=self.cri.axisK)

        # Multiple channel signal, single channel dictionary
        if self.cri.C > 1 and self.cri.Cd == 1:
            gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

        return gradf
Example #55
    def evaluate(self):
        """Evaluate functional value of previous iteration."""

        if self.opt['AccurateDFid']:
            DX = self.reconstruct()
            W = self.dstep.W
            S = self.dstep.S
        else:
            W = mp_W
            S = mp_S
            Xf = mp_Zf
            Df = mp_Df
            DX = sl.irfftn(sl.inner(
                Df[np.newaxis, ...], Xf, axis=self.xstep.cri.axisM+1),
                self.xstep.cri.Nv, np.array(self.xstep.cri.axisN) + 1)

        dfd = (np.linalg.norm(W * (DX - S))**2) / 2.0
        rl1 = np.sum(np.abs(self.getcoef()))
        obj = dfd + self.xstep.lmbda*rl1

        return (obj, dfd, rl1)
Example #56
    def evaluate(self):
        """Evaluate functional value of previous iteration."""

        if self.opt['AccurateDFid']:
            if self.dmethod == 'fista':
                D = self.dstep.getdict(crop=False)
            else:
                D = self.dstep.var_y()
            if self.xmethod == 'fista':
                X = self.xstep.getcoef()
            else:
                X = self.xstep.var_y()
            Df = sl.rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
            Xf = sl.rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
            Sf = self.xstep.Sf
            Ef = sl.inner(Df, Xf, axis=self.xstep.cri.axisM) - Sf
            dfd = sl.rfl2norm2(Ef, self.xstep.S.shape,
                               axis=self.xstep.cri.axisN) / 2.0
            rl1 = np.sum(np.abs(X))
            return dict(DFid=dfd, RegL1=rl1,
                        ObjFun=dfd + self.xstep.lmbda * rl1)
        else:
            return None
Example #57
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        Zf = sl.rfftn(self.YU, None, self.cri.axisN)
        ZfQ = sl.dot(self.Q.T, Zf, axis=self.cri.axisC)
        b = self.DSfBQ + self.rho * ZfQ

        Xh = sl.solvedbi_sm(self.gDf, self.rho, b, self.c,
                            axis=self.cri.axisM)
        self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf,
                                               axis=self.cri.axisM)
            DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
            ax = DDXfBB + self.rho * self.Xf
            b = sl.dot(self.B.T, self.DSf, axis=self.cri.axisC) + \
                self.rho * Zf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example #58
    def test_21(self):
        x = np.random.randn(16, 8)
        y = np.random.randn(16, 8)
        ip1 = np.sum(x * y, axis=0, keepdims=True)
        ip2 = linalg.inner(x, y, axis=0)
        assert np.linalg.norm(ip1 - ip2) < 1e-13
Example #59
    def test_22(self):
        x = np.random.randn(8, 8, 3, 12)
        y = np.random.randn(8, 1, 1, 12)
        ip1 = np.sum(x * y, axis=-1, keepdims=True)
        ip2 = linalg.inner(x, y, axis=-1)
        assert np.linalg.norm(ip1 - ip2) < 1e-13
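
Both tests exercise `linalg.inner`, which behaves like a broadcast multiply followed by a sum over one axis, with that axis kept as a singleton. A sketch consistent with the assertions above:

import numpy as np

def inner_sketch(x, y, axis=-1):
    # Broadcast product summed over `axis`, keeping the axis so the
    # result aligns with np.sum(..., keepdims=True) in the tests.
    return np.sum(x * y, axis=axis, keepdims=True)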
Example #60
    def eval_Rf(self, Vf):
        """Evaluate smooth term in Vf."""

        return sl.inner(self.Zf, Vf, axis=self.cri.axisM) - self.Sf