Example 1
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)

        # The sum is over the extra axis indexing spatial gradient
        # operators G_i, *not* over axisM
        b = self.DSf + self.rho * (YUf[..., -1] + self.Wtv * np.sum(
            np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))

        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(self.Df,
                                        self.rho * self.GHGf + self.rho, b,
                                        self.c, self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(self.Df,
                                          self.rho * self.GHGf + self.rho, b,
                                          self.cri.axisM, self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(
                    np.conj(self.Df), x, axis=self.cri.axisC)
            ax = DHop(Dop(
                self.Xf)) + (self.rho * self.GHGf + self.rho) * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
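The call to sl.solvedbi_sm above solves, independently at each frequency, a system of the form (a a^H + c I) x = b via the Sherman-Morrison identity, where c plays the role of self.rho * self.GHGf + self.rho. A minimal stand-alone NumPy sketch of that identity at a single frequency (illustrative only, not the SPORCO implementation; all names below are local to the sketch):

import numpy as np

# Per-frequency Sherman-Morrison solve of (a a^H + c I) x = b.
rng = np.random.default_rng(0)
M = 16                                   # size of the filter (axisM) dimension
c = 0.7                                  # identity scaling at this frequency
a = rng.standard_normal(M) + 1j * rng.standard_normal(M)   # one frequency bin of Df
b = rng.standard_normal(M) + 1j * rng.standard_normal(M)

# Sherman-Morrison: (c I + a a^H)^{-1} b = (b - a (a^H b) / (c + a^H a)) / c
x = (b - a * (np.conj(a) @ b) / (c + np.conj(a) @ a)) / c

# Residual check in the style of the LinSolveCheck branch above
A = c * np.eye(M) + np.outer(a, np.conj(a))
assert np.linalg.norm(A @ x - b) / np.linalg.norm(b) < 1e-12

In the method above the same formula is presumably applied as a broadcast over all frequencies at once, with self.c assumed to be a pre-computed cache that avoids recomputing the frequency-dependent factors.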
Example 2
    def xstep(self):
        """Minimise Augmented Lagrangian with respect to x."""

        self.cgit = None

        self.YU[:] = self.Y - self.U

        b = self.ASf + self.rho * sl.rfftn(self.YU, None, self.cri.axisN)
        if self.opt['LinSolve'] == 'SM':
            self.Xf[:] = sl.solvemdbi_ism(self.Af, self.rho, b, self.cri.axisM,
                                          self.cri.axisK)
        else:
            self.Xf[:], cgit = sl.solvemdbi_cg(self.Af, self.rho, b,
                                               self.cri.axisM, self.cri.axisK,
                                               self.opt['CG', 'StopTol'],
                                               self.opt['CG', 'MaxIter'],
                                               self.Xf)
            self.cgit = cgit

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Aop = lambda x: np.sum(
                self.Af * x, axis=self.cri.axisM, keepdims=True)
            AHop = lambda x: np.sum(
                np.conj(self.Af) * x, axis=self.cri.axisK, keepdims=True)
            ax = AHop(Aop(self.Xf)) + self.rho * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
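The 'SM'/'CG' option above chooses between a direct (iterated Sherman-Morrison) solve and conjugate gradient for the same system (A^H A + rho I) x = b. The following stand-alone sketch contrasts a dense direct solve with a plain CG driven only by operator products (illustrative only; the sizes and names are arbitrary, and this is not the SPORCO implementation):

import numpy as np

rng = np.random.default_rng(0)
K, M, rho = 8, 16, 1e-1
A = rng.standard_normal((K, M)) + 1j * rng.standard_normal((K, M))
b = rng.standard_normal(M) + 1j * rng.standard_normal(M)

# Direct solve (what the Sherman-Morrison branch achieves without ever
# forming the matrix explicitly)
x_direct = np.linalg.solve(np.conj(A.T) @ A + rho * np.eye(M), b)

def cg(op, b, tol=1e-10, maxiter=200):
    # Plain conjugate gradient for a Hermitian positive-definite operator.
    x = np.zeros_like(b)
    r = b.copy()
    p = r.copy()
    rs = np.vdot(r, r).real
    for _ in range(maxiter):
        Ap = op(p)
        alpha = rs / np.vdot(p, Ap).real
        x = x + alpha * p
        r = r - alpha * Ap
        rs_new = np.vdot(r, r).real
        if np.sqrt(rs_new) < tol * np.linalg.norm(b):
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x

x_cg = cg(lambda v: np.conj(A.T) @ (A @ v) + rho * v, b)
assert np.linalg.norm(x_cg - x_direct) < 1e-7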
Example 3
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U

        b = self.DSf + self.rho * fftn(self.YU, None, self.cri.axisN)
        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(self.Df, self.rho, b, self.c,
                                        self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(self.Df, self.rho, b, self.cri.axisM,
                                          self.cri.axisC)

        self.X = ifftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                          axis=self.cri.axisC)
            ax = DHop(Dop(self.Xf)) + self.rho * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
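When self.cri.Cd > 1 the per-frequency system is no longer a rank-1 update of a scaled identity, so the single Sherman-Morrison step behind sl.solvedbi_sm is replaced by sl.solvemdbi_ism, which folds in one rank-1 term per dictionary channel. A stand-alone sketch of that iterated Sherman-Morrison idea for a generic system (X^H X + rho I) x = b, with the rows of X playing the role of the per-channel components (illustrative only, not the SPORCO implementation):

import numpy as np

def ism_solve(X, rho, b):
    # Iterated Sherman-Morrison: solve (X^H X + rho I) x = b for a K x M
    # matrix X by folding in the rank-1 terms a_k a_k^H, a_k = conj(X[k]),
    # one at a time.
    K, M = X.shape
    a = np.conj(X)
    y = b / rho      # running value of A_k^{-1} b, starting from A_0 = rho I
    Z = a / rho      # running values of A_k^{-1} a_i for the remaining i
    for k in range(K):
        zk = Z[k]
        denom = 1.0 + np.conj(a[k]) @ zk               # 1 + a_k^H A_k^{-1} a_k
        y = y - zk * (np.conj(a[k]) @ y) / denom
        if k + 1 < K:
            coef = (Z[k + 1:] @ np.conj(a[k])) / denom   # a_k^H A_k^{-1} a_i
            Z[k + 1:] = Z[k + 1:] - np.outer(coef, zk)
    return y

# Check against a dense solve
rng = np.random.default_rng(0)
K, M, rho = 4, 8, 1e-1
X = rng.standard_normal((K, M)) + 1j * rng.standard_normal((K, M))
b = rng.standard_normal(M) + 1j * rng.standard_normal(M)
x = ism_solve(X, rho, b)
assert np.allclose((np.conj(X.T) @ X + rho * np.eye(M)) @ x, b)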
Example 4
def par_xstep(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of optimizing
    :math:`\mathbf{x}`.

    Parameters
    ----------
    i : int
      Index of grouping to update

    """
    global mp_X
    global mp_DX
    YU0f = sl.rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
    YU1f = sl.rfftn(mp_Y1[mp_grp[i]:mp_grp[i+1]] -
                    1/mp_alpha*mp_U1[mp_grp[i]:mp_grp[i+1]], mp_Nv, mp_axisN)
    if mp_Cd == 1:
        b = np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]) * YU0f + mp_alpha**2*YU1f
        Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                            mp_cache[i], axis=mp_axisM)
    else:
        b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]), YU0f,
                     axis=mp_C) + mp_alpha**2*YU1f
        Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                              mp_axisM, mp_axisC)
    mp_X[mp_grp[i]:mp_grp[i+1]] = sl.irfftn(Xf, mp_Nv, mp_axisN)
    mp_DX[i] = sl.irfftn(sl.inner(mp_Df[mp_grp[i]:mp_grp[i+1]], Xf,
                                  mp_axisM), mp_Nv, mp_axisN)
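par_xstep reads and writes module-level mp_* globals rather than taking arrays as arguments, the usual pattern when per-group steps are dispatched to a process pool over shared memory. A rough, hypothetical sketch of that dispatch pattern is given below; the names (as_array, init_worker, group_step) and the shared-state layout are illustrative assumptions, not the actual SPORCO machinery:

import multiprocessing as mp
import numpy as np

ngrp, n = 4, 8

def as_array(shared):
    # View a multiprocessing shared buffer as an (ngrp, n) NumPy array.
    return np.frombuffer(shared.get_obj()).reshape(ngrp, n)

def init_worker(x, y, u):
    # Workers access the shared buffers through module-level globals,
    # mirroring the mp_* globals used by par_xstep.
    global mp_x, mp_y, mp_u
    mp_x, mp_y, mp_u = x, y, u

def group_step(i):
    # Stand-in for par_xstep(i): update only the rows owned by group i.
    as_array(mp_x)[i] = as_array(mp_y)[i] - as_array(mp_u)[i]

if __name__ == '__main__':
    x = mp.Array('d', ngrp * n)
    y = mp.Array('d', ngrp * n)
    u = mp.Array('d', ngrp * n)
    as_array(y)[:] = np.random.randn(ngrp, n)
    as_array(u)[:] = np.random.randn(ngrp, n)
    with mp.Pool(2, initializer=init_worker, initargs=(x, y, u)) as pool:
        pool.map(group_step, range(ngrp))
    assert np.allclose(as_array(x), as_array(y) - as_array(u))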
Example 5
def par_xstep(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of optimizing
    :math:`\mathbf{x}`.

    Parameters
    ----------
    i : int
      Index of grouping to update

    """
    global mp_X
    global mp_DX
    YU0f = sl.rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
    YU1f = sl.rfftn(mp_Y1[mp_grp[i]:mp_grp[i+1]] -
                    1/mp_alpha*mp_U1[mp_grp[i]:mp_grp[i+1]], mp_Nv, mp_axisN)
    if mp_Cd == 1:
        b = np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]) * YU0f + mp_alpha**2*YU1f
        Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                            mp_cache[i], axis=mp_axisM)
    else:
        b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]), YU0f,
                     axis=mp_C) + mp_alpha**2*YU1f
        Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                              mp_axisM, mp_axisC)
    mp_X[mp_grp[i]:mp_grp[i+1]] = sl.irfftn(Xf, mp_Nv, mp_axisN)
    mp_DX[i] = sl.irfftn(sl.inner(mp_Df[mp_grp[i]:mp_grp[i+1]], Xf,
                                  mp_axisM), mp_Nv, mp_axisN)
Example 6
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)

        # The sum is over the extra axis indexing spatial gradient
        # operators G_i, *not* over axisM
        b = self.DSf + self.rho*(YUf[..., -1] + self.Wtv * np.sum(
            np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))

        if self.cri.Cd == 1:
            self.Xf[:] = sl.solvedbi_sm(
                self.Df, self.rho*self.GHGf + self.rho, b, self.c,
                self.cri.axisM)
        else:
            self.Xf[:] = sl.solvemdbi_ism(
                self.Df, self.rho*self.GHGf + self.rho, b, self.cri.axisM,
                self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
            if self.cri.Cd == 1:
                DHop = lambda x: np.conj(self.Df) * x
            else:
                DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                          axis=self.cri.axisC)
            ax = DHop(Dop(self.Xf)) + (self.rho*self.GHGf + self.rho)*self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example 7
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:] = sl.solvemdbi_ism(self.Zf, self.rho, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example 8
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        b = self.ZSf + self.rho * sl.rfftn(self.YU, None, self.cri.axisN)
        self.Xf[:] = sl.solvemdbi_ism(self.Zf, self.rho, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
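The two examples above delegate the residual check to self.xstep_check(b) rather than inlining it. A plausible sketch of such a method, assuming it mirrors the inlined LinSolveCheck branches in the other examples and relies on the same np/sl imports and attributes (self.Zf, self.opt, self.rho, self.cri) as the class above; this is an assumption, not necessarily the exact library code:

    def xstep_check(self, b):
        """Compute the relative residual of the frequency-domain solve
        (Z^H Z + rho I) Xf = b (sketch; assumed to follow the same pattern
        as the inlined LinSolveCheck branches above)."""
        if self.opt['LinSolveCheck']:
            Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
            ZHop = lambda x: sl.inner(np.conj(self.Zf), x,
                                      axis=self.cri.axisK)
            ax = ZHop(Zop(self.Xf)) + self.rho * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None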
Example 9
def cbpdn_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_Z_Y[k] - mp_Z_U[k]
    b = mp_DSf[k] + mp_xrho * fftn(YU, None, mp_cri.axisN)
    if mp_cri.Cd == 1:
        Xf = sl.solvedbi_sm(mp_Df, mp_xrho, b, axis=mp_cri.axisM)
    else:
        Xf = sl.solvemdbi_ism(mp_Df, mp_xrho, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = ifftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example 10
def cbpdn_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_Z_Y[k] - mp_Z_U[k]
    b = mp_DSf[k] + mp_xrho * sl.rfftn(YU, None, mp_cri.axisN)
    if mp_cri.Cd == 1:
        Xf = sl.solvedbi_sm(mp_Df, mp_xrho, b, axis=mp_cri.axisM)
    else:
        Xf = sl.solvemdbi_ism(mp_Df, mp_xrho, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example 11
def cbpdn_xstep(k):
    """Do the X step of the cbpdn stage. There are no parameters
    or return values because all inputs and outputs are from and to
    global variables.
    """

    YU = mp_Z_Y[k] - mp_Z_U[k]
    b = mp_DSf[k] + mp_xrho * spl.rfftn(YU, None, mp_cri.axisN)
    if mp_cri.Cd == 1:
        Xf = spl.solvedbi_sm(mp_Df, mp_xrho, b, axis=mp_cri.axisM)
    else:
        Xf = spl.solvemdbi_ism(mp_Df, mp_xrho, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = spl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
Example 12
def update_dict(cri, Pcn, crop_op, Xr, Gr, Hr, Sf, param_rho):
    # D step
    Xf = to_frequency(cri, Xr)
    Gf = to_frequency(cri, Gr)
    Hf = to_frequency(cri, Hr)
    XSf = sl.inner(np.conj(Xf), Sf, cri.axisK)
    b = XSf + param_rho * (Gf - Hf)
    Df = sl.solvemdbi_ism(Xf, param_rho, b, cri.axisM, cri.axisK)
    Dr = to_spatial(cri, Df)
    # G step
    Gr = Pcn(Dr + Hr)
    # H step
    Hr = Hr + Dr - Gr
    return Gr[crop_op], Hr
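In ADMM terms, update_dict performs one pass of the standard dictionary-update iteration. Written out as a sketch using the variable names above, with hats denoting the frequency-domain (DFT) representations produced by to_frequency, rho = param_rho, and P_cn the constraint-set projection applied by Pcn:

.. math::
   \hat{D} = \left( \hat{X}^H \hat{X} + \rho I \right)^{-1}
             \left( \hat{X}^H \hat{S} + \rho (\hat{G} - \hat{H}) \right)

.. math::
   G = P_{\mathrm{cn}}(D + H)

.. math::
   H = H + D - G

The D step is the system solved per frequency by sl.solvemdbi_ism, in the same operator convention as the tests below.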
Example 13
    def test_11(self):
        rho = 1e-1
        N = 32
        M = 16
        K = 8
        D = complex_randn(N, N, 1, 1, M)
        X = complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)

        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv = linalg.solvemdbi_ism(X, rho, XHop(S) + rho*Z, 4, 3)
        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) < 1e-11
Example 14
    def test_11(self):
        rho = 1e-1
        N = 32
        M = 16
        K = 8
        D = util.complex_randn(N, N, 1, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)

        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv = linalg.solvemdbi_ism(X, rho, XHop(S) + rho*Z, 4, 3)

        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) < 1e-11
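complex_randn (util.complex_randn in the variant above) corresponds to the inline construction used in the later test (Example 18): independent standard-normal real and imaginary parts. A minimal equivalent sketch (an assumption, not necessarily the exact library helper):

import numpy as np

def complex_randn(*shape):
    # Complex array with independent standard-normal real and imaginary
    # parts, matching the inline construction in Example 18.
    return np.random.randn(*shape) + 1j * np.random.randn(*shape)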
Example 15
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example 16
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        self.YU[:] = self.Y - self.U
        self.block_sep0(self.YU)[:] += self.S
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                     axis=self.cri.axisK) + self.block_sep1(YUf)

        self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
                                      self.cri.axisK)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
        self.xstep_check(b)
Example 17
def dictionary_learning_by_L1_Dstep(Xf, S, G1, H1, G0, H0, parameter_rho_dic,
                                    iteration, cri, dsz):
    GH = con.convert_to_Df(G1 - H1, S, cri)
    GSH = con.convert_to_Sf(G0 + S - H0, cri)
    b = GH + sl.inner(np.conj(Xf), GSH, cri.axisK)
    Df = sl.solvemdbi_ism(Xf, 1, b, cri.axisM, cri.axisK)
    D = con.convert_to_D(Df, dsz, cri)

    XfDf = np.sum(Xf * Df, axis=cri.axisM)
    XD = con.convert_to_S(XfDf, cri)
    G0 = sp.prox_l1(XD - S + H0, (1 / parameter_rho_dic))

    H0 = H0 + XD - G0 - S

    return D, G0, H0, XD
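The G0 update applies sp.prox_l1, the proximal operator of the scaled l1 norm. For the real-valued residual XD - S + H0 this is elementwise soft thresholding; a minimal sketch (assumed to be equivalent to the library routine for real-valued input):

import numpy as np

def prox_l1(v, alpha):
    # Soft thresholding: proximal operator of alpha * ||.||_1 for
    # real-valued arrays (a sketch; sp.prox_l1 above is assumed to
    # behave like this on the real residual XD - S + H0).
    return np.sign(v) * np.maximum(np.abs(v) - alpha, 0)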
Example 18
    def test_06(self):
        rho = 1e-1
        N = 64
        M = 32
        K = 8
        D = np.random.randn(N, N, 1, 1, M).astype('complex') + \
            np.random.randn(N, N, 1, 1, M).astype('complex') * 1.0j
        X = np.random.randn(N, N, 1, K, M).astype('complex') + \
            np.random.randn(N, N, 1, K, M).astype('complex') * 1.0j
        S = np.sum(D*X, axis=4, keepdims=True)

        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv = linalg.solvemdbi_ism(X, rho, XHop(S) + rho*Z, 4, 3)

        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) < 1e-11
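The assertions in these tests, like the LinSolveCheck branches earlier, are based on a relative residual. A minimal sketch of what rrs is assumed to compute:

import numpy as np

def rrs(ax, b):
    # Relative residual ||ax - b|| / ||b|| over all elements (assumed to
    # match linalg.rrs / sl.rrs as used throughout these examples).
    return np.linalg.norm(np.ravel(ax - b)) / np.linalg.norm(np.ravel(b))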
Example 19
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * fftn(YU0, None, mp_cri.axisN) + \
            fftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), fftn(YU0, None, mp_cri.axisN),
                     axis=mp_cri.axisC) + fftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = ifftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = ifftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Example 20
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * sl.rfftn(YU0, None, mp_cri.axisN) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), sl.rfftn(YU0, None, mp_cri.axisN),
                     axis=mp_cri.axisC) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Example 21
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. There are no parameters
    or return values because all inputs and outputs are from and to
    global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * spl.rfftn(YU0, None, mp_cri.axisN) + \
            spl.rfftn(YU1, None, mp_cri.axisN)
        Xf = spl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = spl.inner(np.conj(mp_Df), spl.rfftn(YU0, None, mp_cri.axisN),
                      axis=mp_cri.axisC) + \
            spl.rfftn(YU1, None, mp_cri.axisN)
        Xf = spl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = spl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = spl.irfftn(spl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Example 22
def dictionary_learning_by_L2(X, S, dsz, G, H, parameter_rho_dic, iteration,
                              thr, cri):
    Xf = con.convert_to_Xf(X, S, cri)

    bar_D = tqdm(total=iteration, desc='D', leave=False)
    for i in range(iteration):

        Gf = con.convert_to_Df(G, S, cri)
        Hf = con.convert_to_Df(H, S, cri)
        GH = Gf - Hf
        Sf = con.convert_to_Sf(S, cri)
        b = parameter_rho_dic * GH + sl.inner(np.conj(Xf), Sf, cri.axisK)
        Df = sl.solvemdbi_ism(Xf, parameter_rho_dic, b, cri.axisM, cri.axisK)
        D = con.convert_to_D(Df, dsz, cri)

        XfDf = np.sum(Xf * Df, axis=cri.axisM)
        XD = con.convert_to_S(XfDf, cri)

        Dr = np.asarray(D.reshape(cri.shpD), dtype=S.dtype)
        Hr = np.asarray(H.reshape(cri.shpD), dtype=S.dtype)
        Pcn = cr.getPcn(dsz, cri.Nv, cri.dimN, cri.dimCd)
        Gr = Pcn(Dr + Hr)
        G = cr.bcrop(Gr, dsz, cri.dimN).squeeze()

        H = H + D - G

        Est = sp.norm_2l2(XD - S)
        if i == 0:
            pre_Est = 1.1 * Est

        if (pre_Est - Est) / pre_Est <= thr:
            bar_D.update(iteration - i)
            break

        pre_Est = Est

        bar_D.update(1)
    bar_D.close()
    return D, G, H
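cr.getPcn above returns the constraint-set projection applied in the G step, and Pcn plays the same role in update_dict earlier. A rough stand-alone sketch of one common form of such a projection is given below for filters stored on a full-size spatial grid: zero the coefficients outside the filter support, then rescale each filter to unit l2 norm. The function name and layout are assumptions for illustration, not the exact cnvrep implementation:

import numpy as np

def project_cnstr(d, support=(8, 8)):
    # d: array of shape (N0, N1, M) holding M filters on an N0 x N1 grid.
    # Zero outside the assumed filter support, then normalise each filter.
    out = np.zeros_like(d)
    out[:support[0], :support[1]] = d[:support[0], :support[1]]
    nrm = np.sqrt(np.sum(np.abs(out)**2, axis=(0, 1), keepdims=True))
    return out / np.maximum(nrm, 1e-12)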
Example 23
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        YUf0 = self.block_sep0(YUf)
        YUf1 = self.block_sep1(YUf)

        b = self.rho * np.sum(np.conj(self.GDf) * YUf1, axis=-1)
        if self.cri.Cd > 1:
            b = np.sum(b, axis=self.cri.axisC, keepdims=True)
        b += self.DSf + self.rho * YUf0

        # Concatenate the Df and GDf components into a single operator.
        # For single-channel signals, and for multi-channel signals with
        # a single-channel dictionary, sl.solvemdbi_ism then solves a
        # linear system of rank dimN+1 (corresponding to the dictionary
        # and a gradient operator per spatial dimension) plus an
        # identity. For multi-channel signals with a multi-channel
        # dictionary, sl.solvemdbi_ism solves a linear system of rank
        # Cd*(dimN+1) (corresponding to the dictionary and a gradient
        # operator per spatial dimension for each channel) plus an
        # identity.

        # The structure of the linear system to be solved depends on the
        # number of channels in the signal and dictionary. Both branches are
        # the same in the single-channel signal case (the choice of handling
        # it via the 'else' branch is somewhat arbitrary).
        if self.cri.C > 1 and self.cri.Cd == 1:
            # Concatenate multiple GDf components on the final axis
            # of GDf (that indexes the number of gradient operators). For
            # multi-channel signals with a single-channel dictionary,
            # sl.solvemdbi_ism has to solve a linear system of rank dimN+1
            # (corresponding to the dictionary and a gradient operator per
            # spatial dimension)
            DfGDf = np.concatenate(
                [self.Df[..., np.newaxis]] +
                [np.sqrt(self.rho) * self.GDf[..., k, np.newaxis]
                 for k in range(self.GDf.shape[-1])], axis=-1)
            self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b[..., np.newaxis],
                                          self.cri.axisM, -1)[..., 0]
        else:
            # Concatenate multiple GDf components on axisC. For multi-channel
            # signals with a multi-channel dictionary, sl.solvemdbi_ism has
            # to solve a linear system of rank C.d (dimN+1) (corresponding to
            # the dictionary and a gradient operator per spatial dimension
            # for each channel) plus an identity.
            DfGDf = np.concatenate(
                [self.Df] +
                [np.sqrt(self.rho) * self.GDf[..., k]
                 for k in range(self.GDf.shape[-1])], axis=self.cri.axisC)
            self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b, self.cri.axisM,
                                          self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            if self.cri.C > 1 and self.cri.Cd == 1:
                Dop = lambda x: sl.inner(
                    DfGDf, x[..., np.newaxis], axis=self.cri.axisM)
                DHop = lambda x: sl.inner(np.conj(DfGDf), x, axis=-1)
                ax = DHop(Dop(self.Xf))[..., 0] + self.rho * self.Xf
            else:
                Dop = lambda x: sl.inner(DfGDf, x, axis=self.cri.axisM)
                DHop = lambda x: sl.inner(
                    np.conj(DfGDf), x, axis=self.cri.axisC)
                ax = DHop(Dop(self.Xf)) + self.rho * self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
Example 24
    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`."""

        self.YU[:] = self.Y - self.U
        YUf = sl.rfftn(self.YU, None, self.cri.axisN)
        YUf0 = self.block_sep0(YUf)
        YUf1 = self.block_sep1(YUf)

        b = self.rho * np.sum(np.conj(self.GDf) * YUf1, axis=-1)
        if self.cri.Cd > 1:
            b = np.sum(b, axis=self.cri.axisC, keepdims=True)
        b += self.DSf + self.rho*YUf0

        # Concatenate the Df and GDf components into a single operator.
        # For single-channel signals, and for multi-channel signals with
        # a single-channel dictionary, sl.solvemdbi_ism then solves a
        # linear system of rank dimN+1 (corresponding to the dictionary
        # and a gradient operator per spatial dimension) plus an
        # identity. For multi-channel signals with a multi-channel
        # dictionary, sl.solvemdbi_ism solves a linear system of rank
        # Cd*(dimN+1) (corresponding to the dictionary and a gradient
        # operator per spatial dimension for each channel) plus an
        # identity.

        # The structure of the linear system to be solved depends on the
        # number of channels in the signal and dictionary. Both branches are
        # the same in the single-channel signal case (the choice of handling
        # it via the 'else' branch is somewhat arbitrary).
        if self.cri.C > 1 and self.cri.Cd == 1:
            # Concatenate multiple GDf components on the final axis
            # of GDf (that indexes the number of gradient operators). For
            # multi-channel signals with a single-channel dictionary,
            # sl.solvemdbi_ism has to solve a linear system of rank dimN+1
            # (corresponding to the dictionary and a gradient operator per
            # spatial dimension)
            DfGDf = np.concatenate(
                [self.Df[..., np.newaxis],] +
                [np.sqrt(self.rho)*self.GDf[..., k, np.newaxis] for k
                 in range(self.GDf.shape[-1])], axis=-1)
            self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b[..., np.newaxis],
                                          self.cri.axisM, -1)[..., 0]
        else:
            # Concatenate multiple GDf components on axisC. For multi-channel
            # signals with a multi-channel dictionary, sl.solvemdbi_ism has
            # to solve a linear system of rank C.d (dimN+1) (corresponding to
            # the dictionary and a gradient operator per spatial dimension
            # for each channel) plus an identity.
            DfGDf = np.concatenate(
                [self.Df,] + [np.sqrt(self.rho)*self.GDf[..., k] for k
                              in range(self.GDf.shape[-1])],
                axis=self.cri.axisC)
            self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b, self.cri.axisM,
                                          self.cri.axisC)

        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

        if self.opt['LinSolveCheck']:
            if self.cri.C > 1 and self.cri.Cd == 1:
                Dop = lambda x: sl.inner(DfGDf, x[..., np.newaxis],
                                         axis=self.cri.axisM)
                DHop = lambda x: sl.inner(np.conj(DfGDf), x, axis=-1)
                ax = DHop(Dop(self.Xf))[..., 0] + self.rho*self.Xf
            else:
                Dop = lambda x: sl.inner(DfGDf, x, axis=self.cri.axisM)
                DHop = lambda x: sl.inner(np.conj(DfGDf), x,
                                          axis=self.cri.axisC)
                ax = DHop(Dop(self.Xf)) + self.rho*self.Xf
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None
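The comments above state that concatenating Df with sqrt(rho)-scaled GDf components yields a single operator whose normal equations have the low-rank-plus-identity form handled by sl.solvemdbi_ism. A small stand-alone NumPy check of that algebra at a single frequency, with two gradient components (illustrative sketch only):

import numpy as np

rng = np.random.default_rng(0)
M, rho = 8, 0.5
d = rng.standard_normal(M) + 1j * rng.standard_normal(M)    # Df at one frequency
g1 = rng.standard_normal(M) + 1j * rng.standard_normal(M)   # GDf[..., 0] at one frequency
g2 = rng.standard_normal(M) + 1j * rng.standard_normal(M)   # GDf[..., 1] at one frequency

# Stack [d, sqrt(rho) g1, sqrt(rho) g2] as the components of one operator A.
A = np.stack([d, np.sqrt(rho) * g1, np.sqrt(rho) * g2])

# Then A^H A = D^H D + rho (G1^H G1 + G2^H G2), so the full system
# (A^H A + rho I) x = b is a sum of rank-1 terms plus a scaled identity,
# which is exactly the structure solvemdbi_ism solves.
lhs = np.conj(A.T) @ A
rhs = (np.outer(np.conj(d), d) +
       rho * (np.outer(np.conj(g1), g1) + np.outer(np.conj(g2), g2)))
assert np.allclose(lhs, rhs)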