def par_xstep(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of
    optimizing :math:`\mathbf{x}`.

    Parameters
    ----------
    i : int
      Index of grouping to update
    """

    global mp_X
    global mp_DX
    YU0f = rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
    YU1f = rfftn(mp_Y1[mp_grp[i]:mp_grp[i+1]] -
                 1/mp_alpha*mp_U1[mp_grp[i]:mp_grp[i+1]], mp_Nv, mp_axisN)
    if mp_Cd == 1:
        b = np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]) * YU0f + mp_alpha**2*YU1f
        Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                            mp_cache[i], axis=mp_axisM)
    else:
        b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]), YU0f,
                     axis=mp_C) + mp_alpha**2*YU1f
        Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                              mp_axisM, mp_axisC)
    mp_X[mp_grp[i]:mp_grp[i+1]] = irfftn(Xf, mp_Nv, mp_axisN)
    mp_DX[i] = irfftn(sl.inner(mp_Df[mp_grp[i]:mp_grp[i+1]], Xf, mp_axisM),
                      mp_Nv, mp_axisN)
def reconstruct(self, D=None, X=None):
    """Reconstruct representation."""

    if D is None:
        D = self.getdict(crop=False)
    if X is None:
        X = self.getcoef()
    Df = rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
    Xf = rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
    DXf = inner(Df, Xf, axis=self.xstep.cri.axisM)
    return irfftn(DXf, self.xstep.cri.Nv, self.xstep.cri.axisN)
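# Illustrative sketch (not part of the library source): the reconstruction
# above evaluates sum_m d_m * x_m (circular convolution) in the frequency
# domain via pointwise multiplication and a sum over the filter axis. A
# self-contained numpy equivalent for a single 1D signal, with arbitrary
# example shapes, might look like this:
#
#     import numpy as np
#     N, M = 16, 3                     # signal length, number of filters
#     D = np.random.randn(N, M)        # filters zero-padded to length N
#     X = np.random.randn(N, M)        # coefficient maps
#     Df = np.fft.fft(D, axis=0)
#     Xf = np.fft.fft(X, axis=0)
#     S = np.fft.ifft(np.sum(Df * Xf, axis=1), axis=0).real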
def tikhonov_filter(s, lmbda, npd=16):
    r"""Lowpass filter based on Tikhonov regularization.

    Lowpass filter image(s) and return low and high frequency
    components, consisting of the lowpass filtered image and its
    difference with the input image. The lowpass filter is equivalent
    to Tikhonov regularization with `lmbda` as the regularization
    parameter and a discrete gradient as the operator in the
    regularization term, i.e. the lowpass component is the solution to

    .. math::
      \mathrm{argmin}_\mathbf{x} \; (1/2) \left\|\mathbf{x} - \mathbf{s}
      \right\|_2^2 + (\lambda / 2) \sum_i \| G_i \mathbf{x} \|_2^2 \;\;,

    where :math:`\mathbf{s}` is the input image, :math:`\lambda` is the
    regularization parameter, and :math:`G_i` is an operator that
    computes the discrete gradient along image axis :math:`i`. Once the
    lowpass component :math:`\mathbf{x}` has been computed, the highpass
    component is just :math:`\mathbf{s} - \mathbf{x}`.

    Parameters
    ----------
    s : array_like
      Input image or array of images.
    lmbda : float
      Regularization parameter controlling lowpass filtering.
    npd : int, optional (default=16)
      Number of samples to pad at image boundaries.

    Returns
    -------
    slp : array_like
      Lowpass image or array of images.
    shp : array_like
      Highpass image or array of images.
    """

    grv = np.array([-1.0, 1.0]).reshape([2, 1])
    gcv = np.array([-1.0, 1.0]).reshape([1, 2])
    Gr = rfftn(grv, (s.shape[0] + 2 * npd, s.shape[1] + 2 * npd), (0, 1))
    Gc = rfftn(gcv, (s.shape[0] + 2 * npd, s.shape[1] + 2 * npd), (0, 1))
    A = 1.0 + lmbda * (np.conj(Gr) * Gr + np.conj(Gc) * Gc).real
    if s.ndim > 2:
        A = A[(slice(None),) * 2 + (np.newaxis,) * (s.ndim - 2)]
    sp = np.pad(s, ((npd, npd),) * 2 + ((0, 0),) * (s.ndim - 2),
                'symmetric')
    spshp = sp.shape
    sp = rfftn(sp, axes=(0, 1))
    sp /= A
    sp = irfftn(sp, s=spshp[0:2], axes=(0, 1))
    slp = sp[npd:(sp.shape[0] - npd), npd:(sp.shape[1] - npd)]
    shp = s - slp
    return slp.astype(s.dtype), shp.astype(s.dtype)
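# Illustrative usage sketch (not part of the library source): split a test
# image into lowpass and highpass components. Assumes numpy is imported as
# np and that tikhonov_filter is importable from this module; the image
# and parameter values are arbitrary examples.
#
#     img = np.random.randn(64, 64).astype(np.float32)
#     slp, shp = tikhonov_filter(img, lmbda=5.0, npd=16)
#     # The two components sum back to the input, up to floating point error
#     assert np.allclose(slp + shp, img, atol=1e-5)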
def ccmodmd_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_D_Y0 - mp_D_U0[k]
    YU1 = mp_D_Y1[k] + mp_S[k] - mp_D_U1[k]
    b = rfftn(YU0, None, mp_cri.axisN) + \
        np.conj(mp_Zf[k]) * rfftn(YU1, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], 1.0, b, axis=mp_cri.axisM)
    mp_D_X[k] = irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = irfftn(sl.inner(Xf, mp_Zf[k]), mp_cri.Nv, mp_cri.axisN)
def xstep(self, gradf=None):
    """Compute proximal update (gradient descent + constraint).
    Variables are mapped back and forth between input and frequency
    domains. Optionally, a monotone PGM version from
    :cite:`beck-2009-tv` is available.
    """

    if gradf is None:
        gradf = self.grad_f()

    if self.stepsizepolicy is not None:
        if self.k > 1:
            self.L = self.stepsizepolicy.update(self, gradf)
        if isinstance(self.stepsizepolicy, StepSizePolicyBB):
            # BB variants are two-point methods
            self.stepsizepolicy.store_prev_state(self.Xf, gradf)

    self.Vf[:] = self.Yf - (1. / self.L) * gradf
    V = irfftn(self.Vf, self.Nv, self.axisN)
    self.X[:] = self.prox_g(V)
    self.Xf = rfftn(self.X, None, self.axisN)

    if self.opt['Monotone'] and self.k > 0:
        self.ZZf = self.Xf.copy()
        self.objfn = self.eval_objfn()
        if self.objfn_prev[0] < self.objfn[0]:
            # If the objective function increased, revert to the
            # previous iterate
            self.Xf = self.Xfprv.copy()
            self.objfn = self.objfn_prev

    return gradf
def gradient_filters(ndim, axes, axshp, dtype=None):
    r"""Construct a set of filters for computing gradients in the
    frequency domain.

    Parameters
    ----------
    ndim : integer
      Total number of dimensions in array in which gradients are to be
      computed
    axes : tuple of integers
      Axes on which gradients are to be computed
    axshp : tuple of integers
      Shape of axes on which gradients are to be computed
    dtype : dtype
      Data type of output arrays

    Returns
    -------
    Gf : ndarray
      Frequency domain gradient operators :math:`\hat{G}_i`
    GHGf : ndarray
      Sum of products :math:`\sum_i \hat{G}_i^H \hat{G}_i`
    """

    if dtype is None:
        dtype = np.float32
    g = np.zeros([2 if k in axes else 1 for k in range(ndim)] +
                 [len(axes), ], dtype)
    for k in axes:
        g[(0,) * k + (slice(None),) + (0,) * (g.ndim - 2 - k) + (k,)] = \
            np.array([1, -1])
    Gf = rfftn(g, axshp, axes=axes)
    GHGf = np.sum(np.conj(Gf) * Gf, axis=-1).real
    return Gf, GHGf
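# Illustrative usage sketch (not part of the library source): build
# gradient filters for a 2D image and apply one of them in the frequency
# domain. Assumes numpy is imported as np and that rfftn/irfftn are the
# real-input FFT wrappers used throughout this module; shapes are
# arbitrary examples.
#
#     img = np.random.randn(32, 32).astype(np.float32)
#     Gf, GHGf = gradient_filters(img.ndim, (0, 1), img.shape)
#     imgf = rfftn(img, img.shape, axes=(0, 1))
#     # Gradient along axis 0 via pointwise multiplication in the
#     # frequency domain, i.e. circular convolution with the [1, -1] filter
#     grad0 = irfftn(Gf[..., 0] * imgf, img.shape, axes=(0, 1))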
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}`.
    """

    self.YU[:] = self.Y - self.U
    self.block_sep0(self.YU)[:] += self.S
    Zf = rfftn(self.YU, None, self.cri.axisN)
    Z0f = self.block_sep0(Zf)
    Z1f = self.block_sep1(Zf)

    DZ0f = np.conj(self.Df) * Z0f
    DZ0fBQ = dot(self.B.dot(self.Q).T, DZ0f, axis=self.cri.axisC)
    Z1fQ = dot(self.Q.T, Z1f, axis=self.cri.axisC)
    b = DZ0fBQ + Z1fQ

    Xh = solvedbd_sm(self.gDf, (self.mu / self.rho) * self.GHGf + 1.0, b,
                     self.c, axis=self.cri.axisM)
    self.Xf[:] = dot(self.Q, Xh, axis=self.cri.axisC)
    self.X = irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

    if self.opt['LinSolveCheck']:
        DDXf = np.conj(self.Df) * inner(self.Df, self.Xf,
                                        axis=self.cri.axisM)
        DDXfBB = dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
        ax = self.rho * (DDXfBB + self.Xf) + \
            self.mu * self.GHGf * self.Xf
        b = self.rho * (dot(self.B.T, DZ0f, axis=self.cri.axisC) + Z1f)
        self.xrrs = rrs(ax, b)
    else:
        self.xrrs = None
def setdict(self, D=None, B=None):
    """Set dictionary array."""

    if D is not None:
        self.D = np.asarray(D, dtype=self.dtype)
    if B is not None:
        self.B = np.asarray(B, dtype=self.dtype)

    if B is not None or not hasattr(self, 'Gamma'):
        self.Gamma, self.Q = np.linalg.eigh(self.B.T.dot(self.B))
        self.Gamma = np.abs(self.Gamma)
    if D is not None or not hasattr(self, 'Df'):
        self.Df = rfftn(self.D, self.cri.Nv, self.cri.axisN)

    self.DSf = np.conj(self.Df) * self.Sf
    self.DSfBQ = dot(self.B.dot(self.Q).T, self.DSf, axis=self.cri.axisC)

    # Fold square root of Gamma into the dictionary array to enable
    # use of the solvedbi_sm solver
    shpg = [1] * len(self.cri.shpD)
    shpg[self.cri.axisC] = self.Gamma.shape[0]
    Gamma2 = np.sqrt(self.Gamma).reshape(shpg)
    self.gDf = Gamma2 * self.Df

    if self.opt['HighMemSolve']:
        self.c = solvedbi_sm_c(self.gDf, np.conj(self.gDf), self.rho,
                               self.cri.axisM)
    else:
        self.c = None
def obfn_fvarf(self):
    """Variable to be evaluated in computing data fidelity term,
    depending on 'fEvalX' option value.
    """

    return self.Xf if self.opt['fEvalX'] else \
        rfftn(self.Y, None, self.cri.axisN)
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}`.
    """

    self.YU[:] = self.Y - self.U
    YUf = rfftn(self.YU, None, self.cri.axisN)
    # The sum is over the extra axis indexing spatial gradient
    # operators G_i, *not* over axisM
    b = self.DSf + self.rho * (YUf[..., -1] + self.Wtv * np.sum(
        np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))

    if self.cri.Cd == 1:
        self.Xf[:] = sl.solvedbi_sm(
            self.Df, self.rho * self.GHGf + self.rho, b, self.c,
            self.cri.axisM)
    else:
        self.Xf[:] = sl.solvemdbi_ism(
            self.Df, self.rho * self.GHGf + self.rho, b, self.cri.axisM,
            self.cri.axisC)

    self.X = irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

    if self.opt['LinSolveCheck']:
        Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
        if self.cri.Cd == 1:
            DHop = lambda x: np.conj(self.Df) * x
        else:
            DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                      axis=self.cri.axisC)
        ax = DHop(Dop(self.Xf)) + \
            (self.rho * self.GHGf + self.rho) * self.Xf
        self.xrrs = sl.rrs(ax, b)
    else:
        self.xrrs = None
def cbpdnmd_setdict():
    """Set the dictionary for the cbpdn stage. There are no parameters
    or return values because all inputs and outputs are from and to
    global variables.
    """

    # Set working dictionary for cbpdn step and compute DFT of dictionary D
    mp_Df[:] = rfftn(mp_D_Y0, mp_cri.Nv, mp_cri.axisN)
def cnst_A1T(self, Y1):
    r"""Compute :math:`A_1^T \mathbf{y}_1` component of
    :math:`A^T \mathbf{y}`. In this case :math:`A_1^T \mathbf{y}_1 =
    (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`.
    """

    Y1f = rfftn(Y1, None, axes=self.cri.axisN)
    return irfftn(np.conj(self.GDf) * Y1f, self.cri.Nv, self.cri.axisN)
def reconstruct(self, X=None):
    """Reconstruct representation."""

    if X is None:
        X = self.X
    Xf = rfftn(X, None, self.cri.axisN)
    Sf = np.sum(self.Df * Xf, axis=self.cri.axisM)
    return irfftn(Sf, self.cri.Nv, self.cri.axisN)
def cnst_AT(self, X):
    r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is a
    component of ADMM problem constraint. In this case
    :math:`A^T \mathbf{x} = (G_r^T \;\; G_c^T) \mathbf{x}`.
    """

    Xf = rfftn(X, axes=self.axes)
    return np.sum(irfftn(np.conj(self.Gf) * Xf, self.axsz, axes=self.axes),
                  axis=self.Y.ndim - 1)
def cnst_A(self, X, Xf=None):
    r"""Compute :math:`A \mathbf{x}` component of ADMM problem
    constraint. In this case
    :math:`A \mathbf{x} = (G_r^T \;\; G_c^T)^T \mathbf{x}`.
    """

    if Xf is None:
        Xf = rfftn(X, axes=self.axes)
    return irfftn(self.Gf * Xf[..., np.newaxis], self.axsz,
                  axes=self.axes)
def obfn_dfd(self):
    r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
    \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
    """

    XF = rfftn(self.obfn_fvar(), mp_Nv, mp_axisN)
    DX = np.moveaxis(irfftn(sl.inner(mp_Df, XF, mp_axisM), mp_Nv,
                            mp_axisN), mp_axisM, self.cri.axisM)
    return np.sum((self.W * (DX - self.S))**2) / 2.0
def ccmodmd_setcoef(k):
    """Set the coefficient maps for the ccmod stage. The only parameter
    is the slice index `k` and there are no return values; all inputs
    and outputs are from and to global variables.
    """

    # Set working coefficient maps for ccmod step and compute DFT of
    # coefficient maps Z
    mp_Zf[k] = rfftn(mp_Z_Y1[k], mp_cri.Nv, mp_cri.axisN)
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}`.
    """

    self.YU[:] = self.Y - self.U
    b = self.ZSf + self.rho * rfftn(self.YU, None, self.cri.axisN)
    self.Xf[:] = sl.solvemdbi_ism(self.Zf, self.rho, b, self.cri.axisM,
                                  self.cri.axisK)
    self.X = irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
    self.xstep_check(b)
def reconstruct(self, D=None):
    """Reconstruct representation."""

    if D is None:
        Df = self.Xf
    else:
        Df = rfftn(D, None, self.cri.axisN)
    Sf = np.sum(self.Zf * Df, axis=self.cri.axisM)
    return irfftn(Sf, self.cri.Nv, self.cri.axisN)
def cnst_A0(self, X, Xf=None):
    r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
    constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
    \Gamma_1^T \;\; \ldots)^T \mathbf{x}`.
    """

    if Xf is None:
        Xf = rfftn(X, axes=self.cri.axisN)
    return self.Wtv[..., np.newaxis] * irfftn(
        self.Gf * Xf[..., np.newaxis], self.cri.Nv, axes=self.cri.axisN)
def setS(self, S):
    """Set the signal."""

    self.S = np.asarray(S, dtype=self.dtype)
    self.Sf = rfftn(self.S)
    if self.opt['DatFidNoDC']:
        if S.ndim == 1:
            self.Sf[0] = 0.0
        else:
            self.Sf[0, 0] = 0.0
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * rfftn(YU0, None, mp_cri.axisN) + \
            rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), rfftn(YU0, None, mp_cri.axisN),
                     axis=mp_cri.axisC) + \
            rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
def cnst_A0T(self, X):
    r"""Compute :math:`A_0^T \mathbf{x}` where :math:`A_0 \mathbf{x}`
    is a component of the ADMM problem constraint. In this case
    :math:`A_0^T \mathbf{x} = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots)
    \mathbf{x}`.
    """

    Xf = rfftn(X, axes=self.cri.axisN)
    return self.Wtv[..., np.newaxis] * irfftn(
        np.conj(self.Gf) * Xf[..., 0:-1], self.cri.Nv,
        axes=self.cri.axisN)
def ccmod_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """

    YU = mp_D_Y - mp_D_U[k]
    b = mp_ZSf[k] + mp_drho * rfftn(YU, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], mp_drho, b, axis=mp_cri.axisM)
    mp_D_X[k] = irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
def compute_residuals(self):
    """Compute residuals and stopping thresholds. The parent class
    method is overridden to ensure that the residual calculations
    include the additional variables introduced in the modification to
    the baseline algorithm.
    """

    # The full primal residual is straightforward to compute from
    # the primal residuals for the baseline algorithm and for the
    # additional variables
    r0 = self.rsdl_r(self.AXnr, self.Y)
    r1 = self.AX1nr - self.Y1 - self.S
    r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))

    # The full dual residual is more complicated to compute than the
    # full primal residual
    ATU = self.swapaxes(self.U) + irfftn(
        np.conj(self.Zf) * rfftn(self.U1, self.cri.Nv, self.cri.axisN),
        self.cri.Nv, self.cri.axisN)
    s = self.rho * np.linalg.norm(ATU)

    # The normalisation factor for the full primal residual is also not
    # straightforward
    nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 +
                  np.linalg.norm(self.AX1nr)**2)
    nY = np.sqrt(np.linalg.norm(self.Y)**2 +
                 np.linalg.norm(self.Y1)**2)
    rn = max(nAX, nY, np.linalg.norm(self.S))

    # The normalisation factor for the full dual residual is
    # straightforward to compute
    sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 +
                            np.linalg.norm(self.U1)**2)

    # Final residual values and stopping tolerances depend on whether
    # standard or normalised residuals are specified via the options
    # object
    if self.opt['AutoRho', 'StdResiduals']:
        epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
            rn*self.opt['RelStopTol']
        edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
            sn*self.opt['RelStopTol']
    else:
        if rn == 0.0:
            rn = 1.0
        if sn == 0.0:
            sn = 1.0
        r /= rn
        s /= sn
        epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
            self.opt['RelStopTol']
        edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
            self.opt['RelStopTol']

    return r, s, epri, edua
def cnst_A1(self, X, Xf=None):
    r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
    constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
    \Gamma_1^T \;\; \ldots)^T \mathbf{x}`.
    """

    if Xf is None:
        Xf = rfftn(X, axes=self.cri.axisN)
    return irfftn(
        sl.inner(self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM),
        self.cri.Nv, self.cri.axisN)
def cnst_A0(self, X, Xf=None):
    r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
    constraint.
    """

    if Xf is None:
        Xf = rfftn(X, None, self.cri.axisN)
    return irfftn(
        dot(self.B, inner(self.Df, Xf, axis=self.cri.axisM),
            axis=self.cri.axisC), self.cri.Nv, self.cri.axisN)
def cnst_A0T(self, Y0):
    r"""Compute :math:`A_0^T \mathbf{y}_0` component of
    :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
    """

    # This calculation involves non-negligible computational cost. It
    # should be possible to disable relevant diagnostic information
    # (dual residual) to avoid this cost.
    Y0f = rfftn(Y0, None, self.cri.axisN)
    return irfftn(sl.inner(np.conj(self.Zf), Y0f, axis=self.cri.axisK),
                  self.cri.Nv, self.cri.axisN)
def cnst_A0(self, X, Xf=None):
    r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
    constraint.
    """

    # This calculation involves non-negligible computational cost
    # when Xf is None (i.e. the function is not being applied to
    # self.X).
    if Xf is None:
        Xf = rfftn(X, None, self.cri.axisN)
    return irfftn(sl.inner(self.Zf, Xf, axis=self.cri.axisM),
                  self.cri.Nv, self.cri.axisN)
def setG(self, G):
    """Set the convolution kernel."""

    self.G = G.astype(self.dtype)
    self.Gf = rfftn(self.G)
    if self.opt['DatFidNoDC']:
        if G.ndim == 1:
            self.Gf[0] = 0.0
        else:
            self.Gf[0, 0] = 0.0
    self.GHSf = np.conj(self.Gf) * self.Sf
    self.GHGf = np.conj(self.Gf) * self.Gf