def test_Gradient(par): """Dot-test for Gradient operator""" for kind in ("forward", "centered", "backward"): # 2d Gop = Gradient( (par["ny"], par["nx"]), sampling=(par["dy"], par["dx"]), edge=par["edge"], kind=kind, dtype="float32", ) assert dottest(Gop, 2 * par["ny"] * par["nx"], par["ny"] * par["nx"], tol=1e-3) # 3d Gop = Gradient( (par["nz"], par["ny"], par["nx"]), sampling=(par["dz"], par["dy"], par["dx"]), edge=par["edge"], kind=kind, dtype="float32", ) assert dottest( Gop, 3 * par["nz"] * par["ny"] * par["nx"], par["nz"] * par["ny"] * par["nx"], tol=1e-3, )
def test_Gradient(par): """Dot-test for Gradient operator """ for kind in ('forward', 'centered', 'backward'): # 2d Gop = Gradient((par['ny'], par['nx']), sampling=(par['dy'], par['dx']), edge=par['edge'], kind=kind, dtype='float32') assert dottest(Gop, 2 * par['ny'] * par['nx'], par['ny'] * par['nx'], tol=1e-3) # 3d Gop = Gradient((par['nz'], par['ny'], par['nx']), sampling=(par['dz'], par['dy'], par['dx']), edge=par['edge'], kind=kind, dtype='float32') assert dottest(Gop, 3 * par['nz'] * par['ny'] * par['nx'], par['nz'] * par['ny'] * par['nx'], tol=1e-3)
def FirstDirectionalDerivative(dims, v, sampling=1, edge=False, dtype='float64'):
    r"""First Directional derivative.

    Apply a directional derivative operator to a multi-dimensional array
    (at least 2 dimensions are required) along either a single common
    direction or different directions for each point of the array.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension.
    v : :obj:`np.ndarray`, optional
        Single direction (array of size :math:`n_{dims}`) or group of
        directions (array of size
        :math:`[n_{dims} \times n_{d_0} \times ... \times n_{d_{n_{dims}}}]`)
    sampling : :obj:`tuple`, optional
        Sampling steps for each direction.
    edge : :obj:`bool`, optional
        Use reduced order derivative at edges (``True``) or
        ignore them (``False``).
    dtype : :obj:`str`, optional
        Type of elements in input array.

    Returns
    -------
    ddop : :obj:`pylops.LinearOperator`
        First directional derivative linear operator

    Notes
    -----
    The FirstDirectionalDerivative applies a first-order derivative
    to a multi-dimensional array along the direction defined by the unitary
    vector :math:`\mathbf{v}`:

    .. math::
        df_\mathbf{v} = \nabla f \mathbf{v}

    or along the directions defined by the unitary vectors
    :math:`\mathbf{v}(x, y)`:

    .. math::
        df_\mathbf{v}(x, y) = \nabla f(x, y) \mathbf{v}(x, y)

    where we have here considered the 2-dimensional case.

    This operator can be easily implemented as the chain of the
    :py:class:`pylops.Gradient` operator, the :py:class:`pylops.Diagonal`
    operator with :math:`\mathbf{v}` along the main diagonal, and the
    :py:class:`pylops.Sum` operator over the direction axis.

    """
    Gop = Gradient(dims, sampling=sampling, edge=edge, dtype=dtype)
    if v.ndim == 1:
        Dop = Diagonal(v, dims=[len(dims)] + list(dims), dir=0, dtype=dtype)
    else:
        Dop = Diagonal(v.ravel(), dtype=dtype)
    Sop = Sum(dims=[len(dims)] + list(dims), dir=0, dtype=dtype)
    ddop = Sop * Dop * Gop
    return ddop

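# Usage sketch (not part of the original source): a minimal example of building and
# applying the FirstDirectionalDerivative defined above to a small 2-d array along a
# single common direction. The grid size, sampling and direction vector below are
# illustrative assumptions; it presumes pylops' Gradient, Diagonal and Sum are
# importable as in the function above.
import numpy as np

ny, nx = 5, 4
v = np.array([1.0, 1.0]) / np.sqrt(2.0)      # unit direction vector
Dop = FirstDirectionalDerivative((ny, nx), v, sampling=(1.0, 1.0), edge=True)

x = np.arange(ny * nx, dtype='float64')      # flattened 2-d model
y = Dop * x                                  # directional derivative, also of size ny * nx
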
def solve(self, data: np.ndarray, maxiter: int = 150, tol: float = 5*10**(-4)):
    if self.reg_mode is not None:
        grad = Gradient(dims=self.domain_shape, edge=True, dtype='float64', kind='backward')
        K = self.alpha * grad

        if not self.tau:
            norm = np.abs(np.asscalar(K.eigs(neigs=1, which='LM')))
            sigma = 0.99 / norm
            print("Calculated tau: " + str(sigma) + ". "
                  "On the next run with the same alpha, set this tau value to decrease runtime.")
            tau = sigma
        else:
            tau = self.tau
            sigma = tau

        if self.reg_mode == 'tv':
            F_star = Projection(self.domain_shape, len(self.domain_shape))
        else:
            F_star = DatatermLinear()
            F_star.set_proxdata(0)
    else:
        tau = 0.99
        sigma = tau
        F_star = DatatermLinear()
        K = 0

    G = DatatermRecBregman(self.O)
    G.set_proxparam(tau)
    G.set_proxdata(data.ravel())
    F_star.set_proxparam(sigma)

    pk = np.zeros(self.domain_shape)
    pk = pk.T.ravel()

    plt.Figure()
    ulast = np.zeros(self.domain_shape)
    u01 = ulast
    i = 0
    # Bregman iterations: rerun the PDHGM solver and update pk via the adjoint of the
    # forward operator self.O until the data residual drops below self.assessment.
    while np.linalg.norm(self.O * u01.ravel() - data.ravel()) > self.assessment:
        print("norm error: " + str(np.linalg.norm(self.O * u01.ravel() - data.ravel())))

        self.solver = PdHgm(K, F_star, G)
        self.solver.maxiter = maxiter
        self.solver.tol = tol

        G.set_proxdata(data.ravel())
        G.setP(pk)
        self.solver.solve()

        u01 = np.reshape(np.real(self.solver.var['x']), self.domain_shape)
        pk = pk - (1 / self.alpha) * np.real(self.O.H * (self.O * u01.ravel() - data.ravel()))
        i = i + 1

        if self.plot_iteration:
            plt.gray()
            plt.imshow(u01, vmin=0, vmax=1)
            plt.axis('off')
            # plt.title('RRE =' + str(round(RRE_breg, 2)), y=-0.1, fontsize=20)
            plt.savefig(self.data_output_path + 'Bregman_reconstruction_iter' + str(i) + '.png',
                        bbox_inches='tight', pad_inches=0)
            plt.close()

    return np.reshape(self.solver.var['x'], self.domain_shape)

def test_Gradient(par): """Dot-test for Gradient operator """ # 2d Gop = Gradient((par['ny'], par['nx']), sampling=(par['dy'], par['dx']), edge=par['edge'], dtype='float32') assert dottest(Gop, 2 * par['ny'] * par['nx'], par['ny'] * par['nx'], tol=1e-3) # 3d Gop = Gradient((par['nz'], par['ny'], par['nx']), sampling=(par['dz'], par['dy'], par['dx']), edge=par['edge'], dtype='float32') assert dottest(Gop, 3 * par['nz'] * par['ny'] * par['nx'], par['nz'] * par['ny'] * par['nx'], tol=1e-3)
def solve(self, data: np.ndarray, maxiter: int = 150, tol: float = 5 * 10**(-4)):
    if self.reg_mode is not None:
        grad = Gradient(dims=self.domain_shape, edge=True, dtype='float64', kind='backward')
        K = grad * self.alpha

        if not self.tau:
            norm = np.abs(np.asscalar(K.eigs(neigs=1, which='LM')))
            sigma = 0.99 / norm
            print(
                "Calculated tau: " + str(sigma) + ". "
                "On the next run with the same alpha, set this tau value to decrease runtime."
            )
            tau = sigma
        else:
            tau = self.tau
            sigma = tau

        if self.reg_mode == 'tv':
            F_star = Projection(self.domain_shape, len(self.domain_shape))
        else:
            F_star = DatatermLinear()
            F_star.set_proxdata(0)
    else:
        tau = 0.99
        sigma = tau
        F_star = DatatermLinear()
        K = 0

    G = DatatermLinear()
    G.set_proxparam(tau)
    G.set_proxdata(data.ravel())
    F_star.set_proxparam(sigma)

    self.solver = PdHgm(K, F_star, G)
    self.solver.maxiter = maxiter
    self.solver.tol = tol

    self.solver.solve()

    return np.reshape(self.solver.var['x'], self.domain_shape)

def solve(self, data: np.ndarray, maxiter: int = 150, tol: float = 5 * 10**(-4)): if self.reg_mode is not None: if len(self.domain_shape) > 2: grad = Gradient(dims=self.domain_shape, edge=True, dtype='float64') else: ex = np.ones((self.domain_shape[1], 1)) ey = np.ones((1, self.domain_shape[0])) dx = sparse.diags([1, -1], [0, 1], shape=(self.domain_shape[1], self.domain_shape[1])).tocsr() dx[self.domain_shape[1] - 1, :] = 0 dy = sparse.diags([-1, 1], [0, 1], shape=(self.domain_shape[0], self.domain_shape[0])).tocsr() dy[self.domain_shape[0] - 1, :] = 0 grad = sparse.vstack( (sparse.kron(dx, sparse.eye(self.domain_shape[0]).tocsr()), sparse.kron(sparse.eye(self.domain_shape[1]).tocsr(), dy))) K = self.alpha * grad if not self.tau: if np.prod(self.domain_shape) > 25000: long = True else: long = False if long: print("Start evaluate tau. Long runtime.") if len(self.domain_shape) > 2: norm = np.abs(np.asscalar(K.eigs(neigs=1, which='LM'))) else: norm = normest(K) sigma = 0.99 / norm if long: print("Calc tau: " + str(sigma)) tau = sigma else: tau = self.tau sigma = tau if self.reg_mode == 'tv': F_star = Projection(self.domain_shape, len(self.domain_shape)) else: F_star = DatatermLinear() F_star.set_proxdata(0) else: tau = 0.99 sigma = tau F_star = DatatermLinear() K = 0 G = DatatermLinear() G.set_proxparam(tau) G.set_proxdata(data.ravel()) F_star.set_proxparam(sigma) self.solver = PdHgm(K, F_star, G) self.solver.maxiter = maxiter self.solver.tol = tol self.solver.solve() return np.reshape(self.solver.var['x'], self.domain_shape)
def solve(self, data: np.ndarray, maxiter: int = 150, tol: float = 5 * 10**(-4)): if self.reg_mode is not None: if len(self.domain_shape) > 2: grad = Gradient(dims=self.domain_shape, edge=True, dtype='float64') else: dx = sparse.diags([1, -1], [0, 1], shape=(self.domain_shape[1], self.domain_shape[1])).tocsr() dx[self.domain_shape[1] - 1, :] = 0 dy = sparse.diags([-1, 1], [0, 1], shape=(self.domain_shape[0], self.domain_shape[0])).tocsr() dy[self.domain_shape[0] - 1, :] = 0 grad = sparse.vstack( (sparse.kron(dx, sparse.eye(self.domain_shape[0]).tocsr()), sparse.kron(sparse.eye(self.domain_shape[1]).tocsr(), dy))) K = self.alpha * grad if not self.tau: if np.prod(self.domain_shape) > 25000: long = True else: long = False if long: print("Start evaluate tau. Long runtime.") if len(self.domain_shape) > 2: norm = np.abs(np.asscalar(K.eigs(neigs=1, which='LM'))) else: norm = normest(K) sigma = 0.99 / norm if long: print("Calc tau: " + str(sigma)) tau = sigma else: tau = self.tau sigma = tau if self.reg_mode == 'tv': F_star = Projection(self.domain_shape, len(self.domain_shape)) else: F_star = DatatermLinear() F_star.set_proxdata(0) else: tau = 0.99 sigma = tau F_star = DatatermLinear() K = 0 G = DatatermLinearRecBregman() G.set_proxparam(tau) G.set_proxdata(data.ravel()) F_star.set_proxparam(sigma) pk = np.zeros(self.domain_shape) pk = pk.T.ravel() plt.Figure() ulast = np.zeros(self.domain_shape) u01 = ulast i = 0 while np.linalg.norm(u01.ravel() - data.ravel()) > self.assessment: print(np.linalg.norm(u01.ravel() - data.ravel())) print(self.assessment) self.solver = PdHgm(K, F_star, G) self.solver.maxiter = maxiter self.solver.tol = tol G.set_proxdata(data.ravel()) G.setQ(pk) self.solver.solve() u01 = np.reshape(np.real(self.solver.var['x']), self.domain_shape) pk = pk - (1 / self.alpha) * (u01.ravel() - data.ravel()) i = i + 1 if self.plot_iteration: plt.gray() plt.imshow(u01, vmin=0, vmax=1) plt.axis('off') #plt.title('RRE =' + str(round(RRE_breg, 2)), y=-0.1, fontsize=20) plt.savefig(self.data_output_path + 'Bregman_reconstruction_iter' + str(i) + '.png', bbox_inches='tight', pad_inches=0) plt.close() return np.reshape(self.solver.var['x'], self.domain_shape)