def __truediv__(self, y, niter=100):
    if self.explicit is True:
        if sp.sparse.issparse(self.A):
            # use scipy solver for sparse matrices
            xest = spsolve(self.A, y)
        elif isinstance(self.A, np.ndarray):
            # use scipy solvers for dense matrices (used for backward
            # compatibility, could be switched to numpy equivalents)
            if self.A.shape[0] == self.A.shape[1]:
                xest = solve(self.A, y)
            else:
                xest = lstsq(self.A, y)[0]
        else:
            # use numpy/cupy solvers for dense matrices
            ncp = get_array_module(y)
            if self.A.shape[0] == self.A.shape[1]:
                xest = ncp.linalg.solve(self.A, y)
            else:
                xest = ncp.linalg.lstsq(self.A, y)[0]
    else:
        if isinstance(y, np.ndarray):
            # numpy backend
            xest = lsqr(self, y, iter_lim=niter)[0]
        else:
            # cupy backend
            ncp = get_array_module(y)
            xest = cgls(self, y,
                        x0=ncp.zeros(int(self.shape[1]), dtype=self.dtype),
                        niter=niter)[0]
    return xest

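# A minimal usage sketch of the division overload above, illustrated with
# pylops.MatrixMult (an assumption: any PyLops-style operator exposing this
# __truediv__ would behave the same). For an explicit square dense operator
# the division dispatches to a direct solve; for implicit operators it falls
# back to lsqr/cgls with `niter` iterations.
import numpy as np
import pylops

A = np.array([[4.0, 1.0], [1.0, 3.0]])
Aop = pylops.MatrixMult(A)         # explicit operator (Aop.explicit is True)
y = Aop * np.array([1.0, 2.0])     # forward model
x = Aop / y                        # calls __truediv__, dense direct solve
print(np.allclose(x, [1.0, 2.0]))  # True
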
def _rmatvec_serial(self, x):
    ncp = get_array_module(x)
    y = ncp.zeros(self.mops, dtype=self.dtype)
    for iop, oper in enumerate(self.ops):
        y[self.mmops[iop]:self.mmops[iop + 1]] = \
            oper.rmatvec(x[self.nnops[iop]:self.nnops[iop + 1]]).squeeze()
    return y

def _rmatvec_centered(self, x):
    ncp = get_array_module(x)
    if not self.reshape:
        x = x.squeeze()
        y = ncp.zeros(self.N, self.dtype)
        y[0:-2] -= (0.5 * x[1:-1]) / self.sampling
        y[2:] += (0.5 * x[1:-1]) / self.sampling
        if self.edge:
            y[0] -= x[0] / self.sampling
            y[1] += x[0] / self.sampling
            y[-2] -= x[-1] / self.sampling
            y[-1] += x[-1] / self.sampling
    else:
        x = ncp.reshape(x, self.dims)
        if self.dir > 0:  # need to bring the dim. to derive to first dim.
            x = ncp.swapaxes(x, self.dir, 0)
        y = ncp.zeros(x.shape, self.dtype)
        y[0:-2] -= (0.5 * x[1:-1]) / self.sampling
        y[2:] += (0.5 * x[1:-1]) / self.sampling
        if self.edge:
            y[0] -= x[0] / self.sampling
            y[1] += x[0] / self.sampling
            y[-2] -= x[-1] / self.sampling
            y[-1] += x[-1] / self.sampling
        if self.dir > 0:
            y = ncp.swapaxes(y, 0, self.dir)
        y = y.ravel()
    return y

def _sincinterp(M, iava, dims=None, dir=0, dtype='float64'):
    """Sinc interpolation.
    """
    ncp = get_array_module(iava)
    _checkunique(iava)

    # create sinc interpolation matrix
    nreg = M if dims is None else dims[dir]
    ireg = ncp.arange(nreg)
    sinc = ncp.tile(iava[:, np.newaxis], (1, nreg)) - \
           ncp.tile(ireg, (len(iava), 1))
    sinc = ncp.sinc(sinc)

    # identify additional dimensions and create MatrixMult operator
    otherdims = None
    if dims is not None:
        otherdims = ncp.array(dims)
        otherdims = ncp.roll(otherdims, -dir)
        otherdims = otherdims[1:]
    Op = MatrixMult(sinc, dims=otherdims, dtype=dtype)

    # create Transpose operator that brings dir to first dimension
    if dir > 0:
        axes = np.arange(len(dims), dtype=int)
        axes = np.roll(axes, -dir)
        dimsd = list(dims)
        dimsd[dir] = len(iava)
        Top = Transpose(dims, axes=axes, dtype=dtype)
        T1op = Transpose(dimsd, axes=axes, dtype=dtype)
        Op = T1op.H * Op * Top
    return Op

def __init__(self, iava, dims, dtype="float64"): ncp = get_array_module(iava) # check non-unique pairs (works only with numpy arrays) _checkunique(to_numpy(iava)) # define dimension of data ndims = len(dims) self.dims = dims self.dimsd = [len(iava[1])] + list(dims[2:]) # find indices and weights self.iava_t = ncp.floor(iava[0]).astype(int) self.iava_b = self.iava_t + 1 self.weights_tb = iava[0] - self.iava_t self.iava_l = ncp.floor(iava[1]).astype(int) self.iava_r = self.iava_l + 1 self.weights_lr = iava[1] - self.iava_l # expand dims to weights for nd-arrays if ndims > 2: for _ in range(ndims - 2): self.weights_tb = ncp.expand_dims(self.weights_tb, axis=-1) self.weights_lr = ncp.expand_dims(self.weights_lr, axis=-1) self.shape = (np.prod(np.array(self.dimsd)), np.prod(np.array(self.dims))) self.dtype = np.dtype(dtype) self.explicit = False
def nonstationary_convmtx(H, n, hc=0, pad=(0, 0)):
    r"""Convolution matrix from a bank of filters

    Makes a dense convolution matrix :math:`\mathbf{C}` such that the dot
    product ``np.dot(C, x)`` is the nonstationary convolution of the bank
    of filters :math:`H=[h_1, h_2, \ldots, h_n]` and the input signal
    :math:`x`.

    Parameters
    ----------
    H : :obj:`np.ndarray`
        Convolution filters (2D array of shape
        :math:`[n_{filters} \times n_{h}]`)
    n : :obj:`int`
        Number of columns of convolution matrix
    hc : :obj:`int`, optional
        Index of center of first filter
    pad : :obj:`tuple`, optional
        Zero-padding to apply to the bank of filters before and after the
        provided values (use it to avoid wrap-around or pass filters with
        enough padding)

    Returns
    -------
    C : :obj:`np.ndarray`
        Convolution matrix

    """
    ncp = get_array_module(H)

    H = ncp.pad(H, ((0, 0), pad), mode='constant')
    C = ncp.array([ncp.roll(h, ih) for ih, h in enumerate(H)])
    C = C[:, pad[0] + hc:pad[0] + hc + n].T  # take away edges
    return C

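# Usage sketch for nonstationary_convmtx (assumes the function above and its
# backend helpers are in scope). Each column of C holds the filter assigned
# to one input sample; with identical filters and enough right padding to
# avoid wrap-around, the result reduces to a stationary 'same' convolution.
import numpy as np

n = 5
h = np.array([1.0, -2.0, 1.0])
H = np.tile(h, (n, 1))                    # same filter for every sample
C = nonstationary_convmtx(H, n, hc=1, pad=(1, 4))   # hc=1: centre of filter
x = np.arange(n, dtype=float)
print(C.shape)                            # (5, 5)
print(np.allclose(C @ x, np.convolve(x, h, mode='same')))  # True
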
def __init__(self, A, dims=None, dtype="float64"): ncp = get_array_module(A) self.A = A if isinstance(A, ncp.ndarray): self.complex = np.iscomplexobj(A) else: self.complex = np.iscomplexobj(A.data) if dims is None: self.reshape = False self.shape = A.shape self.explicit = True else: if isinstance(dims, int): dims = (dims, ) self.reshape = True self.dims = np.array(dims, dtype=int) self.reshapedims = [ np.insert([np.prod(self.dims)], 0, self.A.shape[1]), np.insert([np.prod(self.dims)], 0, self.A.shape[0]), ] self.shape = ( A.shape[0] * np.prod(self.dims), A.shape[1] * np.prod(self.dims), ) self.explicit = False self.dtype = np.dtype(dtype) # Check dtype for correctness (upcast to complex when A is complex) if np.iscomplexobj(A) and not np.iscomplexobj( np.ones(1, dtype=self.dtype)): self.dtype = A.dtype logging.warning("Matrix A is a complex object, dtype cast to %s" % self.dtype)
def convmtx(h, n):
    r"""Convolution matrix

    Equivalent of `MATLAB's convmtx function
    <http://www.mathworks.com/help/signal/ref/convmtx.html>`_ .
    Makes a dense convolution matrix :math:`\mathbf{C}` such that the dot
    product ``np.dot(C, x)`` is the convolution of the filter :math:`h`
    and the input signal :math:`x`.

    Parameters
    ----------
    h : :obj:`np.ndarray`
        Convolution filter (1D array)
    n : :obj:`int`
        Number of columns (if :math:`len(h) < n`) or rows
        (if :math:`len(h) \geq n`) of convolution matrix

    Returns
    -------
    C : :obj:`np.ndarray`
        Convolution matrix of size :math:`len(h)+n-1 \times n`
        (if :math:`len(h) < n`) or :math:`n \times len(h)+n-1`
        (if :math:`len(h) \geq n`)

    """
    ncp = get_array_module(h)
    if len(h) < n:
        col_1 = ncp.r_[h[0], ncp.zeros(n - 1, dtype=h.dtype)]
        row_1 = ncp.r_[h, ncp.zeros(n - 1, dtype=h.dtype)]
    else:
        row_1 = ncp.r_[h[0], ncp.zeros(n - 1, dtype=h.dtype)]
        col_1 = ncp.r_[h, ncp.zeros(n - 1, dtype=h.dtype)]
    C = get_toeplitz(h)(col_1, row_1)
    return C

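# Usage sketch for convmtx (assumes the function above and its backend
# helpers are in scope). In the len(h) >= n branch the first column of the
# Toeplitz matrix carries the filter, so applying it to a length-n signal
# reproduces the full convolution computed by np.convolve.
import numpy as np

h = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
x = np.array([1.0, -1.0, 2.0])
C = convmtx(h, len(x))                        # len(h) >= n branch
print(np.allclose(C @ x, np.convolve(h, x)))  # True
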
def _matvec(self, x):
    ncp = get_array_module(x)
    x = ncp.reshape(x, self.dims)
    y = x[self.iava_t, self.iava_l] * (1 - self.weights_tb) * (1 - self.weights_lr) + \
        x[self.iava_t, self.iava_r] * (1 - self.weights_tb) * self.weights_lr + \
        x[self.iava_b, self.iava_l] * self.weights_tb * (1 - self.weights_lr) + \
        x[self.iava_b, self.iava_r] * self.weights_tb * self.weights_lr
    return y.ravel()

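# A small worked check of the four-corner bilinear weighting used above,
# written in plain NumPy and independent of the operator class: one
# fractional point (1.25, 2.75) is reconstructed from its four neighbours.
import numpy as np

grid = np.arange(20.0).reshape(4, 5)       # grid values f(i, j) = 5*i + j
yp, xp = 1.25, 2.75                        # fractional sample point
t, l = int(np.floor(yp)), int(np.floor(xp))
b, r = t + 1, l + 1
w_tb, w_lr = yp - t, xp - l
val = (grid[t, l] * (1 - w_tb) * (1 - w_lr) +
       grid[t, r] * (1 - w_tb) * w_lr +
       grid[b, l] * w_tb * (1 - w_lr) +
       grid[b, r] * w_tb * w_lr)
# bilinear interpolation is exact for a (bi)linear function
print(np.isclose(val, 5 * yp + xp))        # True
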
def _IRLS_model(Op, data, nouter, threshR=False, epsR=1e-10, epsI=1e-10,
                x0=None, tolIRLS=1e-10, returnhistory=False, **kwargs_solver):
    r"""Iteratively reweighted least squares with L1 model term
    """
    ncp = get_array_module(data)

    if x0 is not None:
        data = data - Op * x0
    if returnhistory:
        xinv_hist = ncp.zeros((nouter + 1, int(Op.shape[1])))
        rw_hist = ncp.zeros((nouter + 1, int(Op.shape[0])))

    Iop = Identity(data.size, dtype=data.dtype)
    # first iteration (unweighted least-squares)
    if ncp == np:
        xinv = Op.H @ \
               lsqr(Op @ Op.H + (epsI ** 2) * Iop, data, **kwargs_solver)[0]
    else:
        xinv = Op.H @ cgls(Op @ Op.H + (epsI ** 2) * Iop, data,
                           ncp.zeros(int(Op.shape[0]), dtype=Op.dtype),
                           **kwargs_solver)[0]
    if returnhistory:
        xinv_hist[0] = xinv
    for iiter in range(nouter):
        # other iterations (weighted least-squares)
        xinvold = xinv.copy()
        rw = np.abs(xinv)
        rw = rw / rw.max()
        R = Diagonal(rw, dtype=rw.dtype)
        if ncp == np:
            xinv = R @ Op.H @ lsqr(Op @ R @ Op.H + epsI ** 2 * Iop,
                                   data, **kwargs_solver)[0]
        else:
            xinv = R @ Op.H @ cgls(Op @ R @ Op.H + epsI ** 2 * Iop, data,
                                   ncp.zeros(int(Op.shape[0]),
                                             dtype=Op.dtype),
                                   **kwargs_solver)[0]
        # save history
        if returnhistory:
            rw_hist[iiter] = rw
            xinv_hist[iiter + 1] = xinv
        # check tolerance
        if np.linalg.norm(xinv - xinvold) < tolIRLS:
            nouter = iiter
            break
    # adding initial guess
    if x0 is not None:
        xinv = x0 + xinv
        if returnhistory:
            xinv_hist = x0 + xinv_hist
    if returnhistory:
        return xinv, nouter, xinv_hist[:nouter + 1], rw_hist[:nouter + 1]
    else:
        return xinv, nouter

def _matvec(self, x):
    ncp = get_array_module(x)
    if self.explicit:
        y = self.Opcol @ x
    else:
        y = ncp.zeros(int(self.Op.shape[1]), dtype=self.dtype)
        y[self.cols] = x
        y = self.Op._matvec(y)
    return y

def _matvec(self, x):
    ncp = get_array_module(x)
    if self.reshape:
        x = ncp.reshape(x, self.reshapedims[0])
    y = self.A.dot(x)
    if self.reshape:
        return y.ravel()
    else:
        return y

def _rmatvec(self, x):
    ncp = get_array_module(x)
    y = self.Op._rmatvec(x)
    if self.adj:
        if self.real:
            y = ncp.real(y)
        else:
            y = -ncp.imag(y)
    return y

def __init__(self, N, h, dims, offset=None, dirs=None, method='fft',
             dtype='float64'):
    ncp = get_array_module(h)
    self.h = h
    self.nh = np.array(self.h.shape)
    self.dirs = np.arange(len(dims)) if dirs is None else np.array(dirs)

    # padding
    if offset is None:
        offset = np.zeros(self.h.ndim, dtype=int)
    else:
        offset = np.array(offset, dtype=int)
    self.offset = 2 * (self.nh // 2 - offset)
    pad = [(0, 0) for _ in range(self.h.ndim)]
    dopad = False
    for inh, nh in enumerate(self.nh):
        if nh % 2 == 0:
            self.offset[inh] -= 1
        if self.offset[inh] != 0:
            pad[inh] = [self.offset[inh] if self.offset[inh] > 0 else 0,
                        -self.offset[inh] if self.offset[inh] < 0 else 0]
            dopad = True
    if dopad:
        self.h = ncp.pad(self.h, pad, mode='constant')
    self.nh = self.h.shape

    # find out which directions are used for convolution and define offsets
    if len(dims) != len(self.nh):
        dimsh = np.ones(len(dims), dtype=int)
        for idir, dir in enumerate(self.dirs):
            dimsh[dir] = self.nh[idir]
        self.h = self.h.reshape(dimsh)

    if np.prod(dims) != N:
        raise ValueError('product of dims must equal N!')
    else:
        self.dims = np.array(dims)
        self.reshape = True

    # convolve and correlate functions
    self.convolve = get_convolve(h)
    self.correlate = get_correlate(h)
    self.method = method

    self.shape = (np.prod(self.dims), np.prod(self.dims))
    self.dtype = np.dtype(dtype)
    self.explicit = False

def __init__(self, taxis, order, dtype='float64'):
    ncp = get_array_module(taxis)
    if not isinstance(taxis, ncp.ndarray):
        logging.error('t must be numpy.ndarray...')
        raise TypeError('t must be numpy.ndarray...')
    else:
        self.taxis = taxis
    self.order = order
    self.shape = (len(self.taxis), self.order + 1)
    self.dtype = np.dtype(dtype)
    self.explicit = False

def _matvec(self, x):
    ncp = get_array_module(x)
    if not self.inplace:
        x = x.copy()
    if not self.reshape:
        y = x[self.iava]
    else:
        x = ncp.reshape(x, self.dims)
        y = ncp.take(x, self.iava, axis=self.dir)
        y = y.ravel()
    return y

def _rmatvec(self, x):
    ncp = get_array_module(x)
    if not self.inplace:
        x = x.copy()
    if self.shape[0] == self.shape[1]:
        y = x
    elif self.shape[0] < self.shape[1]:
        y = ncp.zeros(self.shape[1], dtype=self.dtype)
        y[:self.shape[0]] = x
    else:
        y = x[:self.shape[1]]
    return y

def _IRLS_data(Op, data, nouter, threshR=False, epsR=1e-10, epsI=1e-10,
               x0=None, tolIRLS=1e-10, returnhistory=False, **kwargs_solver):
    r"""Iteratively reweighted least squares with L1 data term
    """
    ncp = get_array_module(data)

    if x0 is not None:
        data = data - Op * x0
    if returnhistory:
        xinv_hist = ncp.zeros((nouter + 1, int(Op.shape[1])))
        rw_hist = ncp.zeros((nouter + 1, int(Op.shape[0])))

    # first iteration (unweighted least-squares)
    xinv = NormalEquationsInversion(Op, None, data, epsI=epsI,
                                    returninfo=False, **kwargs_solver)
    r = data - Op * xinv
    if returnhistory:
        xinv_hist[0] = xinv
    for iiter in range(nouter):
        # other iterations (weighted least-squares)
        xinvold = xinv.copy()
        if threshR:
            rw = 1. / ncp.maximum(ncp.abs(r), epsR)
        else:
            rw = 1. / (ncp.abs(r) + epsR)
        rw = rw / rw.max()
        R = Diagonal(rw)
        xinv = NormalEquationsInversion(Op, [], data, Weight=R, epsI=epsI,
                                        returninfo=False, **kwargs_solver)
        r = data - Op * xinv
        # save history
        if returnhistory:
            rw_hist[iiter] = rw
            xinv_hist[iiter + 1] = xinv
        # check tolerance
        if ncp.linalg.norm(xinv - xinvold) < tolIRLS:
            nouter = iiter
            break
    # adding initial guess
    if x0 is not None:
        xinv = x0 + xinv
        if returnhistory:
            xinv_hist = x0 + xinv_hist
    if returnhistory:
        return xinv, nouter, xinv_hist[:nouter + 1], rw_hist[:nouter + 1]
    else:
        return xinv, nouter

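# Sketch of the residual reweighting rule used above, in plain NumPy:
# large residuals are progressively down-weighted in the next weighted
# least-squares pass, which is what makes the data term approximately L1
# (robust to outliers). The numbers here are illustrative only.
import numpy as np

epsR = 1e-10
r = np.array([0.1, -0.02, 3.0])     # residual vector with one outlier
rw = 1.0 / (np.abs(r) + epsR)       # default branch (threshR=False)
rw = rw / rw.max()                  # normalized weights; outlier ~ 0.007
print(rw)
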
def matrix(self):
    """Return diagonal matrix as dense :obj:`numpy.ndarray`

    Returns
    -------
    densemat : :obj:`numpy.ndarray`
        Dense matrix.

    """
    ncp = get_array_module(self.diag)
    densemat = ncp.diag(self.diag.squeeze())
    return densemat

def _matvec(self, x):
    ncp = get_array_module(x)
    x = ncp.squeeze(x.reshape(self.nsl, self.ny, self.nz))
    if self.usematmul:
        if self.nz == 1:
            x = x[..., ncp.newaxis]
        y = ncp.matmul(self.G, x)
    else:
        y = ncp.squeeze(ncp.zeros((self.nsl, self.nx, self.nz),
                                  dtype=self.dtype))
        for isl in range(self.nsl):
            y[isl] = ncp.dot(self.G[isl], x[isl])
    return y.ravel()

def _rmatvec(self, x):
    ncp = get_array_module(x)
    if self.reshape:
        x = ncp.reshape(x, self.reshapedims[1])
    if self.complex:
        y = (self.A.T.dot(x.conj())).conj()
    else:
        y = self.A.T.dot(x)
    if self.reshape:
        return y.ravel()
    else:
        return y

def _rmatvec(self, x):
    ncp = get_array_module(x)
    if self.reshape:
        x = ncp.reshape(x, self.dimsd)
    if self.dir > 0:  # bring the dimension to symmetrize to first
        x = ncp.swapaxes(x, self.dir, 0)
    y = x[self.nsym - 1:].copy()
    y[1:] += x[self.nsym - 2::-1]
    if self.dir > 0:
        y = ncp.swapaxes(y, 0, self.dir)
    if self.reshape:
        y = ncp.ndarray.flatten(y)
    return y

def _rmatvec(self, x):
    ncp = get_array_module(x)
    ncp_add_at = get_add_at(x)
    x = ncp.reshape(x, self.dimsd)
    y = ncp.zeros(self.dims, dtype=self.dtype)
    ncp_add_at(y, [self.iava_t, self.iava_l],
               x * (1 - self.weights_tb) * (1 - self.weights_lr))
    ncp_add_at(y, [self.iava_t, self.iava_r],
               x * (1 - self.weights_tb) * self.weights_lr)
    ncp_add_at(y, [self.iava_b, self.iava_l],
               x * self.weights_tb * (1 - self.weights_lr))
    ncp_add_at(y, [self.iava_b, self.iava_r],
               x * self.weights_tb * self.weights_lr)
    return y.ravel()

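# Why an add.at-style scatter is used in the adjoint above: with plain
# fancy-index assignment, repeated indices keep only the last contribution,
# whereas np.add.at accumulates them, which is required for a correct
# adjoint whenever several data points share the same grid node.
import numpy as np

idx = np.array([0, 1, 1, 2])
vals = np.array([1.0, 1.0, 1.0, 1.0])
y = np.zeros(4)
y[idx] += vals            # -> [1., 1., 1., 0.]  (second hit on index 1 lost)
z = np.zeros(4)
np.add.at(z, idx, vals)   # -> [1., 2., 1., 0.]  (contributions accumulate)
print(y, z)
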
def inv(self):
    r"""Return the inverse of :math:`\mathbf{A}`.

    Returns
    -------
    Ainv : :obj:`numpy.ndarray`
        Inverse matrix.

    """
    if sp.sparse.issparse(self.A):
        Ainv = inv(self.A)
    else:
        ncp = get_array_module(self.A)
        Ainv = ncp.linalg.inv(self.A)
    return Ainv

def _matvec(self, x):
    ncp = get_array_module(x)
    y = ncp.zeros(self.dimsd, dtype=self.dtype)
    if self.reshape:
        x = ncp.reshape(x, self.dims)
    if self.dir > 0:  # bring the dimension to symmetrize to first
        x = ncp.swapaxes(x, self.dir, 0)
        y = ncp.swapaxes(y, self.dir, 0)
    y[self.nsym - 1:] = x
    y[:self.nsym - 1] = x[-1:0:-1]
    if self.dir > 0:
        y = ncp.swapaxes(y, 0, self.dir)
    if self.reshape:
        y = ncp.ndarray.flatten(y)
    return y

def _matvec_forward(self, x):
    ncp = get_array_module(x)
    if not self.reshape:
        x = x.squeeze()
        y = ncp.zeros(self.N, self.dtype)
        y[:-1] = (x[1:] - x[:-1]) / self.sampling
    else:
        x = ncp.reshape(x, self.dims)
        if self.dir > 0:  # need to bring the dim. to derive to first dim.
            x = ncp.swapaxes(x, self.dir, 0)
        y = ncp.zeros(x.shape, self.dtype)
        y[:-1] = (x[1:] - x[:-1]) / self.sampling
        if self.dir > 0:
            y = ncp.swapaxes(y, 0, self.dir)
        y = y.ravel()
    return y

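# Quick check of the forward-difference stencil above in plain NumPy: for
# the 1D branch the interior samples match np.diff divided by the sampling,
# with the last output sample left at zero.
import numpy as np

sampling = 0.5
x = np.array([0.0, 1.0, 4.0, 9.0, 16.0])
y = np.zeros_like(x)
y[:-1] = (x[1:] - x[:-1]) / sampling
print(np.allclose(y[:-1], np.diff(x) / sampling))  # True
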
def __init__(
    self,
    theta,
    vsvp=0.5,
    nt0=1,
    spatdims=None,
    linearization="akirich",
    dtype="float64",
):
    self.ncp = get_array_module(theta)

    self.nt0 = nt0 if not isinstance(vsvp, self.ncp.ndarray) else len(vsvp)
    self.ntheta = len(theta)
    if spatdims is None:
        self.spatdims = ()
        nspatdims = 1
    else:
        self.spatdims = spatdims if isinstance(spatdims, tuple) \
            else (spatdims, )
        nspatdims = np.prod(spatdims)

    # Compute AVO coefficients
    if linearization == "akirich":
        Gs = akirichards(theta, vsvp, n=self.nt0)
    elif linearization == "fatti":
        Gs = fatti(theta, vsvp, n=self.nt0)
    elif linearization == "ps":
        Gs = ps(theta, vsvp, n=self.nt0)
    else:
        logging.error("%s not an available linearization...",
                      linearization)
        raise NotImplementedError("%s not an available linearization..."
                                  % linearization)

    self.G = self.ncp.concatenate([gs.T[:, self.ncp.newaxis] for gs in Gs],
                                  axis=1)
    # add dimensions to G to account for horizontal axes
    for _ in range(len(self.spatdims)):
        self.G = self.G[..., np.newaxis]
    self.npars = len(Gs)
    self.shape = (
        self.nt0 * self.ntheta * nspatdims,
        self.nt0 * self.npars * nspatdims,
    )
    self.dtype = np.dtype(dtype)
    self.explicit = False

def __init__(self, diag, dims=None, dir=0, dtype='float64'):
    ncp = get_array_module(diag)
    self.diag = diag.flatten()
    self.complex = True if ncp.iscomplexobj(self.diag) else False
    if dims is None:
        self.shape = (len(self.diag), len(self.diag))
        self.dims = None
        self.reshape = False
    else:
        diagdims = [1] * len(dims)
        diagdims[dir] = dims[dir]
        self.diag = self.diag.reshape(diagdims)
        self.shape = (np.prod(dims), np.prod(dims))
        self.dims = dims
        self.reshape = True
    self.dtype = np.dtype(dtype)
    self.explicit = False

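# Usage sketch for the dims/dir broadcasting above. Assumption: this is the
# constructor of a Diagonal operator with the pylops 1.x API (dir keyword),
# i.e. pylops.Diagonal; a diagonal of length dims[dir] is broadcast along
# the remaining axes of the model.
import numpy as np
import pylops

d = np.array([1.0, 2.0, 3.0])
x = np.ones(3 * 4)                       # model of shape (3, 4), flattened
Dop = pylops.Diagonal(d, dims=(3, 4), dir=0)
y = Dop * x
print(y.reshape(3, 4))                   # each row scaled by d[row]
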
def _rmatvec(self, x):
    ncp = get_array_module(x)
    x = ncp.squeeze(x.reshape(self.nsl, self.nx, self.nz))
    if self.usematmul:
        if self.nz == 1:
            x = x[..., ncp.newaxis]
        if hasattr(self, 'GT'):
            y = ncp.matmul(self.GT, x)
        else:
            y = ncp.matmul(self.G.transpose((0, 2, 1)).conj(), x)
    else:
        y = ncp.squeeze(ncp.zeros((self.nsl, self.ny, self.nz),
                                  dtype=self.dtype))
        if hasattr(self, 'GT'):
            for isl in range(self.nsl):
                y[isl] = ncp.dot(self.GT[isl], x[isl])
        else:
            for isl in range(self.nsl):
                y[isl] = ncp.dot(self.G[isl].conj().T, x[isl])
    return y.ravel()

def _matvec(self, x):
    ncp = get_array_module(x)
    if not self.reshape:
        x = x.squeeze()
        y = ncp.zeros(self.N, self.dtype)
        y[1:-1] = (x[2:] - 2 * x[1:-1] + x[0:-2]) / self.sampling**2
        if self.edge:
            y[0] = (x[0] - 2 * x[1] + x[2]) / self.sampling**2
            y[-1] = (x[-3] - 2 * x[-2] + x[-1]) / self.sampling**2
    else:
        x = ncp.reshape(x, self.dims)
        if self.dir > 0:  # need to bring the dim. to derive to first dim.
            x = ncp.swapaxes(x, self.dir, 0)
        y = ncp.zeros(x.shape, self.dtype)
        y[1:-1] = (x[2:] - 2 * x[1:-1] + x[0:-2]) / self.sampling**2
        if self.edge:
            y[0] = (x[0] - 2 * x[1] + x[2]) / self.sampling**2
            y[-1] = (x[-3] - 2 * x[-2] + x[-1]) / self.sampling**2
        if self.dir > 0:
            y = ncp.swapaxes(y, 0, self.dir)
        y = y.ravel()
    return y

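# Quick check of the three-point second-derivative stencil above (1D branch,
# plain NumPy): interior samples match np.diff(x, n=2) / sampling**2.
import numpy as np

sampling = 2.0
x = np.array([1.0, 2.0, 4.0, 7.0, 11.0, 16.0])
y = np.zeros_like(x)
y[1:-1] = (x[2:] - 2 * x[1:-1] + x[:-2]) / sampling**2
print(np.allclose(y[1:-1], np.diff(x, n=2) / sampling**2))  # True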