def Laplacian(shape: tuple, weights: Tuple[float] = (1, 1), step: Union[tuple, float] = 1., edge: bool = True, dtype: str = 'float64') -> PyLopLinearOperator: r""" Laplacian. Computes the Laplacian of a 2D array. Parameters ---------- shape: tuple Shape of the input array. weights: Tuple[float] Weight to apply to each direction (true Laplacian operator if ``weights=(1, 1)``). step: Union[float, Tuple[float, ...]] Step size in each direction. edge: bool Use reduced order derivative at edges (``True``) or ignore them (``False``). dtype: str Type of elements in input vector. Returns ------- :py:class:`~pycsou.linop.base.PyLopLinearOperator` Laplacian operator. Examples -------- .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.diff import Laplacian from pycsou.util.misc import peaks x = np.linspace(-2.5, 2.5, 25) X,Y = np.meshgrid(x,x) Z = peaks(X, Y) Dop = Laplacian(shape=Z.shape) y = Dop * Z.flatten() plt.figure() h = plt.pcolormesh(X,Y,Z, shading='auto') plt.colorbar(h) plt.title('Signal') plt.figure() h = plt.pcolormesh(X,Y,y.reshape(X.shape), shading='auto') plt.colorbar(h) plt.title('Laplacian') plt.show() Notes ----- The Laplacian operator sums the second directional derivatives of a 2D array along the two canonical directions. It is defined as: .. math:: y[i, j] = \frac{x[i+1, j] + x[i-1, j] + x[i, j-1] + x[i, j+1] - 4x[i, j]}{dx\times dy}. See Also -------- :py:func:`~pycsou.linop.diff.DirectionalLaplacian`, :py:func:`~pycsou.linop.diff.SecondDerivative` """ if isinstance(step, Number): step = [step] * len(shape) return PyLopLinearOperator(pylops.Laplacian(dims=shape, weights=weights, sampling=step, edge=edge, dtype=dtype))
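# A minimal consistency sketch (an assumption based on the underlying pylops composition,
# not part of the public API): with ``weights=(1, 1)`` and unit steps, ``Laplacian`` should
# coincide with the sum of the two axis-wise ``SecondDerivative`` operators. The helper name
# below is hypothetical.
def _check_laplacian_vs_second_derivatives():
    import numpy as np
    z = np.random.randn(16, 16)
    L = Laplacian(shape=z.shape)
    D0 = SecondDerivative(size=z.size, shape=z.shape, axis=0)
    D1 = SecondDerivative(size=z.size, shape=z.shape, axis=1)
    # Both applications should agree up to floating-point error.
    return np.allclose(L * z.ravel(), D0 * z.ravel() + D1 * z.ravel())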
def SecondDirectionalDerivative(shape: tuple, directions: np.ndarray, step: Union[float, Tuple[float, ...]] = 1., edge: bool = True, dtype: str = 'float64'): r""" Second directional derivative. Computes the second directional derivative of a multi-dimensional array (at least two dimensions are required) along either a single common direction or different ``directions`` for each entry of the array. Parameters ---------- shape: tuple Shape of the input array. directions: np.ndarray Single direction (array of size :math:`n_{dims}`) or different directions for each entry (array of size :math:`[n_{dims} \times (n_{d_0} \times ... \times n_{d_{n_{dims}}})]`). Each column should be normalised. step: Union[float, Tuple[float, ...]] Step size in each direction. edge: bool Use reduced order derivative at edges (``True``) or ignore them (``False``). dtype: str Type of elements in input vector. Returns ------- :py:class:`pycsou.linop.base.PyLopLinearOperator` Second directional derivative operator. Examples -------- .. testsetup:: import numpy as np from pycsou.linop.diff import SecondDirectionalDerivative from pycsou.util.misc import peaks .. doctest:: >>> x = np.linspace(-2.5, 2.5, 100) >>> X,Y = np.meshgrid(x,x) >>> Z = peaks(X, Y) >>> direction = np.array([1,0]) >>> Dop = SecondDirectionalDerivative(shape=Z.shape, directions=direction) >>> dir_d2 = (Dop * Z.reshape(-1)).reshape(Z.shape) .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.diff import FirstDirectionalDerivative, SecondDirectionalDerivative from pycsou.util.misc import peaks x = np.linspace(-2.5, 2.5, 25) X,Y = np.meshgrid(x,x) Z = peaks(X, Y) directions = np.zeros(shape=(2,Z.size)) directions[0, :Z.size//2] = 1 directions[1, Z.size//2:] = 1 Dop = FirstDirectionalDerivative(shape=Z.shape, directions=directions) Dop2 = SecondDirectionalDerivative(shape=Z.shape, directions=directions) y = Dop * Z.flatten() y2 = Dop2 * Z.flatten() plt.figure() h = plt.pcolormesh(X,Y,Z, shading='auto') plt.quiver(x, x, directions[1].reshape(X.shape), directions[0].reshape(X.shape)) plt.colorbar(h) plt.title('Signal and directions of derivatives') plt.figure() h = plt.pcolormesh(X,Y,y.reshape(X.shape), shading='auto') plt.quiver(x, x, directions[1].reshape(X.shape), directions[0].reshape(X.shape)) plt.colorbar(h) plt.title('First Directional derivatives') plt.figure() h = plt.pcolormesh(X,Y,y2.reshape(X.shape), shading='auto') plt.colorbar(h) plt.title('Second Directional derivatives') plt.show() Notes ----- The ``SecondDirectionalDerivative`` applies a second-order derivative to a multi-dimensional array along the direction defined by the unitary vector :math:`\mathbf{v}`: .. math:: d^2_\mathbf{v} f = - d_\mathbf{v}^\ast (d_\mathbf{v} f) where :math:`d_\mathbf{v}` is the first-order directional derivative implemented by :py:func:`~pycsou.linop.diff.FirstDirectionalDerivative`. The above formula generalises the well-known relationship: .. math:: \Delta f= -\text{div}(\nabla f), where minus the divergence operator is the adjoint of the gradient. 
**Note that problematic values at edges are set to zero.** See Also -------- :py:func:`~pycsou.linop.diff.FirstDirectionalDerivative`, :py:func:`~pycsou.linop.diff.SecondDerivative` """ Pylop = PyLopLinearOperator( pylops.SecondDirectionalDerivative(dims=shape, v=directions, sampling=step, edge=edge, dtype=dtype)) kill_edges = np.ones(shape=shape) for axis in range(len(shape)): kill_edges = np.swapaxes(kill_edges, axis, 0) kill_edges[-2:] = 0 kill_edges[:2] = 0 kill_edges = np.swapaxes(kill_edges, 0, axis) KillEdgeOp = DiagonalOperator(kill_edges.reshape(-1)) DirD2 = KillEdgeOp * Pylop return DirD2
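# The identity :math:`d^2_\mathbf{v} f = -d_\mathbf{v}^\ast(d_\mathbf{v} f)` can be checked
# numerically away from the edges, which the operator zeroes out as noted above. This sketch
# assumes the composition uses the same centered first derivative as
# ``FirstDirectionalDerivative``; the helper name is hypothetical.
def _check_second_directional_identity():
    import numpy as np
    z = np.random.randn(20, 20)
    v = np.array([1.0, 0.0])
    D1 = FirstDirectionalDerivative(shape=z.shape, directions=v)
    D2 = SecondDirectionalDerivative(shape=z.shape, directions=v)
    lhs = (D2 * z.ravel()).reshape(z.shape)
    rhs = -(D1.H * (D1 * z.ravel())).reshape(z.shape)
    # Compare interior samples only; the two outermost layers are zeroed by design.
    return np.allclose(lhs[2:-2, 2:-2], rhs[2:-2, 2:-2])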
def Gradient(shape: tuple, step: Union[tuple, float] = 1., edge: bool = True, dtype: str = 'float64', kind: str = 'centered') -> PyLopLinearOperator: r""" Gradient. Computes the gradient of a multi-dimensional array (at least two dimensions are required). Parameters ---------- shape: tuple Shape of the input array. step: Union[float, Tuple[float, ...]] Step size in each direction. edge: bool For ``kind = 'centered'``, use reduced order derivative at edges (``True``) or ignore them (``False``). dtype: str Type of elements in input vector. kind: str Derivative kind (``forward``, ``centered``, or ``backward``). Returns ------- :py:class:`~pycsou.linop.base.PyLopLinearOperator` Gradient operator. Examples -------- .. testsetup:: import numpy as np from pycsou.linop.diff import Gradient, FirstDerivative from pycsou.util.misc import peaks .. doctest:: >>> x = np.linspace(-2.5, 2.5, 100) >>> X,Y = np.meshgrid(x,x) >>> Z = peaks(X, Y) >>> Nabla = Gradient(shape=Z.shape, kind='forward') >>> D = FirstDerivative(size=Z.size, shape=Z.shape, kind='forward') >>> np.allclose((Nabla * Z.flatten())[:Z.size], D * Z.flatten()) True .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.diff import Gradient from pycsou.util.misc import peaks x = np.linspace(-2.5, 2.5, 25) X,Y = np.meshgrid(x,x) Z = peaks(X, Y) Dop = Gradient(shape=Z.shape) y = Dop * Z.flatten() plt.figure() h = plt.pcolormesh(X,Y,Z, shading='auto') plt.colorbar(h) plt.title('Signal') plt.figure() h = plt.pcolormesh(X,Y,y[:Z.size].reshape(X.shape), shading='auto') plt.colorbar(h) plt.title('Gradient (1st component)') plt.figure() h = plt.pcolormesh(X,Y,y[Z.size:].reshape(X.shape), shading='auto') plt.colorbar(h) plt.title('Gradient (2nd component)') plt.show() Notes ----- In forward mode, the ``Gradient`` operator applies a first-order derivative along each dimension of a multi-dimensional array. For simplicity, given a three-dimensional array, the ``Gradient`` in forward mode using a centered stencil can be expressed as: .. math:: \mathbf{g}_{i, j, k} = 0.5 (f_{i+1, j, k} - f_{i-1, j, k}) / d_1 \mathbf{i_1} + 0.5 (f_{i, j+1, k} - f_{i, j-1, k}) / d_2 \mathbf{i_2} + 0.5 (f_{i, j, k+1} - f_{i, j, k-1}) / d_3 \mathbf{i_3} which is discretized as follows: .. math:: \mathbf{g} = \begin{bmatrix} \mathbf{df_1} \\ \mathbf{df_2} \\ \mathbf{df_3} \end{bmatrix}. In adjoint mode, the adjoints of the first derivatives along different axes are instead summed together. See Also -------- :py:func:`~pycsou.linop.diff.DirectionalGradient`, :py:func:`~pycsou.linop.diff.FirstDerivative` """ return PyLopLinearOperator(pylops.Gradient(dims=shape, sampling=step, edge=edge, dtype=dtype, kind=kind))
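# The doctest above checks the first stacked component; this hypothetical helper extends it
# to both blocks, confirming that ``Gradient`` stacks the axis-wise ``FirstDerivative``
# outputs in order.
def _check_gradient_components():
    import numpy as np
    z = np.random.randn(12, 15)
    G = Gradient(shape=z.shape, kind='forward')
    D0 = FirstDerivative(size=z.size, shape=z.shape, axis=0, kind='forward')
    D1 = FirstDerivative(size=z.size, shape=z.shape, axis=1, kind='forward')
    g = G * z.ravel()
    # The output holds the axis-0 derivative first, then the axis-1 derivative.
    return np.allclose(g[:z.size], D0 * z.ravel()) and np.allclose(g[z.size:], D1 * z.ravel())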
def FirstDerivative(size: int, shape: Optional[tuple] = None, axis: int = 0, step: float = 1.0, edge: bool = True, dtype: str = 'float64', kind: str = 'forward') -> PyLopLinearOperator: r""" First derivative. *This docstring was adapted from ``pylops.FirstDerivative``.* Approximates the first derivative of a multi-dimensional array along a specific ``axis`` using finite-differences. Parameters ---------- size: int Size of the input array. shape: tuple Shape of the input array. axis: int Axis along which to differentiate. step: float Step size. edge: bool For ``kind='centered'``, use reduced order derivative at edges (``True``) or ignore them (``False``). dtype: str Type of elements in input array. kind: str Derivative kind (``forward``, ``centered``, or ``backward``). Returns ------- :py:class:`~pycsou.linop.base.PyLopLinearOperator` First derivative operator. Raises ------ ValueError If ``shape`` and ``size`` are not compatible. NotImplementedError If ``kind`` is not one of: ``forward``, ``centered``, or ``backward``. Examples -------- .. testsetup:: import numpy as np from pycsou.linop.diff import FirstDerivative .. doctest:: >>> x = np.repeat([0,2,1,3,0,2,0], 10) >>> Dop = FirstDerivative(size=x.size) >>> y = Dop * x >>> np.sum(np.abs(y) > 0) 6 >>> np.allclose(y, np.diff(x, append=0)) True .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.diff import FirstDerivative x = np.repeat([0,2,1,3,0,2,0], 10) Dop_bwd = FirstDerivative(size=x.size, kind='backward') Dop_fwd = FirstDerivative(size=x.size, kind='forward') Dop_cent = FirstDerivative(size=x.size, kind='centered') y_bwd = Dop_bwd * x y_cent = Dop_cent * x y_fwd = Dop_fwd * x plt.figure() plt.plot(np.arange(x.size), x) plt.plot(np.arange(x.size), y_bwd) plt.plot(np.arange(x.size), y_cent) plt.plot(np.arange(x.size), y_fwd) plt.legend(['Signal', 'Backward', 'Centered', 'Forward']) plt.title('First derivative') plt.show() Notes ----- The ``FirstDerivative`` operator applies a first derivative along a given axis of a multi-dimensional array using either a *second-order centered stencil* or *first-order forward/backward stencils*. For simplicity, given a one dimensional array, the second-order centered first derivative is: .. math:: y[i] = (0.5x[i+1] - 0.5x[i-1]) / \text{step} while the first-order forward stencil is: .. math:: y[i] = (x[i+1] - x[i]) / \text{step} and the first-order backward stencil is: .. math:: y[i] = (x[i] - x[i-1]) / \text{step}. See Also -------- :py:func:`~pycsou.linop.diff.SecondDerivative`, :py:func:`~pycsou.linop.diff.GeneralisedDerivative` """ first_derivative = pylops.FirstDerivative(N=size, dims=shape, dir=axis, sampling=step, edge=edge, dtype=dtype, kind=kind) return PyLopLinearOperator(first_derivative)
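# A generic adjoint consistency (dot-test) sketch: for any linear operator D and vectors
# x, y, the identity <Dx, y> = <x, D^H y> must hold up to round-off. The helper name is
# hypothetical; any of the three derivative kinds can be substituted.
def _dot_test_first_derivative():
    import numpy as np
    rng = np.random.default_rng(0)
    x = rng.standard_normal(50)
    y = rng.standard_normal(50)
    D = FirstDerivative(size=x.size, kind='centered')
    # Adjoint consistency up to floating-point error.
    return np.isclose(np.dot(D * x, y), np.dot(x, D.H * y))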
def FirstDirectionalDerivative(shape: tuple, directions: np.ndarray, step: Union[float, Tuple[float, ...]] = 1., edge: bool = True, dtype: str = 'float64', kind: str = 'centered') -> PyLopLinearOperator: r""" First directional derivative. Computes the directional derivative of a multi-dimensional array (at least two dimensions are required) along either a single common direction or different ``directions`` for each entry of the array. Parameters ---------- shape: tuple Shape of the input array. directions: np.ndarray Single direction (array of size :math:`n_{dims}`) or different directions for each entry (array of size :math:`[n_{dims} \times (n_{d_0} \times ... \times n_{d_{n_{dims}}})]`). Each column should be normalised. step: Union[float, Tuple[float, ...]] Step size in each direction. edge: bool For ``kind = 'centered'``, use reduced order derivative at edges (``True``) or ignore them (``False``). dtype: str Type of elements in input vector. kind: str Derivative kind (``forward``, ``centered``, or ``backward``). Returns ------- :py:class:`pycsou.linop.base.PyLopLinearOperator` Directional derivative operator. Examples -------- .. testsetup:: import numpy as np from pycsou.linop.diff import FirstDirectionalDerivative, FirstDerivative from pycsou.util.misc import peaks .. doctest:: >>> x = np.linspace(-2.5, 2.5, 100) >>> X,Y = np.meshgrid(x,x) >>> Z = peaks(X, Y) >>> direction = np.array([1,0]) >>> Dop = FirstDirectionalDerivative(shape=Z.shape, directions=direction, kind='forward') >>> D = FirstDerivative(size=Z.size, shape=Z.shape, kind='forward') >>> np.allclose(Dop * Z.flatten(), D * Z.flatten()) True .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.diff import FirstDirectionalDerivative, FirstDerivative from pycsou.util.misc import peaks x = np.linspace(-2.5, 2.5, 25) X,Y = np.meshgrid(x,x) Z = peaks(X, Y) directions = np.zeros(shape=(2,Z.size)) directions[0, :Z.size//2] = 1 directions[1, Z.size//2:] = 1 Dop = FirstDirectionalDerivative(shape=Z.shape, directions=directions) y = Dop * Z.flatten() plt.figure() h = plt.pcolormesh(X,Y,Z, shading='auto') plt.quiver(x, x, directions[1].reshape(X.shape), directions[0].reshape(X.shape)) plt.colorbar(h) plt.title('Signal and directions of derivatives') plt.figure() h = plt.pcolormesh(X,Y,y.reshape(X.shape), shading='auto') plt.colorbar(h) plt.title('Directional derivatives') plt.show() Notes ----- The ``FirstDirectionalDerivative`` applies a first-order derivative to a multi-dimensional array along the direction defined by the unitary vector :math:`\mathbf{v}`: .. math:: d_\mathbf{v}f = \langle\nabla f, \mathbf{v}\rangle, or along the directions defined by the unitary vectors :math:`\mathbf{v}(x, y)`: .. math:: d_{\mathbf{v}(x,y)} f = \langle\nabla f(x,y), \mathbf{v}(x,y)\rangle, where the 2-dimensional case is considered for simplicity. Note that in the 2D case, choosing :math:`\mathbf{v}=[1,0]` or :math:`\mathbf{v}=[0,1]` is equivalent to the ``FirstDerivative`` operator applied to axis 0 or 1, respectively. See Also -------- :py:func:`~pycsou.linop.diff.SecondDirectionalDerivative`, :py:func:`~pycsou.linop.diff.FirstDerivative` """ return PyLopLinearOperator( pylops.FirstDirectionalDerivative(dims=shape, v=directions, sampling=step, edge=edge, dtype=dtype, kind=kind))
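# Since d_v f = <grad f, v>, a constant unit direction should reproduce the explicitly
# projected ``Gradient`` output (both operators use the default centered stencil with
# ``edge=True`` here). A hypothetical consistency helper:
def _check_directional_is_projected_gradient():
    import numpy as np
    z = np.random.randn(18, 18)
    v = np.array([3.0, 4.0]) / 5.0  # unit norm, as required for ``directions``
    Dv = FirstDirectionalDerivative(shape=z.shape, directions=v)
    G = Gradient(shape=z.shape)
    g = G * z.ravel()
    # Explicit projection of the stacked gradient onto v.
    return np.allclose(Dv * z.ravel(), v[0] * g[:z.size] + v[1] * g[z.size:])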
def Integration1D(size: int, shape: Optional[tuple] = None, axis: int = 0, step: float = 1., dtype='float64') -> PyLopLinearOperator: r""" 1D integral/cumsum operator. Integrates a multi-dimensional array along a specific ``axis``. Parameters ---------- size: int Size of the input array. shape: Optional[tuple] Shape of the input array if multi-dimensional. axis: int Axis along which integration is performed. step: float Step size. dtype: str Type of elements in input array. Returns ------- :py:class:`pycsou.linop.base.PyLopLinearOperator` Integral operator. Examples -------- .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.diff import Integration1D x = np.array([0,0,0,1,0,0,0,0,0,2,0,0,0,0,-1,0,0,0,0,2,0,0,0,0]) Int = Integration1D(size=x.size) y = Int * x plt.figure() plt.plot(np.arange(x.size), x) plt.plot(np.arange(x.size), y) plt.legend(['Signal', 'Integral']) plt.title('Integration') plt.show() Notes ----- The ``Integration1D`` operator applies a causal integration to any chosen direction of a multi-dimensional array. For simplicity, given a one dimensional array, the causal integration is: .. math:: y(t) = \int_{-\infty}^{t} x(\tau) d\tau which can be discretised as: .. math:: y[i] = \sum_{j=0}^i x[j] dt, where :math:`dt` is the ``sampling`` interval. See Also -------- :py:func:`~pycsou.linop.diff.FirstDerivative` """ return PyLopLinearOperator( pylops.CausalIntegration(N=size, dims=shape, dir=axis, sampling=step, halfcurrent=False, dtype=dtype))
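# With ``halfcurrent=False`` and unit step, the operator reduces to a plain cumulative sum,
# so first-order differencing of its output should recover the input. A hypothetical
# sanity check of that inverse relationship:
def _check_integration_inverts_diff():
    import numpy as np
    x = np.random.randn(30)
    Int = Integration1D(size=x.size)
    y = Int * x
    # Differencing the running sum returns the original samples (including the first one).
    return np.allclose(np.diff(y, prepend=0), x)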
def SecondDerivative(size: int, shape: Optional[tuple] = None, axis: int = 0, step: float = 1.0, edge: bool = True, dtype: str = 'float64') -> PyLopLinearOperator: r""" Second derivative. *This docstring was adapted from ``pylops.SecondDerivative``.* Approximates the second derivative of a multi-dimensional array along a specific ``axis`` using finite-differences. Parameters ---------- size: int Size of the input array. shape: tuple Shape of the input array. axis: int Axis along which to differentiate. step: float Step size. edge: bool Use reduced order derivative at edges (``True``) or ignore them (``False``). dtype: str Type of elements in input array. Returns ------- :py:class:`~pycsou.linop.base.PyLopLinearOperator` Second derivative operator. Raises ------ ValueError If ``shape`` and ``size`` are not compatible. Examples -------- .. testsetup:: import numpy as np from pycsou.linop.diff import SecondDerivative .. doctest:: >>> x = np.linspace(-2.5, 2.5, 100) >>> z = np.piecewise(x, [x < -1, (x >= - 1) * (x<0), x>=0], [lambda x: -x, lambda x: 3 * x + 4, lambda x: -0.5 * x + 4]) >>> Dop = SecondDerivative(size=x.size) >>> y = Dop * z .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.diff import SecondDerivative x = np.linspace(-2.5, 2.5, 200) z = np.piecewise(x, [x < -1, (x >= - 1) * (x<0), x>=0], [lambda x: -x, lambda x: 3 * x + 4, lambda x: -0.5 * x + 4]) Dop = SecondDerivative(size=x.size) y = Dop * z plt.figure() plt.plot(np.arange(x.size), z) plt.title('Signal') plt.show() plt.figure() plt.plot(np.arange(x.size), y) plt.title('Second Derivative') plt.show() Notes ----- The ``SecondDerivative`` operator applies a second derivative to any chosen direction of a multi-dimensional array. For simplicity, given a one dimensional array, the second-order centered second derivative is given by: .. math:: y[i] = (x[i+1] - 2x[i] + x[i-1]) / \text{step}^2. See Also -------- :py:func:`~pycsou.linop.diff.FirstDerivative`, :py:func:`~pycsou.linop.diff.GeneralisedDerivative` """ return PyLopLinearOperator( pylops.SecondDerivative(N=size, dims=shape, dir=axis, sampling=step, edge=edge, dtype=dtype))
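# A hypothetical stencil check: away from the boundaries, the output must follow the
# centered formula (x[i+1] - 2 x[i] + x[i-1]) / step**2 exactly (unit step here; the
# ``edge`` setting only affects the two endpoint samples).
def _check_second_derivative_stencil():
    import numpy as np
    x = np.random.randn(40)
    D2 = SecondDerivative(size=x.size)
    y = D2 * x
    # Interior samples, i.e. indices 1 .. N-2.
    return np.allclose(y[1:-1], x[2:] - 2 * x[1:-1] + x[:-2])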
def MovingAverage2D(window_shape: Union[tuple, list], shape: tuple, dtype='float64'): r""" 2D moving average. Apply moving average to a 2D array. Parameters ---------- window_shape: Union[tuple, list] Shape of the window for moving average (sizes in each dimension must be *odd*). shape: tuple Shape of the input array. dtype: str Type of elements in input array. Returns ------- :py:class:`pycsou.linop.base.PyLopLinearOperator` 2D moving average operator. Examples -------- .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.conv import MovingAverage2D from scipy import signal sig = np.zeros(shape=(100,100)) sig[sig.shape[0] // 2 - 2:sig.shape[0] // 2 + 3, sig.shape[1] // 2 - 2:sig.shape[1] // 2 + 3] = 1 MAOp = MovingAverage2D(window_shape=(51,25), shape=sig.shape) moving_average = (MAOp * sig.ravel()).reshape(sig.shape) plt.figure() plt.subplot(1,2,1) plt.imshow(sig, cmap='plasma'); plt.title('Signal') plt.subplot(1,2,2) plt.imshow(moving_average, cmap='plasma'); plt.title('Moving Average') plt.show() Notes ----- The ``MovingAverage2D`` operator is a special type of convolution operator that convolves a 2D array with a constant 2D filter of size :math:`n_{smooth, 1} \times n_{smooth, 2}`: .. math:: y[i,j] = \frac{1}{n_{smooth, 1} n_{smooth, 2}} \sum_{l=-(n_{smooth,1}-1)/2}^{(n_{smooth,1}-1)/2} \sum_{m=-(n_{smooth,2}-1)/2}^{(n_{smooth,2}-1)/2} x[i-l,j-m] Note that since the filter is symmetrical, the ``MovingAverage2D`` operator is self-adjoint. """ PyLop = pylops.Smoothing2D(nsmooth=window_shape, dims=shape, nodir=None, dtype=dtype) return PyLopLinearOperator(PyLop)
def MovingAverage1D(window_size: int, shape: tuple, axis: int = 0, dtype='float64'): r""" 1D moving average. Apply moving average to a multi-dimensional array along a specific axis. Parameters ---------- window_size: int Size of the window for moving average (must be *odd*). shape: tuple Shape of the input array. axis: int Axis along which moving average is applied. dtype: str Type of elements in input array. Returns ------- :py:class:`pycsou.linop.base.PyLopLinearOperator` 1D moving average operator. Examples -------- .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.conv import MovingAverage1D from scipy import signal sig = np.zeros(shape=(100,100)) sig[sig.shape[0] // 2 - 2:sig.shape[0] // 2 + 3, sig.shape[1] // 2 - 2:sig.shape[1] // 2 + 3] = 1 MAOp = MovingAverage1D(window_size=25, shape=sig.shape, axis=0) moving_average = (MAOp * sig.ravel()).reshape(sig.shape) plt.figure() plt.subplot(1,2,1) plt.imshow(sig, cmap='plasma'); plt.title('Signal') plt.subplot(1,2,2) plt.imshow(moving_average, cmap='plasma'); plt.title('Moving Average') plt.show() Notes ----- The ``MovingAverage1D`` operator is a special type of convolution operator that convolves along a specific axis an array with a constant filter of size :math:`n_{smooth}`: .. math:: \mathbf{h} = [ 1/n_{smooth}, 1/n_{smooth}, ..., 1/n_{smooth} ] For example, for a 3D array :math:`x`, ``MovingAverage1D`` applied to the first axis yields: .. math:: y[i,j,k] = 1/n_{smooth} \sum_{l=-(n_{smooth}-1)/2}^{(n_{smooth}-1)/2} x[i-l,j,k]. Note that since the filter is symmetrical, the ``MovingAverage1D`` operator is self-adjoint. """ PyLop = pylops.Smoothing1D(nsmooth=window_size, dims=shape, dir=axis, dtype=dtype) return PyLopLinearOperator(PyLop)
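# A box average is separable, so the 2D smoother should factor into two orthogonal 1D
# averages. This sketch assumes both operators use zero boundary conditions (as the
# underlying pylops smoothers do); the helper name is hypothetical.
def _check_separable_smoothing():
    import numpy as np
    z = np.random.randn(20, 30)
    MA2 = MovingAverage2D(window_shape=(5, 3), shape=z.shape)
    MA0 = MovingAverage1D(window_size=5, shape=z.shape, axis=0)
    MA1 = MovingAverage1D(window_size=3, shape=z.shape, axis=1)
    # Sequential 1D averaging along each axis equals the single 2D moving average.
    return np.allclose(MA2 * z.ravel(), MA1 * (MA0 * z.ravel()))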
def Convolve1D(size: int, filter: np.ndarray, reshape_dims: Optional[tuple] = None, axis: int = 0, dtype: type = 'float64', method: Optional[str] = None) -> PyLopLinearOperator: r""" 1D convolution operator. *This docstring was adapted from ``pylops.signalprocessing.Convolve1D``.* Convolve a multi-dimensional array along a specific ``axis`` with a one-dimensional compact ``filter``. Parameters ---------- size: int Size of the input array. filter: np.ndarray 1d compact filter. The latter should be real-valued and centered around its mid-size index. reshape_dims: Optional[tuple] Shape of the array to which the convolution should be applied. axis: int Axis along which to apply convolution. dtype: str Type of elements of the input array. method: Optional[str] Method used to calculate the convolution (``direct``, ``fft``, or ``overlapadd``). Note that only ``direct`` and ``fft`` are allowed when ``dims=None``, whilst ``fft`` and ``overlapadd`` are allowed when ``dims`` is provided. Returns ------- :py:class:`pycsou.linop.base.PyLopLinearOperator` Convolution operator. Raises ------ NotImplementedError If ``method`` provided is not allowed. Examples -------- .. testsetup:: import numpy as np from pycsou.linop.conv import Convolve1D from scipy import signal .. doctest:: >>> sig = np.repeat([0., 1., 0.], 10) >>> filter = signal.hann(5); filter[filter.size//2:] = 0 >>> ConvOp = Convolve1D(size=sig.size, filter=filter) >>> filtered = ConvOp * sig >>> filtered_scipy = signal.convolve(sig, filter, mode='same', method='direct') >>> np.allclose(filtered, filtered_scipy) True .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.conv import Convolve1D from scipy import signal sig = np.repeat([0., 1., 0.], 100) filter = signal.hann(50); filter[filter.size//2:] = 0 ConvOp = Convolve1D(size=sig.size, filter=filter) filtered = ConvOp * sig correlated = ConvOp.H * sig backprojected = ConvOp.DomainGram * sig plt.figure() plt.subplot(2,2,1) plt.plot(sig); plt.plot(np.linspace(0, 50, filter.size), filter); plt.legend(['Signal', 'Filter']) plt.subplot(2,2,2) plt.plot(filtered); plt.title('Filtered Signal') plt.subplot(2,2,3) plt.plot(correlated); plt.title('Correlated Signal') plt.subplot(2,2,4) plt.plot(backprojected); plt.title('Backprojected Signal') plt.show() .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.conv import Convolve1D from scipy import signal sig = np.zeros(shape=(100,100)) sig[sig.shape[0] // 2 - 2:sig.shape[0] // 2 + 3, sig.shape[1] // 2 - 2:sig.shape[1] // 2 + 3] = 1 filter = signal.hann(50) ConvOp = Convolve1D(size=sig.size, filter=filter, reshape_dims=sig.shape, axis=0) filtered = (ConvOp * sig.reshape(-1)).reshape(sig.shape) plt.figure() plt.subplot(1,2,1) plt.imshow(sig, cmap='plasma'); plt.title('Signal') plt.subplot(1,2,2) plt.imshow(filtered, cmap='plasma'); plt.title('Filtered Signal') plt.show() Notes ----- The ``Convolve1D`` operator applies convolution between the input signal :math:`x(t)` and a compact filter kernel :math:`h(t)` in forward model: .. math:: y(t) = \int_{-\infty}^{\infty} h(t-\tau) x(\tau) d\tau This operation can be discretized as follows .. math:: y[n] = \sum_{m\in\mathbb{Z}} h[n-m] x[m], \, n\in\mathbb{Z}, as well as performed in the frequency domain: .. math:: Y(f) = \mathscr{F} (h(t)) \times \mathscr{F} (x(t)),\; f\in\mathbb{R}. ``Convolve1D`` operator uses :py:func:`scipy.signal.convolve` that automatically chooses the best method for computing the convolution for one dimensional inputs. 
The FFT implementation :py:func:`scipy.signal.fftconvolve` is however enforced for signals in 2 or more dimensions as this routine efficiently operates on multi-dimensional arrays. The method ``overlapadd`` uses :py:func:`scipy.signal.oaconvolve`. As the adjoint of convolution is correlation, ``Convolve1D`` operator applies correlation in the adjoint mode. In the time domain: .. math:: y(t) = \int_{-\infty}^{\infty} h(t+\tau) x(\tau) d\tau or in the frequency domain: .. math:: y(t) = \mathscr{F}^{-1} (H(f)^\ast \times X(f)). See Also -------- :py:func:`~pycsou.linop.conv.Convolve2D` """ if (filter.size % 2) == 0: offset = filter.size // 2 - 1 else: offset = filter.size // 2 PyLop = pyconv.Convolve1D(N=size, h=filter, dims=reshape_dims, dir=axis, dtype=dtype, method=method, offset=offset) return PyLopLinearOperator(PyLop)
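# The even-length branch above shifts the filter centre by one sample; with that offset the
# operator should still match scipy's 'same' convolution (the odd-length case is already
# covered by the doctest). A hypothetical check:
def _check_convolve1d_even_filter():
    import numpy as np
    from scipy import signal
    x = np.random.randn(64)
    h = np.array([1., 3., 3., 1.]) / 8.  # even-length filter exercises the offset branch
    C = Convolve1D(size=x.size, filter=h)
    return np.allclose(C * x, signal.convolve(x, h, mode='same', method='direct'))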
def Convolve2D(size: int, filter: np.ndarray, shape: tuple, dtype: type = 'float64', method: str = 'fft') -> PyLopLinearOperator: r""" 2D convolution operator. *This docstring was adapted from ``pylops.signalprocessing.Convolve2D``.* Convolve a two-dimensional array with a two-dimensional compact ``filter``. Parameters ---------- size: int Size of the input array. filter: np.ndarray 2d compact filter. The latter should be real-valued and centered around its central indices. shape: tuple Shape of the array to which the convolution should be applied. dtype: str Type of elements of the input array. method: str Method used to calculate the convolution (``direct`` or ``fft``). Returns ------- :py:class:`pycsou.linop.base.PyLopLinearOperator` Convolution operator. Raises ------ ValueError If ``filter`` is not a 2D array. Examples -------- .. testsetup:: import numpy as np from pycsou.linop.conv import Convolve2D from scipy import signal .. doctest:: >>> sig = np.zeros(shape=(100,100)) >>> sig[sig.shape[0] // 2 - 2:sig.shape[0] // 2 + 3, sig.shape[1] // 2 - 2:sig.shape[1] // 2 + 3] = 1 >>> filter = signal.hann(25); filter[filter.size//2:] = 0 >>> filter = filter[None,:] * filter[:,None] >>> ConvOp = Convolve2D(size=sig.size, filter=filter, shape=sig.shape) >>> filtered = (ConvOp * sig.ravel()).reshape(sig.shape) >>> filtered_scipy = signal.convolve(sig, filter, mode='same', method='fft') >>> np.allclose(filtered, filtered_scipy) True .. plot:: import numpy as np import matplotlib.pyplot as plt from pycsou.linop.conv import Convolve2D from scipy import signal sig = np.zeros(shape=(100,100)) sig[sig.shape[0] // 2 - 2:sig.shape[0] // 2 + 3, sig.shape[1] // 2 - 2:sig.shape[1] // 2 + 3] = 1 filter = signal.hann(50) filter = filter[None,:] * filter[:,None] ConvOp = Convolve2D(size=sig.size, filter=filter, shape=sig.shape) filtered = (ConvOp * sig.ravel()).reshape(sig.shape) correlated = (ConvOp.H * sig.ravel()).reshape(sig.shape) plt.figure() plt.subplot(1,3,1) plt.imshow(sig, cmap='plasma'); plt.title('Signal') plt.subplot(1,3,2) plt.imshow(filtered, cmap='plasma'); plt.title('Filtered Signal') plt.subplot(1,3,3) plt.imshow(correlated, cmap='plasma'); plt.title('Correlated Signal') plt.show() Notes ----- The ``Convolve2D`` operator applies two-dimensional convolution between the input signal :math:`d(t,x)` and a compact filter kernel :math:`h(t,x)` in forward model: .. math:: y(t,x) = \int_{-\infty}^{\infty}\int_{-\infty}^{\infty} h(t-\tau,x-\chi) d(\tau,\chi) d\tau d\chi This operation can be discretized as follows .. math:: y[i,n] = \sum_{j=-\infty}^{\infty} \sum_{m=-\infty}^{\infty} h[i-j,n-m] d[j,m] as well as performed in the frequency domain: .. math:: Y(f, k_x) = \mathscr{F} (h(t,x)) \times \mathscr{F} (d(t,x)). ``Convolve2D`` operator uses :py:func:`scipy.signal.convolve` that automatically chooses the best domain for the operation to be carried out. As the adjoint of convolution is correlation, ``Convolve2D`` operator applies correlation in the adjoint mode. In the time domain: .. math:: y(t,x) = \int_{-\infty}^{\infty}\int_{-\infty}^{\infty} h(t+\tau,x+\chi) d(\tau,\chi) d\tau d\chi or in the frequency domain: .. math:: y(t, x) = \mathscr{F}^{-1} (H(f, k_x)^\ast \times D(f, k_x)).
See Also -------- :py:func:`~pycsou.linop.conv.Convolve1D` """ if (filter.shape[0] % 2) == 0: offset0 = filter.shape[0] // 2 - 1 else: offset0 = filter.shape[0] // 2 if (filter.shape[1] % 2) == 0: offset1 = filter.shape[1] // 2 - 1 else: offset1 = filter.shape[1] // 2 offset = (offset0, offset1) PyLop = pyconv.Convolve2D(N=size, h=filter, dims=shape, nodir=None, dtype=dtype, method=method, offset=offset) return PyLopLinearOperator(PyLop)
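# A hypothetical dot-test covering both offset branches above at once (mixed-parity kernel):
# the adjoint wired up by the wrapper must satisfy <C z, w> = <z, C^H w>.
def _dot_test_convolve2d():
    import numpy as np
    rng = np.random.default_rng(1)
    z = rng.standard_normal((16, 16))
    w = rng.standard_normal((16, 16))
    h = rng.standard_normal((4, 5))  # even size along axis 0, odd along axis 1
    C = Convolve2D(size=z.size, filter=h, shape=z.shape)
    return np.isclose(np.dot(C * z.ravel(), w.ravel()), np.dot(z.ravel(), C.H * w.ravel()))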
def SubSampling(size: int, sampling_indices: Union[np.ndarray, list], shape: Optional[tuple] = None, axis: int = 0, dtype: str = 'float64', inplace: bool = True): r""" Subsampling operator. Extract subset of values from input array at locations ``sampling_indices`` in forward mode and place those values at locations ``sampling_indices`` in an otherwise zero array in adjoint mode. Parameters ---------- size : int Size of input array. sampling_indices : :obj:`list` or :obj:`numpy.ndarray` Integer indices of samples for data selection. shape : tuple Shape of input array (``None`` if only one dimension is available). axis : int When ``shape`` is not ``None``, axis along which subsampling is applied. dtype : str Type of elements in input array. inplace : bool Work inplace (``True``) or make a new copy (``False``). By default, data is a reference to the model (in forward) and model is a reference to the data (in adjoint). Returns ------- :py:class:`~pycsou.linop.base.PyLopLinearOperator` The subsampling operator. Raises ------ ValueError If shape and size do not match. Examples -------- .. testsetup:: import numpy as np from pycsou.linop.sampling import SubSampling .. doctest:: >>> x = np.arange(9).reshape(3,3) >>> sampling_indices = [0,2] >>> SamplingOp=SubSampling(size=x.size, sampling_indices=sampling_indices) >>> SamplingOp * x.reshape(-1) array([0, 2]) >>> SamplingOp.adjoint(SamplingOp* x.reshape(-1)).reshape(x.shape) array([[0., 0., 2.], [0., 0., 0.], [0., 0., 0.]]) >>> SamplingOp=SubSampling(size=x.size, sampling_indices=sampling_indices, shape=x.shape, axis=1) >>> (SamplingOp * x.reshape(-1)).reshape(x.shape[1], len(sampling_indices)) array([[0, 2], [3, 5], [6, 8]]) >>> SamplingOp.adjoint(SamplingOp* x.reshape(-1)).reshape(x.shape) array([[0., 0., 2.], [3., 0., 5.], [6., 0., 8.]]) Notes ----- Subsampling of a subset of :math:`L` values at locations ``sampling_indices`` from an input vector :math:`\mathbf{x}` of size :math:`N` can be expressed as: .. math:: y_i = x_{n_i} \quad \forall i=1,2,...,L, where :math:`\mathbf{n}=[n_1, n_2,..., n_L]` is a vector containing the indices of the original array at which samples are taken. Conversely, in adjoint mode the available values in the data vector :math:`\mathbf{y}` are placed at locations :math:`\mathbf{n}=[n_1, n_2,..., n_L]` in the model vector: .. math:: x_{n_i} = y_i \quad \forall i=1,2,...,L and :math:`x_{j}=0 \,\forall j \neq n_i` (i.e., at all other locations in input vector). See Also -------- :py:class:`~pycsou.linop.sampling.Masking`, :py:class:`~pycsou.linop.sampling.Downsampling` """ PyLop = pylops.Restriction(M=size, iava=sampling_indices, dims=shape, dir=axis, dtype=dtype, inplace=inplace) return PyLopLinearOperator(PyLop=PyLop, is_symmetric=False, is_dense=False, is_sparse=False)
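# Composing forward and adjoint gives a masking operator: S^H S zeroes every entry whose
# index is not in ``sampling_indices``, which is exactly the behaviour described in the
# Notes above. A hypothetical sketch of that relationship:
def _check_subsampling_gram_is_mask():
    import numpy as np
    x = np.arange(6.0)
    idx = [1, 4]
    S = SubSampling(size=x.size, sampling_indices=idx)
    mask = np.zeros_like(x)
    mask[idx] = 1.0
    # S.adjoint(S * x) keeps the sampled entries and zeroes the rest.
    return np.allclose(S.adjoint(S * x), mask * x)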