Example #1
    def __str__(self):
        """Return ``str(self)``."""
        inner_str = '{}'.format(self.domain)
        dtype_str = dtype_repr(self.out_dtype)

        if self.field == RealNumbers():
            if self.out_dtype == np.dtype('float64'):
                pass
            else:
                inner_str += ', out_dtype={}'.format(dtype_str)

        elif self.field == ComplexNumbers():
            if self.out_dtype == np.dtype('complex128'):
                inner_str += ', field={!r}'.format(self.field)
            else:
                inner_str += ', out_dtype={}'.format(dtype_str)

        else:  # different field, name explicitly
            inner_str += ', field={!r}'.format(self.field)
            inner_str += ', out_dtype={}'.format(dtype_str)

        return '{}({})'.format(self.__class__.__name__, inner_str)
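A minimal usage sketch of the branching above (standalone mock, not the actual ODL class; the real method formats ``self.domain`` and uses ``dtype_repr``):

import numpy as np

def describe(field, out_dtype, domain='[0, 1]'):
    """Mimic the branching of ``__str__`` above with plain inputs."""
    inner = str(domain)
    dtype = np.dtype(out_dtype)
    if field == 'real':
        if dtype != np.dtype('float64'):       # non-default real dtype
            inner += ', out_dtype={}'.format(dtype.name)
    elif field == 'complex':
        if dtype == np.dtype('complex128'):    # default complex dtype
            inner += ', field=ComplexNumbers()'
        else:
            inner += ', out_dtype={}'.format(dtype.name)
    else:                                      # other field: name both explicitly
        inner += ', field={!r}, out_dtype={}'.format(field, dtype.name)
    return 'FunctionSpace({})'.format(inner)

print(describe('real', 'float64'))        # FunctionSpace([0, 1])
print(describe('real', 'float32'))        # FunctionSpace([0, 1], out_dtype=float32)
print(describe('complex', 'complex128'))  # FunctionSpace([0, 1], field=ComplexNumbers())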
Example #2
def fn_impl(request):
    """String with an available `FnBase` implementation name."""
    return request.param

ntuples_impl_params = odl.NTUPLES_IMPLS.keys()
ntuples_impl_ids = [" impl = '{}' ".format(p) for p in ntuples_impl_params]


@fixture(scope="module", ids=ntuples_impl_ids, params=ntuples_impl_params)
def ntuples_impl(request):
    """String with an available `NtuplesBase` implementation name."""
    return request.param


floating_dtype_params = np.sctypes['float'] + np.sctypes['complex']
floating_dtype_ids = [' dtype = {} '.format(dtype_repr(dt))
                      for dt in floating_dtype_params]


@fixture(scope="module", ids=floating_dtype_ids, params=floating_dtype_params)
def floating_dtype(request):
    """Floating point (real or complex) dtype."""
    return request.param


scalar_dtype_params = (floating_dtype_params +
                       np.sctypes['int'] +
                       np.sctypes['uint'])
scalar_dtype_ids = [' dtype = {} '.format(dtype_repr(dt))
                    for dt in scalar_dtype_params]
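A minimal sketch of a test consuming the ``floating_dtype`` fixture defined above (hypothetical test body; pytest injects one parameter per run):

import numpy as np

def test_floating_dtype_is_inexact(floating_dtype):
    # Every fixture parameter comes from np.sctypes['float'] + np.sctypes['complex']
    dt = np.dtype(floating_dtype)
    assert dt.kind in ('f', 'c')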
Example #3
def _pyfftw_check_args(arr_in, arr_out, axes, halfcomplex, direction):
    """Raise an error if anything is not ok with in and out."""
    if len(set(axes)) != len(axes):
        raise ValueError('duplicate axes are not allowed')

    if direction == 'forward':
        out_shape = list(arr_in.shape)
        if halfcomplex:
            try:
                out_shape[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1
            except IndexError:
                raise IndexError('axis index {} out of range for array '
                                 'with {} axes'
                                 ''.format(axes[-1], arr_in.ndim))

        if arr_out.shape != tuple(out_shape):
            raise ValueError('expected output shape {}, got {}'
                             ''.format(tuple(out_shape), arr_out.shape))

        if is_real_dtype(arr_in.dtype):
            out_dtype = complex_dtype(arr_in.dtype)
        elif halfcomplex:
            raise ValueError('cannot combine halfcomplex forward transform '
                             'with complex input')
        else:
            out_dtype = arr_in.dtype

        if arr_out.dtype != out_dtype:
            raise ValueError('expected output dtype {}, got {}'
                             ''.format(dtype_repr(out_dtype),
                                       dtype_repr(arr_out.dtype)))

    elif direction == 'backward':
        in_shape = list(arr_out.shape)
        if halfcomplex:
            try:
                in_shape[axes[-1]] = arr_out.shape[axes[-1]] // 2 + 1
            except IndexError:
                raise IndexError('axis index {} out of range for array '
                                 'with {} axes'
                                 ''.format(axes[-1], arr_out.ndim))

        if arr_in.shape != tuple(in_shape):
            raise ValueError('expected input shape {}, got {}'
                             ''.format(tuple(in_shape), arr_in.shape))

        if is_real_dtype(arr_out.dtype):
            in_dtype = complex_dtype(arr_out.dtype)
        elif halfcomplex:
            raise ValueError('cannot combine halfcomplex backward transform '
                             'with complex output')
        else:
            in_dtype = arr_out.dtype

        if arr_in.dtype != in_dtype:
            raise ValueError('expected input dtype {}, got {}'
                             ''.format(dtype_repr(in_dtype),
                                       dtype_repr(arr_in.dtype)))

    else:  # Shouldn't happen
        raise RuntimeError
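A NumPy-only sketch of the shape/dtype contract that the checks above enforce for a forward halfcomplex transform (the helper itself is private, so the arrays are simply constructed to satisfy the same conditions):

import numpy as np

arr_in = np.zeros((8, 6), dtype='float32')
axes = (0, 1)

# Halfcomplex forward: the last transform axis is halved to N // 2 + 1 entries
out_shape = list(arr_in.shape)
out_shape[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1
# The output must have the complex counterpart of the real input dtype
arr_out = np.zeros(tuple(out_shape), dtype='complex64')

assert arr_out.shape == (8, 4)
assert arr_out.dtype == np.dtype('complex64')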
Example #4
 def __repr__(self):
     """Return ``repr(self)``."""
     return '{}({}, {})'.format(self.__class__.__name__, self.size,
                                dtype_repr(self.dtype))
Example #5
    return request.param


ntuples_impl_params = odl.NTUPLES_IMPLS.keys()
ntuples_impl_ids = [" impl = '{}' ".format(p) for p in ntuples_impl_params]


@fixture(scope="module", ids=ntuples_impl_ids, params=ntuples_impl_params)
def ntuples_impl(request):
    """String with an available `NtuplesBase` implementation name."""
    return request.param


floating_dtype_params = np.sctypes['float'] + np.sctypes['complex']
floating_dtype_ids = [
    ' dtype = {} '.format(dtype_repr(dt)) for dt in floating_dtype_params
]


@fixture(scope="module", ids=floating_dtype_ids, params=floating_dtype_params)
def floating_dtype(request):
    """Floating point (real or complex) dtype."""
    return request.param


scalar_dtype_params = (floating_dtype_params + np.sctypes['int'] +
                       np.sctypes['uint'])
scalar_dtype_ids = [
    ' dtype = {} '.format(dtype_repr(dt)) for dt in scalar_dtype_params
]
Example #6
def _pyfftw_check_args(arr_in, arr_out, axes, halfcomplex, direction):
    """Raise an error if anything is not ok with in and out."""
    if len(set(axes)) != len(axes):
        raise ValueError('duplicate axes are not allowed')

    if direction == 'forward':
        out_shape = list(arr_in.shape)
        if halfcomplex:
            try:
                out_shape[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1
            except IndexError as err:
                raise_from(IndexError('axis index {} out of range for array '
                                      'with {} axes'
                                      ''.format(axes[-1], arr_in.ndim)),
                           err)

        if arr_out.shape != tuple(out_shape):
            raise ValueError('expected output shape {}, got {}'
                             ''.format(tuple(out_shape), arr_out.shape))

        if is_real_dtype(arr_in.dtype):
            out_dtype = complex_dtype(arr_in.dtype)
        elif halfcomplex:
            raise ValueError('cannot combine halfcomplex forward transform '
                             'with complex input')
        else:
            out_dtype = arr_in.dtype

        if arr_out.dtype != out_dtype:
            raise ValueError('expected output dtype {}, got {}'
                             ''.format(dtype_repr(out_dtype),
                                       dtype_repr(arr_out.dtype)))

    elif direction == 'backward':
        in_shape = list(arr_out.shape)
        if halfcomplex:
            try:
                in_shape[axes[-1]] = arr_out.shape[axes[-1]] // 2 + 1
            except IndexError as err:
                raise_from(IndexError('axis index {} out of range for array '
                                      'with {} axes'
                                      ''.format(axes[-1], arr_out.ndim)),
                           err)

        if arr_in.shape != tuple(in_shape):
            raise ValueError('expected input shape {}, got {}'
                             ''.format(tuple(in_shape), arr_in.shape))

        if is_real_dtype(arr_out.dtype):
            in_dtype = complex_dtype(arr_out.dtype)
        elif halfcomplex:
            raise ValueError('cannot combine halfcomplex backward transform '
                             'with complex output')
        else:
            in_dtype = arr_out.dtype

        if arr_in.dtype != in_dtype:
            raise ValueError('expected input dtype {}, got {}'
                             ''.format(dtype_repr(in_dtype),
                                       dtype_repr(arr_in.dtype)))

    else:  # Shouldn't happen
        raise RuntimeError
Example #7
File: ft_utils.py, Project: TC-18/odl
def reciprocal_space(space, axes=None, halfcomplex=False, shift=True,
                     **kwargs):
    """Return the range of the Fourier transform on ``space``.

    Parameters
    ----------
    space : `DiscreteLp`
        Real space whose reciprocal is calculated. It must be
        uniformly discretized.
    axes : sequence of ints, optional
        Dimensions along which the Fourier transform is taken.
        Default: all axes
    halfcomplex : bool, optional
        If ``True``, take only the negative frequency part along the last
        axis. For ``False``, use the full frequency space.
        This option can only be used if ``space`` is a space of
        real-valued functions.
    shift : bool or sequence of bools, optional
        If ``True``, the reciprocal grid is shifted by half a stride in
        the negative direction. With a boolean sequence, this option
        is applied separately to each axis.
        If a sequence is provided, it must have the same length as
        ``axes`` if supplied. Note that this must be set to ``True``
        in the halved axis in half-complex transforms.
        Default: ``True``
    impl : string, optional
        Implementation back-end for the created space.
        Default: ``'numpy'``
    exponent : float, optional
        Create a space with this exponent. By default, the conjugate
        exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
        used, where ``q = inf`` for ``p = 1`` and vice versa.
    dtype : optional
        Complex data type of the created space. By default, the
        complex counterpart of ``space.dtype`` is used.

    Returns
    -------
    rspace : `DiscreteLp`
        Reciprocal of the input ``space``. If ``halfcomplex=True``, the
        upper end of the domain (where the half space ends) is chosen to
        coincide with the grid node.
    """
    if not isinstance(space, DiscreteLp):
        raise TypeError('`space` {!r} is not a `DiscreteLp` instance'
                        ''.format(space))
    if axes is None:
        axes = tuple(range(space.ndim))
    axes = normalized_axes_tuple(axes, space.ndim)

    if not all(space.is_uniform_byaxis[axis] for axis in axes):
        raise ValueError('`space` is not uniformly discretized in the '
                         '`axes` of the transform')

    if halfcomplex and space.field != RealNumbers():
        raise ValueError('`halfcomplex` option can only be used with real '
                         'spaces')

    exponent = kwargs.pop('exponent', None)
    if exponent is None:
        exponent = conj_exponent(space.exponent)

    dtype = kwargs.pop('dtype', None)
    if dtype is None:
        dtype = complex_dtype(space.dtype)
    else:
        if not is_complex_floating_dtype(dtype):
            raise ValueError('{} is not a complex data type'
                             ''.format(dtype_repr(dtype)))

    impl = kwargs.pop('impl', 'numpy')

    # Calculate range
    recip_grid = reciprocal_grid(space.grid, shift=shift,
                                 halfcomplex=halfcomplex, axes=axes)

    # Make a partition with nodes on the boundary in the last transform axis
    # if `halfcomplex == True`, otherwise a standard partition.
    if halfcomplex:
        max_pt = {axes[-1]: recip_grid.max_pt[axes[-1]]}
        part = uniform_partition_fromgrid(recip_grid, max_pt=max_pt)
    else:
        part = uniform_partition_fromgrid(recip_grid)

    # Use the convention of adding a hat to represent the Fourier transform of a variable
    axis_labels = list(space.axis_labels)
    for i in axes:
        # Avoid double math
        label = axis_labels[i].replace('$', '')
        axis_labels[i] = r'$\^{{{}}}$'.format(label)

    recip_spc = uniform_discr_frompartition(part, exponent=exponent,
                                            dtype=dtype, impl=impl,
                                            axis_labels=axis_labels)

    return recip_spc
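A usage sketch for the function above (the import path is an assumption; ``reciprocal_space`` lives in ODL's Fourier-transform utilities, but the exact module may differ between versions):

import odl
from odl.trafos.util import reciprocal_space  # assumed import location

space = odl.uniform_discr(0, 1, 16, dtype='float64')
rspace = reciprocal_space(space, halfcomplex=True)
# halfcomplex keeps roughly half of the last axis: 16 // 2 + 1 = 9 points
print(rspace.shape)   # (9,)
print(rspace.dtype)   # complex128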
Example #8
File: ft_utils.py, Project: TC-18/odl
def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes,
                         interp, sign='-', op='multiply', out=None):
    """Post-process the Fourier-space data after DFT.

    This function multiplies the given data with the separable
    function::

        q(xi) = exp(+- 1j * dot(x[0], xi)) * s * phi_hat(xi_bar)

    where ``x[0]`` and ``s`` are the minimum point and the stride of
    the real-space grid, respectively, and ``phi_hat(xi_bar)`` is the FT
    of the interpolation kernel. The sign of the exponent depends on the
    choice of ``sign``. Note that for ``op='divide'`` the
    multiplication with ``s * phi_hat(xi_bar)`` is replaced by a
    division with the same array.

    In discretized form on the reciprocal grid, the exponential part
    of this function becomes an array::

        q[k] = exp(+- 1j * dot(x[0], xi[k]))

    and the arguments ``xi_bar`` to the interpolation kernel
    are the normalized frequencies::

        for 'shift=True'  : xi_bar[k] = -pi + pi * (2*k) / N
        for 'shift=False' : xi_bar[k] = -pi + pi * (2*k+1) / N

    See [Pre+2007], Section 13.9 "Computing Fourier Integrals Using
    the FFT" for a similar approach.

    Parameters
    ----------
    arr : `array-like`
        Array to be post-processed. An array with real data type is
        converted to its complex counterpart.
    real_grid : uniform `RectGrid`
        Real space grid in the transform.
    recip_grid : uniform `RectGrid`
        Reciprocal grid in the transform
    shift : bool or sequence of bools
        If ``True``, the grid is shifted by half a stride in the negative
        direction in the corresponding axes. The sequence must have the
        same length as ``axes``.
    axes : int or sequence of ints
        Dimensions along which to take the transform. The sequence must
        have the same length as ``shift``.
    interp : string or sequence of strings
        Interpolation scheme used in real space.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    op : {'multiply', 'divide'}, optional
        Operation to perform with the stride times the interpolation
        kernel FT.
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``, an
        in-place modification is performed.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the post-processing. If ``out`` was given, the returned
        object is a reference to it.

    References
    ----------
    [Pre+2007] Press, W H, Teukolsky, S A, Vetterling, W T, and Flannery, B P.
    *Numerical Recipes in C - The Art of Scientific Computing* (Volume 3).
    Cambridge University Press, 2007.
    """
    arr = np.asarray(arr)
    if is_real_floating_dtype(arr.dtype):
        arr = arr.astype(complex_dtype(arr.dtype))
    elif not is_complex_floating_dtype(arr.dtype):
        raise ValueError('array data type {} is not a complex floating point '
                         'data type'.format(dtype_repr(arr.dtype)))

    if out is None:
        out = arr.copy()
    elif out is not arr:
        out[:] = arr

    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    op, op_in = str(op).lower(), op
    if op not in ('multiply', 'divide'):
        raise ValueError("kernel `op` '{}' not understood".format(op_in))

    # Make a list from interp if that's not the case already
    try:
        # Duck-typed string check
        interp + ''
    except TypeError:
        pass
    else:
        interp = [str(interp).lower()] * arr.ndim

    onedim_arrs = []
    for ax, shift, intp in zip(axes, shift_list, interp):
        x = real_grid.min_pt[ax]
        xi = recip_grid.coord_vectors[ax]

        # First part: exponential array
        onedim_arr = np.exp(imag * x * xi)

        # Second part: interpolation kernel
        len_dft = recip_grid.shape[ax]
        len_orig = real_grid.shape[ax]
        halfcomplex = (len_dft < len_orig)
        odd = len_orig % 2

        fmin = -0.5 if shift else -0.5 + 1.0 / (2 * len_orig)
        if halfcomplex:
            # maximum lies around 0, possibly half a cell left or right of it
            if shift and odd:
                fmax = - 1.0 / (2 * len_orig)
            elif not shift and not odd:
                fmax = 1.0 / (2 * len_orig)
            else:
                fmax = 0.0

        else:  # not halfcomplex
            # maximum lies close to 0.5, half or full cell left of it
            if shift:
                # -0.5 + (N-1)/N = 0.5 - 1/N
                fmax = 0.5 - 1.0 / len_orig
            else:
                # -0.5 + 1/(2*N) + (N-1)/N = 0.5 - 1/(2*N)
                fmax = 0.5 - 1.0 / (2 * len_orig)

        freqs = np.linspace(fmin, fmax, num=len_dft)
        stride = real_grid.stride[ax]

        interp_kernel = _interp_kernel_ft(freqs, intp)
        interp_kernel *= stride

        if op == 'multiply':
            onedim_arr *= interp_kernel
        else:
            onedim_arr /= interp_kernel

        onedim_arrs.append(onedim_arr.astype(out.dtype, copy=False))

    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
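The exponential part ``q[k] = exp(+- 1j * dot(x[0], xi[k]))`` can be written down directly; a NumPy-only sketch for a 1D shifted grid (the starting point ``xi[0] = -pi/s`` comes from the pre-processing docstring below, and the stride ``2*pi/(n*s)`` is the standard DFT frequency spacing):

import numpy as np

n, s, x0 = 8, 0.1, -0.4                                # grid size, stride, minimum point
xi = -np.pi / s + 2 * np.pi * np.arange(n) / (n * s)   # shifted reciprocal grid
q = np.exp(-1j * x0 * xi)                              # sign='-' convention
print(np.round(q, 4))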
Example #9
File: ft_utils.py, Project: TC-18/odl
def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None):
    """Pre-process the real-space data before DFT.

    This function multiplies the given data with the separable
    function::

        p(x) = exp(+- 1j * dot(x - x[0], xi[0]))

    where ``x[0]`` and ``xi[0]`` are the minimum coordinates of
    the real-space and reciprocal grids, respectively. The sign of
    the exponent depends on the choice of ``sign``. In discretized
    form, this function becomes an array::

        p[k] = exp(+- 1j * k * s * xi[0])

    If the reciprocal grid is not shifted, i.e. symmetric around 0,
    it is ``xi[0] =  pi/s * (-1 + 1/N)``, hence::

        p[k] = exp(-+ 1j * pi * k * (1 - 1/N))

    For a shifted grid, we have ``xi[0] = -pi/s``, thus the
    array is given by::

        p[k] = (-1)**k

    Parameters
    ----------
    arr : `array-like`
        Array to be pre-processed. If its data type is a real
        non-floating type, it is converted to 'float64'.
    shift : bool or sequence of bools, optional
        If ``True``, the grid is shifted by half a stride in the negative
        direction. With a sequence, this option is applied separately on
        each axis.
    axes : int or sequence of ints, optional
        Dimensions in which to calculate the reciprocal. The sequence
        must have the same length as ``shift`` if the latter is given
        as a sequence.
        Default: all axes.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``,
        an in-place modification is performed. For real data type,
        this is only possible for ``shift=True`` since the factors are
        complex otherwise.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the pre-processing. If ``out`` was given, the returned
        object is a reference to it.

    Notes
    -----
    If ``out`` is not specified, the data type of the returned array
    is the same as that of ``arr`` except when ``arr`` has real data
    type and ``shift`` is not ``True``. In this case, the return type
    is the complex counterpart of ``arr.dtype``.
    """
    arr = np.asarray(arr)
    if not is_scalar_dtype(arr.dtype):
        raise ValueError('array has non-scalar data type {}'
                         ''.format(dtype_repr(arr.dtype)))
    elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype):
        arr = arr.astype('float64')

    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    shape = arr.shape
    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    # Make a copy of arr with correct data type if necessary, or copy values.
    if out is None:
        if is_real_dtype(arr.dtype) and not all(shift_list):
            out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True)
        else:
            out = arr.copy()
    else:
        out[:] = arr

    if is_real_dtype(out.dtype) and not shift:
        raise ValueError('cannot pre-process real input in-place without '
                         'shift')

    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    def _onedim_arr(length, shift):
        if shift:
            # (-1)^indices
            factor = np.ones(length, dtype=out.dtype)
            factor[1::2] = -1
        else:
            factor = np.arange(length, dtype=out.dtype)
            factor *= -imag * np.pi * (1 - 1.0 / length)
            np.exp(factor, out=factor)
        return factor.astype(out.dtype, copy=False)

    onedim_arrs = []
    for axis, shift in zip(axes, shift_list):
        length = shape[axis]
        onedim_arrs.append(_onedim_arr(length, shift))

    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
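A quick check of the ``p[k] = (-1)**k`` claim for a shifted grid (assuming ``dft_preprocess_data`` is importable from ODL's Fourier-transform utilities; the path may vary between versions):

import numpy as np
from odl.trafos.util import dft_preprocess_data  # assumed import location

arr = np.ones(6)
pre = dft_preprocess_data(arr, shift=True)
print(pre)                                       # [ 1. -1.  1. -1.  1. -1.]
assert np.array_equal(pre, (-1.0) ** np.arange(6))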
Example #10
def reduce_over_partition(discr_func,
                          partition,
                          reduction,
                          pad_const=0,
                          out=None):
    """Reduce a discrete function blockwise over a coarser partition.

    This function is intended as a helper for multi-grid
    computations where a finely discretized function needs to undergo
    a blockwise reduction operation over a coarser partition of a
    containing spatial region. An example is to average the given
    function over larger blocks as defined by the partition.

    Parameters
    ----------
    discr_func : `DiscreteLpElement`
        Element in a uniformly discretized function space that is to be
        reduced over blocks defined by ``partition``.
    partition : uniform `RectPartition`
        Coarser partition than ``discr_func.space.partition`` that defines
        the large cells (blocks) over which ``discr_func`` is reduced.
        Its ``cell_sides`` must be an integer multiple of
        ``discr_func.space.cell_sides``.
    reduction : callable
        Reduction function defining the operation on each block of values
        in ``discr_func``. It needs to be callable as
        ``reduction(array, axes=my_axes)`` or
        ``reduction(array, axes=my_axes, out=out_array)``, where
        ``array, out_array`` are `numpy.ndarray`'s, and ``my_axes`` is a
        sequence of ints specifying the axes over which the reduction is
        performed.
        The typical examples are NumPy reductions like `np.sum` or `np.mean`,
        but custom functions are also possible.
    pad_const : scalar, optional
        This value is filled into the parts that are not covered by the
        function.
    out : `numpy.ndarray`, optional
        Array to which the output is written. It needs to have the same
        ``shape`` as ``partition`` and a ``dtype`` to which
        ``discr_func.dtype`` can be cast.

    Returns
    -------
    out : `numpy.ndarray`
        Array holding the result of the reduction operation. If ``out``
        was given, the returned object is a reference to it.

    Examples
    --------
    Consider a simple 1D example with 4 small cells per large cell,
    and summing a constant function over the large cells:

    >>> partition = odl.uniform_partition(0, 1, 5)
    >>> partition.cell_boundary_vecs
    (array([ 0. ,  0.2,  0.4,  0.6,  0.8,  1. ]),)
    >>> space = odl.uniform_discr(0, 0.5, 10)  # 0.5 falls between
    >>> func = space.one()
    >>> reduce_over_partition(func, partition, reduction=np.sum)
    array([ 4.,  4.,  2.,  0.,  0.])
    >>> # The value 4 is due to summing 4 ones from 4 small cells per
    >>> # large cell.
    >>> # The "2" in the third cell is expected since it only counts half --
    >>> # the overlap of func.domain is only half a cell ([0.4, 0.5]).

    In 2D, everything (including partial overlap weighting) works per
    axis:

    >>> partition = odl.uniform_partition([0, 0], [1, 1], [5, 5])
    >>> space = odl.uniform_discr([0, 0], [0.5, 0.7], [10, 14])
    >>> func = space.one()
    >>> reduce_over_partition(func, partition, reduction=np.sum)
    array([[ 16.,  16.,  16.,   8.,   0.],
           [ 16.,  16.,  16.,   8.,   0.],
           [  8.,   8.,   8.,   4.,   0.],
           [  0.,   0.,   0.,   0.,   0.],
           [  0.,   0.,   0.,   0.,   0.]])
    >>> # 16 = sum of 16 ones from 4 x 4 small cells per large cell
    >>> # 8: cells have half weight due to half overlap
    >>> # 4: the corner cell overlaps half in both axes, i.e. 1/4 in
    >>> # total
    """
    if not isinstance(discr_func, DiscreteLpElement):
        raise TypeError('`discr_func` must be a `DiscreteLpElement` instance, '
                        'got {!r}'.format(discr_func))
    if not discr_func.space.is_uniform:
        raise ValueError('`discr_func.space` is not uniformly discretized')
    if not isinstance(partition, RectPartition):
        raise TypeError('`partition` must be a `RectPartition` instance, '
                        'got {!r}'.format(partition))
    if not partition.is_uniform:
        raise ValueError('`partition` is not uniform')

    # TODO: use different eps in each axis?
    dom_eps = 1e-8 * max(discr_func.space.partition.extent)
    if not partition.set.contains_set(discr_func.space.domain, atol=dom_eps):
        raise ValueError('`partition.set` {} does not contain '
                         '`discr_func.space.domain` {}'
                         ''.format(partition.set, discr_func.space.domain))

    if out is None:
        out = np.empty(partition.shape, dtype=discr_func.dtype)
    if not isinstance(out, np.ndarray):
        raise TypeError('`out` must be a `numpy.ndarray` instance, got '
                        '{!r}'.format(out))
    if not np.can_cast(discr_func.dtype, out.dtype):
        raise ValueError('cannot safely cast from `discr_func.dtype` {} '
                         'to `out.dtype` {}'
                         ''.format(dtype_repr(discr_func.dtype),
                                   dtype_repr(out.dtype)))
    if not np.array_equal(out.shape, partition.shape):
        raise ValueError('`out.shape` differs from `partition.shape` '
                         '({} != {})'.format(out.shape, partition.shape))
    if not np.can_cast(pad_const, out.dtype):
        raise ValueError('cannot safely cast `pad_const` {} '
                         'to `out.dtype` {}'
                         ''.format(pad_const, dtype_repr(out.dtype)))
    out.fill(pad_const)

    # Some abbreviations for easier notation
    # All variables starting with "s" refer to properties of
    # `discr_func.space`, whereas "p" quantities refer to the (coarse)
    # `partition`.
    spc = discr_func.space
    smin, smax = spc.min_pt, spc.max_pt
    scsides = spc.cell_sides
    part = partition
    pmin, pmax = part.min_pt, part.max_pt
    func_arr = discr_func.asarray()
    ndim = spc.ndim

    # Partition cell sides must be an integer multiple of space cell sides
    csides_ratio_f = part.cell_sides / spc.cell_sides
    csides_ratio = np.around(csides_ratio_f).astype(int)
    if not np.allclose(csides_ratio_f, csides_ratio):
        raise ValueError('`partition.cell_sides` is a non-integer multiple '
                         '({}) of `discr_func.space.cell_sides`'
                         ''.format(csides_ratio_f))

    # Shift must be an integer multiple of space cell sides
    rel_shift_f = (smin - pmin) / scsides
    if not np.allclose(np.round(rel_shift_f), rel_shift_f):
        raise ValueError('shift between `partition` and `discr_func.space` '
                         'is a non-integer multiple ({}) of '
                         '`discr_func.space.cell_sides`'
                         ''.format(rel_shift_f))

    # Calculate relative position of a number of interesting points

    # Positions of the space domain min and max vectors relative to the
    # partition
    cvecs = part.cell_boundary_vecs
    smin_idx = np.array(part.index(smin), ndmin=1)
    smin_partpt = np.array([cvec[si + 1] for si, cvec in zip(smin_idx, cvecs)])
    smax_idx = np.array(part.index(smax), ndmin=1)
    smax_partpt = np.array([cvec[si] for si, cvec in zip(smax_idx, cvecs)])

    # Inner part of the partition in the space domain, i.e. partition cells
    # that are completely contained in the spatial domain and do not touch
    # its boundary
    p_inner_slc = [slice(li + 1, ri) for li, ri in zip(smin_idx, smax_idx)]

    # Positions of the first and last partition points that still lie in
    # the spatial domain, relative to the space partition
    pl_idx = np.array(np.round(spc.partition.index(smin_partpt,
                                                   floating=True)).astype(int),
                      ndmin=1)
    pr_idx = np.array(np.round(spc.partition.index(smax_partpt,
                                                   floating=True)).astype(int),
                      ndmin=1)
    s_inner_slc = [slice(li, ri) for li, ri in zip(pl_idx, pr_idx)]

    # Slices to constrain to left and right boundary in each axis
    pl_slc = [slice(li, li + 1) for li in smin_idx]
    pr_slc = [slice(ri, ri + 1) for ri in smax_idx]

    # Slices for the overlapping space cells to the left and the right
    # (up to left index excl. / from right index incl.)
    sl_slc = [slice(None, li) for li in pl_idx]
    sr_slc = [slice(ri, None) for ri in pr_idx]

    # Shapes for reduction of the inner part by summing over axes.
    reduce_inner_shape = []
    reduce_axes = tuple(2 * i + 1 for i in range(ndim))
    inner_shape = func_arr[tuple(s_inner_slc)].shape
    for n, k in zip(inner_shape, csides_ratio):
        reduce_inner_shape.extend([n // k, k])

    # Now we loop over boundary parts of all dimensions from 0 to ndim-1.
    # They are encoded as follows:
    # - We select inner (1) and outer (2) parts per axis by looping over
    #   `product([1, 2], repeat=ndim)`, using the name `parts`.
    # - Wherever there is a 2 in the sequence, 2 slices must be generated,
    #   one for left and one for right. The total number of slices is the
    #   product of the numbers in `parts`, i.e. `num_slcs = prod(parts)`.
    # - We get the indices of the 2's in the sequence and put them in
    #   `outer_indcs`.
    # - The "p" and "s" slice lists are initialized with the inner parts.
    #   We need `num_slcs` such lists for this particular sequence `parts`.
    # - Now we enumerate `outer_indcs` as `i, oi` and put into the
    #   (2*i)-th entry of the slice lists the "left" outer slice and into
    #   the (2*i+1)-th entry the "right" outer slice.
    #
    # The total number of slices to loop over is equal to
    # sum(k=0->ndim, binom(ndim, k) * 2^k) = 3^ndim.
    # This should not add too much computational overhead.
    for parts in product([1, 2], repeat=ndim):

        # Number of slices to consider
        num_slcs = np.prod(parts)

        # Indices where we need to consider the outer parts
        outer_indcs = tuple(np.where(np.equal(parts, 2))[0])

        # Initialize the "p" and "s" slice lists with the inner slices.
        # Each list contains `num_slcs` of those.
        p_slcs = [list(p_inner_slc) for _ in range(num_slcs)]
        s_slcs = [list(s_inner_slc) for _ in range(num_slcs)]
        # Put the left/right slice in the even/odd sublists at the
        # position indexed by the outer_indcs thing.
        # We also need to initialize the `reduce_shape`'s for all cases,
        # which has the value (n // k, k) for the "inner" axes and
        # (1, n) in the "outer" axes.
        reduce_shapes = [list(reduce_inner_shape) for _ in range(num_slcs)]
        for islc, bdry in enumerate(product('lr', repeat=len(outer_indcs))):
            for oi, l_or_r in zip(outer_indcs, bdry):
                if l_or_r == 'l':
                    p_slcs[islc][oi] = pl_slc[oi]
                    s_slcs[islc][oi] = sl_slc[oi]
                else:
                    p_slcs[islc][oi] = pr_slc[oi]
                    s_slcs[islc][oi] = sr_slc[oi]

            f_view = func_arr[tuple(s_slcs[islc])]
            for oi in outer_indcs:
                reduce_shapes[islc][2 * oi] = 1
                reduce_shapes[islc][2 * oi + 1] = f_view.shape[oi]

        # Compute the block reduction of all views represented by the current
        # `parts`. This is done by reshaping from the original shape to the
        # above calculated `reduce_shapes` and reducing over `reduce_axes`.
        for p_s, s_s, red_shp in zip(p_slcs, s_slcs, reduce_shapes):
            f_view = func_arr[tuple(s_s)]
            out_view = out[tuple(p_s)]

            if 0 not in f_view.shape:
                # View not empty, reduction makes sense
                _apply_reduction(arr=f_view.reshape(red_shp),
                                 out=out_view,
                                 axes=reduce_axes,
                                 reduction=reduction)
    return out
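Building on the doctest in the docstring above, any NumPy reduction such as ``np.mean`` can be plugged in place of ``np.sum`` (sketch; same 1D spaces and namespace as the doctest):

import numpy as np
import odl

partition = odl.uniform_partition(0, 1, 5)
space = odl.uniform_discr(0, 0.5, 10)
func = space.one()
print(reduce_over_partition(func, partition, reduction=np.mean))
# Covered blocks average their ones to 1.0; uncovered blocks keep pad_const (0)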
Example #11
File: ft_utils.py, Project: odlgroup/odl
def reciprocal_space(space, axes=None, halfcomplex=False, shift=True,
                     **kwargs):
    """Return the range of the Fourier transform on ``space``.

    Parameters
    ----------
    space : `DiscreteLp`
        Real space whose reciprocal is calculated. It must be
        uniformly discretized.
    axes : sequence of ints, optional
        Dimensions along which the Fourier transform is taken.
        Default: all axes
    halfcomplex : bool, optional
        If ``True``, take only the negative frequency part along the last
        axis. For ``False``, use the full frequency space.
        This option can only be used if ``space`` is a space of
        real-valued functions.
    shift : bool or sequence of bools, optional
        If ``True``, the reciprocal grid is shifted by half a stride in
        the negative direction. With a boolean sequence, this option
        is applied separately to each axis.
        If a sequence is provided, it must have the same length as
        ``axes`` if supplied. Note that this must be set to ``True``
        in the halved axis in half-complex transforms.
        Default: ``True``
    impl : string, optional
        Implementation back-end for the created space.
        Default: ``'numpy'``
    exponent : float, optional
        Create a space with this exponent. By default, the conjugate
        exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
        used, where ``q = inf`` for ``p = 1`` and vice versa.
    dtype : optional
        Complex data type of the created space. By default, the
        complex counterpart of ``space.dtype`` is used.

    Returns
    -------
    rspace : `DiscreteLp`
        Reciprocal of the input ``space``. If ``halfcomplex=True``, the
        upper end of the domain (where the half space ends) is chosen to
        coincide with the grid node.
    """
    if not isinstance(space, DiscreteLp):
        raise TypeError('`space` {!r} is not a `DiscreteLp` instance'
                        ''.format(space))
    if not space.is_uniform:
        raise ValueError('`space` is not uniformly discretized')

    if axes is None:
        axes = tuple(range(space.ndim))

    axes = normalized_axes_tuple(axes, space.ndim)

    if halfcomplex and space.field != RealNumbers():
        raise ValueError('`halfcomplex` option can only be used with real '
                         'spaces')

    exponent = kwargs.pop('exponent', None)
    if exponent is None:
        exponent = conj_exponent(space.exponent)

    dtype = kwargs.pop('dtype', None)
    if dtype is None:
        dtype = complex_dtype(space.dtype)
    else:
        if not is_complex_floating_dtype(dtype):
            raise ValueError('{} is not a complex data type'
                             ''.format(dtype_repr(dtype)))

    impl = kwargs.pop('impl', 'numpy')

    # Calculate range
    recip_grid = reciprocal_grid(space.grid, shift=shift,
                                 halfcomplex=halfcomplex, axes=axes)

    # Make a partition with nodes on the boundary in the last transform axis
    # if `halfcomplex == True`, otherwise a standard partition.
    if halfcomplex:
        max_pt = {axes[-1]: recip_grid.max_pt[axes[-1]]}
        part = uniform_partition_fromgrid(recip_grid, max_pt=max_pt)
    else:
        part = uniform_partition_fromgrid(recip_grid)

    # Use the convention of adding a hat to represent the Fourier transform of a variable
    axis_labels = list(space.axis_labels)
    for i in axes:
        # Avoid double math
        label = axis_labels[i].replace('$', '')
        axis_labels[i] = r'$\^{{{}}}$'.format(label)

    recip_spc = uniform_discr_frompartition(part, exponent=exponent,
                                            dtype=dtype, impl=impl,
                                            axis_labels=axis_labels)

    return recip_spc
Example #12
File: ft_utils.py, Project: odlgroup/odl
def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes,
                         interp, sign='-', op='multiply', out=None):
    """Post-process the Fourier-space data after DFT.

    This function multiplies the given data with the separable
    function::

        q(xi) = exp(+- 1j * dot(x[0], xi)) * s * phi_hat(xi_bar)

    where ``x[0]`` and ``s`` are the minimum point and the stride of
    the real-space grid, respectively, and ``phi_hat(xi_bar)`` is the FT
    of the interpolation kernel. The sign of the exponent depends on the
    choice of ``sign``. Note that for ``op='divide'`` the
    multiplication with ``s * phi_hat(xi_bar)`` is replaced by a
    division with the same array.

    In discretized form on the reciprocal grid, the exponential part
    of this function becomes an array::

        q[k] = exp(+- 1j * dot(x[0], xi[k]))

    and the arguments ``xi_bar`` to the interpolation kernel
    are the normalized frequencies::

        for 'shift=True'  : xi_bar[k] = -pi + pi * (2*k) / N
        for 'shift=False' : xi_bar[k] = -pi + pi * (2*k+1) / N

    See [Pre+2007]_, Section 13.9 "Computing Fourier Integrals Using
    the FFT" for a similar approach.

    Parameters
    ----------
    arr : `array-like`
        Array to be post-processed. An array with real data type is
        converted to its complex counterpart.
    real_grid : `RegularGrid`
        Real space grid in the transform
    recip_grid : `RegularGrid`
        Reciprocal grid in the transform
    shift : bool or sequence of bools
        If ``True``, the grid is shifted by half a stride in the negative
        direction in the corresponding axes. The sequence must have the
        same length as ``axes``.
    axes : int or sequence of ints
        Dimensions along which to take the transform. The sequence must
        have the same length as ``shift``.
    interp : string or sequence of strings
        Interpolation scheme used in real space.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    op : {'multiply', 'divide'}, optional
        Operation to perform with the stride times the interpolation
        kernel FT.
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``, an
        in-place modification is performed.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the post-processing. If ``out`` was given, the returned
        object is a reference to it.
    """
    arr = np.asarray(arr)
    if is_real_floating_dtype(arr.dtype):
        arr = arr.astype(complex_dtype(arr.dtype))
    elif not is_complex_floating_dtype(arr.dtype):
        raise ValueError('array data type {} is not a complex floating point '
                         'data type'.format(dtype_repr(arr.dtype)))

    if out is None:
        out = arr.copy()
    elif out is not arr:
        out[:] = arr

    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    op, op_in = str(op).lower(), op
    if op not in ('multiply', 'divide'):
        raise ValueError("kernel `op` '{}' not understood".format(op_in))

    # Make a list from interp if that's not the case already
    try:
        # Duck-typed string check
        interp + ''
    except TypeError:
        pass
    else:
        interp = [str(interp).lower()] * arr.ndim

    onedim_arrs = []
    for ax, shift, intp in zip(axes, shift_list, interp):
        x = real_grid.min_pt[ax]
        xi = recip_grid.coord_vectors[ax]

        # First part: exponential array
        onedim_arr = np.exp(imag * x * xi)

        # Second part: interpolation kernel
        len_dft = recip_grid.shape[ax]
        len_orig = real_grid.shape[ax]
        halfcomplex = (len_dft < len_orig)
        odd = len_orig % 2

        fmin = -0.5 if shift else -0.5 + 1.0 / (2 * len_orig)
        if halfcomplex:
            # maximum lies around 0, possibly half a cell left or right of it
            if shift and odd:
                fmax = - 1.0 / (2 * len_orig)
            elif not shift and not odd:
                fmax = 1.0 / (2 * len_orig)
            else:
                fmax = 0.0

        else:  # not halfcomplex
            # maximum lies close to 0.5, half or full cell left of it
            if shift:
                # -0.5 + (N-1)/N = 0.5 - 1/N
                fmax = 0.5 - 1.0 / len_orig
            else:
                # -0.5 + 1/(2*N) + (N-1)/N = 0.5 - 1/(2*N)
                fmax = 0.5 - 1.0 / (2 * len_orig)

        freqs = np.linspace(fmin, fmax, num=len_dft)
        stride = real_grid.stride[ax]

        if op == 'multiply':
            onedim_arr *= stride * _interp_kernel_ft(freqs, intp)
        else:
            onedim_arr /= stride * _interp_kernel_ft(freqs, intp)

        onedim_arrs.append(onedim_arr.astype(out.dtype, copy=False))

    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
Example #13
File: ft_utils.py, Project: odlgroup/odl
def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None):
    """Pre-process the real-space data before DFT.

    This function multiplies the given data with the separable
    function::

        p(x) = exp(+- 1j * dot(x - x[0], xi[0]))

    where ``x[0]`` and ``xi[0]`` are the minimum coordinates of
    the real-space and reciprocal grids, respectively. The sign of
    the exponent depends on the choice of ``sign``. In discretized
    form, this function becomes an array::

        p[k] = exp(+- 1j * k * s * xi[0])

    If the reciprocal grid is not shifted, i.e. symmetric around 0,
    it is ``xi[0] =  pi/s * (-1 + 1/N)``, hence::

        p[k] = exp(-+ 1j * pi * k * (1 - 1/N))

    For a shifted grid, we have ``xi[0] = -pi/s``, thus the
    array is given by::

        p[k] = (-1)**k

    Parameters
    ----------
    arr : `array-like`
        Array to be pre-processed. If its data type is a real
        non-floating type, it is converted to 'float64'.
    shift : bool or sequence of bools, optional
        If ``True``, the grid is shifted by half a stride in the negative
        direction. With a sequence, this option is applied separately on
        each axis.
    axes : int or sequence of ints, optional
        Dimensions in which to calculate the reciprocal. The sequence
        must have the same length as ``shift`` if the latter is given
        as a sequence.
        Default: all axes.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``,
        an in-place modification is performed. For real data type,
        this is only possible for ``shift=True`` since the factors are
        complex otherwise.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the pre-processing. If ``out`` was given, the returned
        object is a reference to it.

    Notes
    -----
    If ``out`` is not specified, the data type of the returned array
    is the same as that of ``arr`` except when ``arr`` has real data
    type and ``shift`` is not ``True``. In this case, the return type
    is the complex counterpart of ``arr.dtype``.
    """
    arr = np.asarray(arr)
    if not is_scalar_dtype(arr.dtype):
        raise ValueError('array has non-scalar data type {}'
                         ''.format(dtype_repr(arr.dtype)))
    elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype):
        arr = arr.astype('float64')

    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    shape = arr.shape
    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    # Make a copy of arr with correct data type if necessary, or copy values.
    if out is None:
        if is_real_dtype(arr.dtype) and not all(shift_list):
            out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True)
        else:
            out = arr.copy()
    else:
        out[:] = arr

    if is_real_dtype(out.dtype) and not shift:
        raise ValueError('cannot pre-process real input in-place without '
                         'shift')

    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    def _onedim_arr(length, shift):
        if shift:
            # (-1)^indices
            factor = np.ones(length, dtype=out.dtype)
            factor[1::2] = -1
        else:
            factor = np.arange(length, dtype=out.dtype)
            factor *= -imag * np.pi * (1 - 1.0 / length)
            np.exp(factor, out=factor)
        return factor.astype(out.dtype, copy=False)

    onedim_arrs = []
    for axis, shift in zip(axes, shift_list):
        length = shape[axis]
        onedim_arrs.append(_onedim_arr(length, shift))

    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
Example #14
    def __init__(self, map_type, fspace, partition, tspace, linear=False):
        """Initialize a new instance.

        Parameters
        ----------
        map_type : {'sampling', 'interpolation'}
            The type of operator
        fspace : `FunctionSpace`
            The non-discretized (abstract) set of functions to be
            discretized
        partition : `RectPartition`
            Partition of (a subset of) ``fspace.domain`` based on a
            `RectGrid`.
        tspace : `TensorSpace`
            Space providing containers for the values/coefficients of a
            discretized object. Its `TensorSpace.shape` must be equal
            to ``partition.shape``.
        linear : bool, optional
            Create a linear operator if ``True``, otherwise a non-linear
            operator.
        """
        map_type, map_type_in = str(map_type).lower(), map_type
        if map_type not in ('sampling', 'interpolation'):
            raise ValueError('`map_type` {!r} not understood'
                             ''.format(map_type_in))
        if not isinstance(fspace, FunctionSpace):
            raise TypeError('`fspace` {!r} is not a `FunctionSpace` '
                            'instance'.format(fspace))

        if not isinstance(partition, RectPartition):
            raise TypeError('`partition` {!r} is not a `RectPartition` '
                            'instance'.format(partition))
        if not isinstance(tspace, TensorSpace):
            raise TypeError('`tspace` {!r} is not a `TensorSpace` instance'
                            ''.format(tspace))

        if not fspace.domain.contains_set(partition):
            raise ValueError('{} not contained in the domain {} '
                             'of the function set {}'
                             ''.format(partition, fspace.domain, fspace))

        if tspace.shape != partition.shape:
            raise ValueError('`tspace.shape` not equal to `partition.shape`: '
                             '{} != {}'
                             ''.format(tspace.shape, partition.shape))

        domain = fspace if map_type == 'sampling' else tspace
        range = tspace if map_type == 'sampling' else fspace
        super(FunctionSpaceMapping, self).__init__(domain,
                                                   range,
                                                   linear=linear)
        self.__partition = partition

        if self.is_linear:
            if self.domain.field is None:
                raise TypeError('`fspace.field` cannot be `None` for '
                                '`linear=True`')
            if not is_numeric_dtype(tspace.dtype):
                raise TypeError('`tspace.dtype` must be a numeric data type '
                                'for `linear=True`, got {}'
                                ''.format(dtype_repr(tspace.dtype)))
            if fspace.field != tspace.field:
                raise ValueError('`fspace.field` not equal to `tspace.field`: '
                                 '{} != {}'
                                 ''.format(fspace.field, tspace.field))