def test_resizing_op_call(fn_impl):
    """Evaluate ``ResizingOperator`` with and without ``out``."""
    scalar_dtypes = [dt
                     for dt in odl.FN_IMPLS[fn_impl].available_dtypes()
                     if is_scalar_dtype(dt)]

    # NOTE(review): the loop variable is unused in the body; the spaces are
    # always created with the backend's default dtype -- TODO confirm intent.
    for _dtype in scalar_dtypes:
        # Minimal test since this operator only wraps resize_array
        space = odl.uniform_discr([0, -1], [1, 1], (4, 5), impl=fn_impl)
        res_space = odl.uniform_discr([0, -0.6], [2, 0.2], (8, 2),
                                      impl=fn_impl)
        res_op = odl.ResizingOperator(space, res_space)

        expected = np.zeros((8, 2))
        expected[:4, :] = 1

        # Out-of-place evaluation
        assert np.array_equal(res_op(space.one()), expected)

        # In-place evaluation
        result = res_space.element()
        res_op(space.one(), out=result)
        assert np.array_equal(result, expected)

        # Test also mapping to default impl for other 'fn_impl'
        if fn_impl != 'numpy':
            space = odl.uniform_discr([0, -1], [1, 1], (4, 5), impl=fn_impl)
            res_space = odl.uniform_discr([0, -0.6], [2, 0.2], (8, 2))
            res_op = odl.ResizingOperator(space, res_space)

            assert np.array_equal(res_op(space.one()), expected)

            result = res_space.element()
            res_op(space.one(), out=result)
            assert np.array_equal(result, expected)
def test_resizing_op_call(fn_impl):
    """Check forward evaluation of ``ResizingOperator``."""
    available = odl.FN_IMPLS[fn_impl].available_dtypes()
    dtypes = [dt for dt in available if is_scalar_dtype(dt)]

    for dtype in dtypes:
        # Minimal test since this operator only wraps resize_array
        space = odl.uniform_discr([0, -1], [1, 1], (4, 5), impl=fn_impl)
        res_space = odl.uniform_discr([0, -0.6], [2, 0.2], (8, 2),
                                      impl=fn_impl)
        res_op = odl.ResizingOperator(space, res_space)

        # Ones in the original 4 rows, zeros in the padded region
        true_res = np.vstack([np.ones((4, 2)), np.zeros((4, 2))])

        out = res_op(space.one())
        assert np.array_equal(out, true_res)

        out = res_space.element()
        res_op(space.one(), out=out)
        assert np.array_equal(out, true_res)

        # Test also mapping to default impl for other 'fn_impl'
        if fn_impl != 'numpy':
            space = odl.uniform_discr([0, -1], [1, 1], (4, 5), impl=fn_impl)
            res_space = odl.uniform_discr([0, -0.6], [2, 0.2], (8, 2))
            res_op = odl.ResizingOperator(space, res_space)

            out = res_op(space.one())
            assert np.array_equal(out, true_res)

            out = res_space.element()
            res_op(space.one(), out=out)
            assert np.array_equal(out, true_res)
def dspace_type(space, impl, dtype=None):
    """Select the correct corresponding n-tuples space.

    Parameters
    ----------
    space : `LinearSpace`
        Template space from which to infer an adequate data space. If
        it has a `LinearSpace.field` attribute, ``dtype`` must be
        consistent with it.
    impl : string
        Implementation backend for the data space.
    dtype : `numpy.dtype`, optional
        Data type which the space is supposed to use. If ``None`` is
        given, the space type is purely determined from ``space`` and
        ``impl``. Otherwise, it must be compatible with the field of
        ``space``.

    Returns
    -------
    stype : type
        Space type selected after the space's field, the backend and
        the data type.
    """
    spacetype_map = {
        RealNumbers: FN_IMPLS,
        ComplexNumbers: FN_IMPLS,
        type(None): NTUPLES_IMPLS
    }

    # NOTE(review): `field_type` is always a *type* (possibly `type(None)`),
    # so the `field_type is None` guards below can never fire -- TODO confirm
    # whether `field_type is type(None)` was intended.
    field_type = type(getattr(space, 'field', None))

    if dtype is None:
        pass
    elif is_real_floating_dtype(dtype):
        if field_type is None or field_type == ComplexNumbers:
            # FIX: use {!r} for the field type, consistent with the
            # messages in the other branches below.
            raise TypeError('real floating data type {!r} requires space '
                            'field to be of type RealNumbers, got {!r}'
                            ''.format(dtype, field_type))
    elif is_complex_floating_dtype(dtype):
        if field_type is None or field_type == RealNumbers:
            raise TypeError('complex floating data type {!r} requires space '
                            'field to be of type ComplexNumbers, got {!r}'
                            ''.format(dtype, field_type))
    elif is_scalar_dtype(dtype):
        if field_type == ComplexNumbers:
            raise TypeError('non-floating data type {!r} requires space field '
                            'to be of type RealNumbers, got {!r}'.format(
                                dtype, field_type))
    else:
        raise TypeError('non-scalar data type {!r} cannot be combined with '
                        'a `LinearSpace`'.format(dtype))

    # Look up the backend registry for the field, then the impl within it
    stype = spacetype_map[field_type].get(impl, None)

    if stype is None:
        raise NotImplementedError('no corresponding data space available '
                                  'for space {!r} and implementation {!r}'
                                  ''.format(space, impl))
    return stype
def dspace_type(space, impl, dtype=None):
    """Select the correct corresponding n-tuples space.

    Parameters
    ----------
    space : `LinearSpace`
        Template space from which to infer an adequate data space. If it
        has a `LinearSpace.field` attribute, ``dtype`` must be consistent
        with it.
    impl : string
        Implementation backend for the data space.
    dtype : `numpy.dtype`, optional
        Data type which the space is supposed to use. If ``None`` is
        given, the space type is purely determined from ``space`` and
        ``impl``. Otherwise, it must be compatible with the field of
        ``space``.

    Returns
    -------
    stype : type
        Space type selected after the space's field, the backend and the
        data type.
    """
    field_type = type(getattr(space, 'field', None))

    # Validate the dtype against the field of the template space
    if dtype is not None:
        if is_real_floating_dtype(dtype):
            if field_type is None or field_type == ComplexNumbers:
                raise TypeError('real floating data type {!r} requires space '
                                'field to be of type RealNumbers, got {}'
                                ''.format(dtype, field_type))
        elif is_complex_floating_dtype(dtype):
            if field_type is None or field_type == RealNumbers:
                raise TypeError('complex floating data type {!r} requires '
                                'space field to be of type ComplexNumbers, '
                                'got {!r}'.format(dtype, field_type))
        elif is_scalar_dtype(dtype):
            if field_type == ComplexNumbers:
                raise TypeError('non-floating data type {!r} requires space '
                                'field to be of type RealNumbers, got {!r}'
                                ''.format(dtype, field_type))
        else:
            raise TypeError('non-scalar data type {!r} cannot be combined '
                            'with a `LinearSpace`'.format(dtype))

    # Map the field type to its backend registry, then look up the impl
    impl_registry = {RealNumbers: FN_IMPLS,
                     ComplexNumbers: FN_IMPLS,
                     type(None): NTUPLES_IMPLS}[field_type]
    stype = impl_registry.get(impl, None)

    if stype is None:
        raise NotImplementedError('no corresponding data space available '
                                  'for space {!r} and implementation {!r}'
                                  ''.format(space, impl))
    return stype
def test_resizing_op_inverse(padding, fn_impl):
    """Check that ``inverse(op(x)) == x`` for extension-only resizing."""
    pad_mode, pad_const = padding
    scalar_dtypes = [dt
                     for dt in odl.FN_IMPLS[fn_impl].available_dtypes()
                     if is_scalar_dtype(dt)]

    for dtype in scalar_dtypes:
        space = odl.uniform_discr([0, -1], [1, 1], (4, 5), dtype=dtype,
                                  impl=fn_impl)
        res_space = odl.uniform_discr([0, -1.4], [1.5, 1.4], (6, 7),
                                      dtype=dtype, impl=fn_impl)
        res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode,
                                      pad_const=pad_const)

        # A left inverse exists only because the operator extends in all axes
        x = noise_element(space)
        assert res_op.inverse(res_op(x)) == x
def test_resizing_op_properties(fn_impl, padding):
    """Verify the public attributes of ``ResizingOperator``."""
    scalar_dtypes = [dt
                     for dt in odl.FN_IMPLS[fn_impl].available_dtypes()
                     if is_scalar_dtype(dt)]
    pad_mode, pad_const = padding

    for dtype in scalar_dtypes:
        # Nonzero constant padding makes the operator affine, not linear
        expect_linear = not (pad_mode == 'constant' and pad_const != 0)

        # Explicit range
        space = odl.uniform_discr([0, -1], [1, 1], (10, 5), dtype=dtype)
        res_space = odl.uniform_discr([0, -3], [2, 3], (20, 15), dtype=dtype)
        res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode,
                                      pad_const=pad_const)

        assert res_op.domain == space
        assert res_op.range == res_space
        assert res_op.offset == (0, 5)
        assert res_op.pad_mode == pad_mode
        assert res_op.pad_const == pad_const
        assert res_op.is_linear == expect_linear

        # Implicit range via ran_shp and offset
        res_op = odl.ResizingOperator(space, ran_shp=(20, 15), offset=[0, 5],
                                      pad_mode=pad_mode, pad_const=pad_const)
        assert np.allclose(res_op.range.min_pt, res_space.min_pt)
        assert np.allclose(res_op.range.max_pt, res_space.max_pt)
        assert np.allclose(res_op.range.cell_sides, res_space.cell_sides)
        assert res_op.range.dtype == res_space.dtype
        assert res_op.offset == (0, 5)
        assert res_op.pad_mode == pad_mode
        assert res_op.pad_const == pad_const
        assert res_op.is_linear == expect_linear
def __init__(self, size, dtype):
    """Initialize a new instance.

    Parameters
    ----------
    size : non-negative int
        Number of entries in a tuple.
    dtype :
        Data type for each tuple entry. Can be provided in any way the
        `numpy.dtype` function understands, most notably as built-in
        type, as one of NumPy's internal datatype objects or as
        string.

        Only scalar data types (numbers) are allowed.
    """
    NtuplesBase.__init__(self, size, dtype)

    # Reject non-numeric dtypes up front; `self.dtype` was normalized
    # by `NtuplesBase.__init__`.
    if not is_scalar_dtype(self.dtype):
        raise TypeError('{!r} is not a scalar data type'.format(dtype))

    if is_real_dtype(self.dtype):
        # Real space: it is its own real counterpart; the complex
        # counterpart dtype may not exist (e.g. for integer dtypes).
        field = RealNumbers()
        self.__is_real = True
        self.__real_dtype = self.dtype
        self.__real_space = self
        try:
            self.__complex_dtype = complex_dtype(self.dtype)
        except ValueError:
            # No complex counterpart for this dtype
            self.__complex_dtype = None
        self.__complex_space = None  # Set in first call of astype
    else:
        # Complex space: it is its own complex counterpart; the real
        # counterpart dtype may not exist.
        field = ComplexNumbers()
        self.__is_real = False
        try:
            self.__real_dtype = real_dtype(self.dtype)
        except ValueError:
            # No real counterpart for this dtype
            self.__real_dtype = None
        self.__real_space = None  # Set in first call of astype
        self.__complex_dtype = self.dtype
        self.__complex_space = self

    self.__is_floating = is_floating_dtype(self.dtype)
    LinearSpace.__init__(self, field)
def __init__(self, size, dtype):
    """Initialize a new instance.

    Parameters
    ----------
    size : non-negative int
        Number of entries in a tuple.
    dtype :
        Data type for each tuple entry, given in any form understood by
        the `numpy.dtype` function (built-in type, NumPy dtype object or
        string). Only scalar data types (numbers) are allowed.
    """
    NtuplesBase.__init__(self, size, dtype)

    if not is_scalar_dtype(self.dtype):
        raise TypeError('{!r} is not a scalar data type'.format(dtype))

    self.__is_real = is_real_dtype(self.dtype)
    if self.__is_real:
        # This space is its own real counterpart; the complex
        # counterpart dtype may not exist for this dtype.
        field = RealNumbers()
        self.__real_dtype = self.dtype
        self.__real_space = self
        try:
            cdtype = complex_dtype(self.dtype)
        except ValueError:
            cdtype = None
        self.__complex_dtype = cdtype
        self.__complex_space = None  # Filled lazily in astype
    else:
        # This space is its own complex counterpart; the real
        # counterpart dtype may not exist for this dtype.
        field = ComplexNumbers()
        try:
            rdtype = real_dtype(self.dtype)
        except ValueError:
            rdtype = None
        self.__real_dtype = rdtype
        self.__real_space = None  # Filled lazily in astype
        self.__complex_dtype = self.dtype
        self.__complex_space = self

    self.__is_floating = is_floating_dtype(self.dtype)
    LinearSpace.__init__(self, field)
def test_resizing_op_inverse(padding, fn_impl):
    """Check that ``ResizingOperator.inverse`` is a left inverse."""
    pad_mode, pad_const = padding
    # CONSISTENCY FIX: every other test here looks up the backend via the
    # ``odl.FN_IMPLS`` mapping; ``odl.fn_impl(...)`` was the odd one out.
    dtypes = [dt for dt in odl.FN_IMPLS[fn_impl].available_dtypes()
              if is_scalar_dtype(dt)]

    for dtype in dtypes:
        space = odl.uniform_discr([0, -1], [1, 1], (4, 5), dtype=dtype,
                                  impl=fn_impl)
        res_space = odl.uniform_discr([0, -1.4], [1.5, 1.4], (6, 7),
                                      dtype=dtype, impl=fn_impl)
        res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode,
                                      pad_const=pad_const)

        # Only a left inverse since the operator extends in all axes
        x = noise_element(space)
        assert res_op.inverse(res_op(x)) == x
def test_resizing_op_properties(fn_impl, padding):
    """Check attributes of ``ResizingOperator`` for explicit/implicit range."""
    dtypes = [dt for dt in odl.FN_IMPLS[fn_impl].available_dtypes()
              if is_scalar_dtype(dt)]
    pad_mode, pad_const = padding

    for dtype in dtypes:
        # Explicit range
        dom = odl.uniform_discr([0, -1], [1, 1], (10, 5), dtype=dtype)
        ran = odl.uniform_discr([0, -3], [2, 3], (20, 15), dtype=dtype)
        op = odl.ResizingOperator(dom, ran, pad_mode=pad_mode,
                                  pad_const=pad_const)

        assert op.domain == dom
        assert op.range == ran
        assert op.offset == (0, 5)
        assert op.pad_mode == pad_mode
        assert op.pad_const == pad_const
        # Nonzero constant padding makes the operator non-linear
        if pad_mode == 'constant' and pad_const != 0:
            assert not op.is_linear
        else:
            assert op.is_linear

        # Implicit range via ran_shp and offset
        op = odl.ResizingOperator(dom, ran_shp=(20, 15), offset=[0, 5],
                                  pad_mode=pad_mode, pad_const=pad_const)

        assert np.allclose(op.range.min_pt, ran.min_pt)
        assert np.allclose(op.range.max_pt, ran.max_pt)
        assert np.allclose(op.range.cell_sides, ran.cell_sides)
        assert op.range.dtype == ran.dtype
        assert op.offset == (0, 5)
        assert op.pad_mode == pad_mode
        assert op.pad_const == pad_const
        if pad_mode == 'constant' and pad_const != 0:
            assert not op.is_linear
        else:
            assert op.is_linear
def contains_all(self, other):
    """Return ``True`` if ``other`` is a sequence of complex numbers."""
    # Use an explicit dtype attribute when present; otherwise infer the
    # common scalar type of the entries.
    dtype = getattr(other, 'dtype', None)
    return is_scalar_dtype(dtype if dtype is not None
                           else np.result_type(*other))
def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None):
    """Pre-process the real-space data before DFT.

    This function multiplies the given data with the separable
    function::

        p(x) = exp(+- 1j * dot(x - x[0], xi[0]))

    where ``x[0]`` and ``xi[0]`` are the minimum coordinates of the
    real-space and reciprocal grids, respectively. The sign of the
    exponent depends on the choice of ``sign``. In discretized form,
    this function becomes an array::

        p[k] = exp(+- 1j * k * s * xi[0])

    If the reciprocal grid is not shifted, i.e. symmetric around 0, it
    is ``xi[0] = pi/s * (-1 + 1/N)``, hence::

        p[k] = exp(-+ 1j * pi * k * (1 - 1/N))

    For a shifted grid, we have :math:``xi[0] = -pi/s``, thus the array
    is given by::

        p[k] = (-1)**k

    Parameters
    ----------
    arr : `array-like`
        Array to be pre-processed. If its data type is a real
        non-floating type, it is converted to 'float64'.
    shift : bool or sequence of bools, optional
        If ``True``, the grid is shifted by half a stride in the
        negative direction. With a sequence, this option is applied
        separately on each axis.
    axes : int or sequence of ints, optional
        Dimensions in which to calculate the reciprocal. The sequence
        must have the same length as ``shift`` if the latter is given
        as a sequence. Default: all axes.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``, an
        in-place modification is performed. For real data type, this
        is only possible for ``shift=True`` since the factors are
        complex otherwise.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the pre-processing. If ``out`` was given, the
        returned object is a reference to it.

    Notes
    -----
    If ``out`` is not specified, the data type of the returned array
    is the same as that of ``arr`` except when ``arr`` has real data
    type and ``shift`` is not ``True``. In this case, the return type
    is the complex counterpart of ``arr.dtype``.
    """
    arr = np.asarray(arr)
    if not is_scalar_dtype(arr.dtype):
        raise ValueError('array has non-scalar data type {}'
                         ''.format(dtype_repr(arr.dtype)))
    elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype):
        arr = arr.astype('float64')

    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    shape = arr.shape
    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    # Make a copy of arr with correct data type if necessary, or copy values.
    if out is None:
        if is_real_dtype(arr.dtype) and not all(shift_list):
            out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True)
        else:
            out = arr.copy()
    else:
        out[:] = arr

    # BUG FIX: ``shift`` may be a sequence, for which ``not shift`` is
    # always False (non-empty list). Use the normalized per-axis list, as
    # in the branch above, so mixed shifts are correctly rejected for
    # real output arrays (the factors are complex in that case).
    if is_real_dtype(out.dtype) and not all(shift_list):
        raise ValueError('cannot pre-process real input in-place without '
                         'shift')

    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    def _onedim_arr(length, shift):
        """Return the 1D factor ``p[k]`` for one axis."""
        if shift:
            # (-1)^indices
            factor = np.ones(length, dtype=out.dtype)
            factor[1::2] = -1
        else:
            factor = np.arange(length, dtype=out.dtype)
            factor *= -imag * np.pi * (1 - 1.0 / length)
            np.exp(factor, out=factor)
        return factor.astype(out.dtype, copy=False)

    onedim_arrs = []
    for axis, shift in zip(axes, shift_list):
        length = shape[axis]
        onedim_arrs.append(_onedim_arr(length, shift))

    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
def vector(array, dtype=None, impl='numpy'):
    """Create an n-tuples type vector from an array.

    Parameters
    ----------
    array : `array-like`
        Array from which to create the vector. Scalars become
        one-dimensional vectors.
    dtype : optional
        Set the data type of the vector manually with this option.
        By default, the space type is inferred from the input data.
    impl : string, optional
        The backend to use. See `odl.space.entry_points.NTUPLES_IMPLS` and
        `odl.space.entry_points.FN_IMPLS` for available options.

    Returns
    -------
    vec : `NtuplesBaseVector`
        Vector created from the input array. Its concrete type depends
        on the provided arguments.

    Notes
    -----
    This is a convenience function and not intended for use in
    speed-critical algorithms.

    Examples
    --------
    >>> vector([1, 2, 3])  # No automatic cast to float
    fn(3, 'int').element([1, 2, 3])
    >>> vector([1, 2, 3], dtype=float)
    rn(3).element([1.0, 2.0, 3.0])
    >>> vector([1 + 1j, 2, 3 - 2j])
    cn(3).element([(1+1j), (2+0j), (3-2j)])

    Non-scalar types are also supported:

    >>> vector([True, False])
    ntuples(2, 'bool').element([True, False])

    Scalars become a one-element vector:

    >>> vector(0.0)
    rn(1).element([0.0])
    """
    # Normalize input: scalars become length-1 arrays
    arr = np.array(array, copy=False, ndmin=1)
    if arr.ndim > 1:
        raise ValueError('array has {} dimensions, expected 1'
                         ''.format(arr.ndim))

    # A user-provided dtype wins over the inferred one
    space_dtype = arr.dtype if dtype is None else dtype

    # Scalar (numeric) dtypes go into `fn`-type spaces, everything else
    # into plain `ntuples` (see the doctest examples above)
    space_type = fn if (space_dtype is None or
                        is_scalar_dtype(space_dtype)) else ntuples

    return space_type(len(arr), dtype=space_dtype, impl=impl).element(arr)
def contains_all(self, other):
    """Return ``True`` if ``other`` is a sequence of complex numbers."""
    candidate_dtype = getattr(other, 'dtype', None)
    if candidate_dtype is None:
        # No ``dtype`` attribute: infer the common type of the entries
        candidate_dtype = np.result_type(*other)
    return is_scalar_dtype(candidate_dtype)
def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None):
    """Pre-process the real-space data before DFT.

    This function multiplies the given data with the separable
    function::

        p(x) = exp(+- 1j * dot(x - x[0], xi[0]))

    where ``x[0]`` and ``xi[0]`` are the minimum coordinates of the
    real-space and reciprocal grids, respectively. The sign of the
    exponent depends on the choice of ``sign``. In discretized form,
    this function becomes an array::

        p[k] = exp(+- 1j * k * s * xi[0])

    If the reciprocal grid is not shifted, i.e. symmetric around 0, it
    is ``xi[0] = pi/s * (-1 + 1/N)``, hence::

        p[k] = exp(-+ 1j * pi * k * (1 - 1/N))

    For a shifted grid, we have :math:``xi[0] = -pi/s``, thus the array
    is given by::

        p[k] = (-1)**k

    Parameters
    ----------
    arr : `array-like`
        Array to be pre-processed. If its data type is a real
        non-floating type, it is converted to 'float64'.
    shift : bool or sequence of bools, optional
        If ``True``, the grid is shifted by half a stride in the
        negative direction. With a sequence, this option is applied
        separately on each axis.
    axes : int or sequence of ints, optional
        Dimensions in which to calculate the reciprocal. The sequence
        must have the same length as ``shift`` if the latter is given
        as a sequence. Default: all axes.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``, an
        in-place modification is performed. For real data type, this
        is only possible for ``shift=True`` since the factors are
        complex otherwise.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the pre-processing. If ``out`` was given, the
        returned object is a reference to it.

    Notes
    -----
    If ``out`` is not specified, the data type of the returned array
    is the same as that of ``arr`` except when ``arr`` has real data
    type and ``shift`` is not ``True``. In this case, the return type
    is the complex counterpart of ``arr.dtype``.
    """
    arr = np.asarray(arr)
    if not is_scalar_dtype(arr.dtype):
        raise ValueError('array has non-scalar data type {}'
                         ''.format(dtype_repr(arr.dtype)))
    elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype):
        arr = arr.astype('float64')

    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    shape = arr.shape
    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    # Make a copy of arr with correct data type if necessary, or copy values.
    if out is None:
        if is_real_dtype(arr.dtype) and not all(shift_list):
            out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True)
        else:
            out = arr.copy()
    else:
        out[:] = arr

    # BUG FIX: ``shift`` may be a sequence, for which ``not shift`` is
    # always False (non-empty list). Use the normalized per-axis list, as
    # in the branch above, so mixed shifts are correctly rejected for
    # real output arrays (the factors are complex in that case).
    if is_real_dtype(out.dtype) and not all(shift_list):
        raise ValueError('cannot pre-process real input in-place without '
                         'shift')

    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    def _onedim_arr(length, shift):
        """Return the 1D factor ``p[k]`` for one axis."""
        if shift:
            # (-1)^indices
            factor = np.ones(length, dtype=out.dtype)
            factor[1::2] = -1
        else:
            factor = np.arange(length, dtype=out.dtype)
            factor *= -imag * np.pi * (1 - 1.0 / length)
            np.exp(factor, out=factor)
        return factor.astype(out.dtype, copy=False)

    onedim_arrs = []
    for axis, shift in zip(axes, shift_list):
        length = shape[axis]
        onedim_arrs.append(_onedim_arr(length, shift))

    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
def vector(array, dtype=None, impl='numpy'):
    """Create an n-tuples type vector from an array.

    Parameters
    ----------
    array : `array-like`
        Array from which to create the vector. Scalars become
        one-dimensional vectors.
    dtype : optional
        Set the data type of the vector manually with this option.
        By default, the space type is inferred from the input data.
    impl : string, optional
        The backend to use. See `odl.space.entry_points.NTUPLES_IMPLS` and
        `odl.space.entry_points.FN_IMPLS` for available options.

    Returns
    -------
    vec : `NtuplesBaseVector`
        Vector created from the input array. Its concrete type depends
        on the provided arguments.

    Notes
    -----
    This is a convenience function and not intended for use in
    speed-critical algorithms.

    Examples
    --------
    >>> vector([1, 2, 3])  # No automatic cast to float
    fn(3, 'int').element([1, 2, 3])
    >>> vector([1, 2, 3], dtype=float)
    rn(3).element([1.0, 2.0, 3.0])
    >>> vector([1 + 1j, 2, 3 - 2j])
    cn(3).element([(1+1j), (2+0j), (3-2j)])

    Non-scalar types are also supported:

    >>> vector([True, False])
    ntuples(2, 'bool').element([True, False])

    Scalars become a one-element vector:

    >>> vector(0.0)
    rn(1).element([0.0])
    """
    # Sanitize input (scalars become 1d arrays of length 1)
    arr = np.array(array, copy=False, ndmin=1)

    if arr.ndim > 1:
        raise ValueError('array has {} dimensions, expected 1'
                         ''.format(arr.ndim))

    if dtype is None:
        space_dtype = arr.dtype
    else:
        space_dtype = dtype

    if space_dtype is None or is_scalar_dtype(space_dtype):
        # Numeric data types -> fn-type space
        constructor = fn
    else:
        # Anything else (e.g. bool, see doctest above) -> generic ntuples
        constructor = ntuples

    space = constructor(len(arr), dtype=space_dtype, impl=impl)
    return space.element(arr)