def test_normalized_scalar_param_list_single_val(length, single_conv):
    """A scalar input should be broadcast to a list of given length."""
    val, conv = single_conv

    # Without conversion, the scalar is simply repeated `length` times
    assert normalized_scalar_param_list(val, length) == [val] * length

    # With conversion, each repeated entry is the converted scalar
    converted = normalized_scalar_param_list(val, length, param_conv=conv)
    assert converted == [conv(val)] * length
def test_normalized_scalar_param_list_single_val(length, single_conv):
    """Check broadcasting of a single value, with and without conversion."""
    value, conversion = single_conv

    # Plain broadcast: one copy of `value` per requested entry
    expected = [value for _ in range(length)]
    assert normalized_scalar_param_list(value, length) == expected

    # Converted broadcast: the conversion applies to every entry
    expected = [conversion(value) for _ in range(length)]
    result = normalized_scalar_param_list(value, length,
                                          param_conv=conversion)
    assert result == expected
def test_normalized_scalar_param_list_sequence(length, seq_conv):
    """A sequence of matching length should pass through element-wise."""
    seq, conv = seq_conv
    seq = seq * length  # repeat to obtain a sequence of the right length

    # Without conversion, the result equals the input sequence as a list
    assert normalized_scalar_param_list(seq, length) == list(seq)

    # With conversion, each entry is converted individually
    converted = normalized_scalar_param_list(seq, length, param_conv=conv)
    assert converted == [conv(entry) for entry in seq]
def test_normalized_scalar_param_list_sequence(length, seq_conv):
    """Check element-wise handling of sequence parameters."""
    base, conversion = seq_conv
    params = base * length  # sequence of exactly `length` entries

    no_conv = normalized_scalar_param_list(params, length)
    assert list(params) == no_conv

    with_conv = normalized_scalar_param_list(params, length,
                                             param_conv=conversion)
    assert [conversion(p) for p in params] == with_conv
def test_normalized_scalar_param_list_with_none():
    """Check the interaction of ``None`` entries with ``keep_none``."""

    def to_int(x):
        # Map None to 0, everything else through int()
        return 0 if x is None else int(x)

    seq = [1, None, 0]

    # Sequence containing a None entry
    assert normalized_scalar_param_list(seq, length=3) == seq

    kept = normalized_scalar_param_list(seq, length=3, param_conv=to_int,
                                        keep_none=True)
    assert kept == seq

    replaced = normalized_scalar_param_list(seq, length=3, param_conv=to_int,
                                            keep_none=False)
    assert replaced == [1, 0, 0]

    # Scalar None, broadcast to length 3
    assert normalized_scalar_param_list(None, length=3) == [None] * 3

    kept = normalized_scalar_param_list(None, length=3, param_conv=to_int,
                                        keep_none=True)
    assert kept == [None] * 3

    replaced = normalized_scalar_param_list(None, length=3, param_conv=to_int,
                                            keep_none=False)
    assert replaced == [0] * 3
def test_normalized_scalar_param_list_with_none():
    """Verify `keep_none` behavior for sequence and scalar None inputs."""

    def conv(x):
        if x is None:
            return 0
        return int(x)

    params = [1, None, 0]
    cases = [
        # (input, keep_none, expected)
        (params, True, params),
        (params, False, [1, 0, 0]),
        (None, True, [None] * 3),
        (None, False, [0] * 3),
    ]

    # Without a conversion, None entries always survive
    assert normalized_scalar_param_list(params, length=3) == params
    assert normalized_scalar_param_list(None, length=3) == [None] * 3

    # With conversion, `keep_none` decides whether None is converted
    for param, keep, expected in cases:
        normalized = normalized_scalar_param_list(
            param, length=3, param_conv=conv, keep_none=keep)
        assert normalized == expected
def resize_array(arr, newshp, offset=None, pad_mode='constant', pad_const=0,
                 direction='forward', out=None):
    """Return the resized version of ``arr`` with shape ``newshp``.

    In axes where ``newshp > arr.shape``, padding is applied according
    to the supplied options. Where ``newshp < arr.shape``, the array
    is cropped to the new size.

    See `the online documentation
    <https://odlgroup.github.io/odl/math/resizing_ops.html>`_
    on resizing operators for mathematical details.

    Parameters
    ----------
    arr : `array-like`
        Array to be resized.
    newshp : sequence of ints
        Desired shape of the output array.
    offset : sequence of ints, optional
        Specifies how many entries are added to/removed from the
        "left" side (corresponding to low indices) of ``arr``.
    pad_mode : string, optional
        Method to be used to fill in missing values in an enlarged
        array.

        ``'constant'``: Fill with ``pad_const``.

        ``'symmetric'``: Reflect at the boundaries, not doubling the
        outmost values. This requires left and right padding sizes
        to be strictly smaller than the original array shape.

        ``'periodic'``: Fill in values from the other side, keeping
        the order. This requires left and right padding sizes to be
        at most as large as the original array shape.

        ``'order0'``: Extend constantly with the outmost values
        (ensures continuity).

        ``'order1'``: Extend with constant slope (ensures continuity
        of the first derivative). This requires at least 2 values
        along each axis where padding is applied.

    pad_const : scalar, optional
        Value to be used in the ``'constant'`` padding mode.
    direction : {'forward', 'adjoint'}
        Determines which variant of the resizing is applied.

        'forward' : in axes where ``out`` is larger than ``arr``,
        apply padding. Otherwise, restrict to the smaller size.

        'adjoint' : in axes where ``out`` is larger than ``arr``,
        apply zero-padding. Otherwise, restrict to the smaller size
        and add the outside contributions according to ``pad_mode``.

    out : `numpy.ndarray`, optional
        Array to write the result to. Must have shape ``newshp`` and
        be able to hold the data type of the input array.

    Returns
    -------
    resized : `numpy.ndarray`
        Resized array created according to the above rules. If ``out``
        was given, the returned object is a reference to it.

    Examples
    --------
    The input can be shrunk by simply providing a smaller size.
    By default, values are removed from the right. When enlarging,
    zero-padding is applied by default, and the zeros are added to
    the right side. That behavior can be changed with the ``offset``
    parameter:

    >>> from odl.util.numerics import resize_array
    >>> resize_array([1, 2, 3], (1,))
    array([1])
    >>> resize_array([1, 2, 3], (1,), offset=2)
    array([3])
    >>> resize_array([1, 2, 3], (6,))
    array([1, 2, 3, 0, 0, 0])
    >>> resize_array([1, 2, 3], (7,), offset=2)
    array([0, 0, 1, 2, 3, 0, 0])

    The padding constant can be changed, as well as the padding mode:

    >>> resize_array([1, 2, 3], (7,), pad_const=-1, offset=2)
    array([-1, -1,  1,  2,  3, -1, -1])
    >>> resize_array([1, 2, 3], (7,), pad_mode='periodic', offset=2)
    array([2, 3, 1, 2, 3, 1, 2])
    >>> resize_array([1, 2, 3], (7,), pad_mode='symmetric', offset=2)
    array([3, 2, 1, 2, 3, 2, 1])
    >>> resize_array([1, 2, 3], (7,), pad_mode='order0', offset=2)
    array([1, 1, 1, 2, 3, 3, 3])
    >>> resize_array([1, 2, 3], (7,), pad_mode='order1', offset=2)
    array([-1,  0,  1,  2,  3,  4,  5])

    Everything works for arbitrary number of dimensions:

    >>> # Take the middle two columns and extend rows symmetrically
    >>> resize_array([[1, 2, 3, 4],
    ...               [5, 6, 7, 8],
    ...               [9, 10, 11, 12]],
    ...              (5, 2), pad_mode='symmetric', offset=[1, 1])
    array([[ 6,  7],
           [ 2,  3],
           [ 6,  7],
           [10, 11],
           [ 6,  7]])
    >>> # Take the rightmost two columns and extend rows symmetrically
    >>> # downwards
    >>> resize_array([[1, 2, 3, 4],
    ...               [5, 6, 7, 8],
    ...               [9, 10, 11, 12]], (5, 2), pad_mode='symmetric',
    ...              offset=[0, 2])
    array([[ 3,  4],
           [ 7,  8],
           [11, 12],
           [ 7,  8],
           [ 3,  4]])
    """
    # Handle arrays and shapes
    try:
        newshp = tuple(newshp)
    except TypeError:
        raise TypeError('`newshp` must be a sequence, got {!r}'.format(newshp))

    if out is not None:
        # `out` given: validate it and convert `arr` to matching dtype
        # and memory layout so the subsequent copies are cheap.
        if not isinstance(out, np.ndarray):
            raise TypeError('`out` must be a `numpy.ndarray` instance, got '
                            '{!r}'.format(out))
        if out.shape != newshp:
            raise ValueError('`out` must have shape {}, got {}'
                             ''.format(newshp, out.shape))

        order = 'C' if out.flags.c_contiguous else 'F'
        arr = np.asarray(arr, dtype=out.dtype, order=order)
        if arr.ndim != out.ndim:
            raise ValueError('number of axes of `arr` and `out` do not match '
                             '({} != {})'.format(arr.ndim, out.ndim))

    else:
        # No `out` given: allocate one matching `arr`'s dtype and layout.
        arr = np.asarray(arr)
        order = 'C' if arr.flags.c_contiguous else 'F'
        out = np.empty(newshp, dtype=arr.dtype, order=order)
        if len(newshp) != arr.ndim:
            raise ValueError('number of axes of `arr` and `len(newshp)` do '
                             'not match ({} != {})'
                             ''.format(arr.ndim, len(newshp)))

    # Handle offset: normalize to one int per axis (helper presumably
    # defined elsewhere in this module).
    if offset is None:
        offset = [0] * out.ndim
    else:
        offset = normalized_scalar_param_list(
            offset, out.ndim, param_conv=safe_int_conv, keep_none=False)

    # Handle padding
    pad_mode, pad_mode_in = str(pad_mode).lower(), pad_mode
    if pad_mode not in _SUPPORTED_RESIZE_PAD_MODES:
        raise ValueError("`pad_mode` '{}' not understood".format(pad_mode_in))

    # `pad_const` is only written to `out` if some axis is enlarged,
    # hence the safe-cast check is restricted to that case.
    if (pad_mode == 'constant' and
            not np.can_cast(pad_const, out.dtype) and
            any(n_new > n_orig
                for n_orig, n_new in zip(arr.shape, out.shape))):
        raise ValueError('`pad_const` {} cannot be safely cast to the data '
                         'type {} of the output array'
                         ''.format(pad_const, out.dtype))

    # Handle direction
    direction, direction_in = str(direction).lower(), direction
    if direction not in ('forward', 'adjoint'):
        raise ValueError("`direction` '{}' not understood"
                         "".format(direction_in))

    if direction == 'adjoint' and pad_mode == 'constant' and pad_const != 0:
        raise ValueError("`pad_const` must be 0 for 'adjoint' direction, "
                         "got {}".format(pad_const))

    # Initialize output: forward constant padding pre-fills with the
    # constant, every other case starts from zeros.
    if direction == 'forward' and pad_mode == 'constant' and pad_const != 0:
        out.fill(pad_const)
    else:
        out.fill(0)

    # Perform the resizing
    if direction == 'forward':
        if pad_mode == 'constant':
            # Constant padding does not require the helper function
            _assign_intersection(out, arr, offset)
        else:
            # First copy the inner part and use it for padding
            _assign_intersection(out, arr, offset)
            _apply_padding(out, arr, offset, pad_mode, 'forward')
    else:
        if pad_mode == 'constant':
            # Skip the padding helper
            _assign_intersection(out, arr, offset)
        else:
            # Apply adjoint padding to a copy of the input and copy the
            # inner part when finished
            tmp = arr.copy()
            _apply_padding(tmp, out, offset, pad_mode, 'adjoint')
            _assign_intersection(out, tmp, offset)

    return out
def test_normalized_scalar_param_list_error():
    """Sequences whose length differs from ``length`` must be rejected."""
    too_short = [1, 2]  # only 2 entries, but 3 requested
    with pytest.raises(ValueError):
        normalized_scalar_param_list(too_short, length=3)
def test_normalized_scalar_param_list_error():
    """A length mismatch between parameter and ``length`` should raise."""
    # Wrong length
    with pytest.raises(ValueError):
        normalized_scalar_param_list([1, 2], length=3)
def resize_array(arr, newshp, offset=None, pad_mode='constant', pad_const=0,
                 direction='forward', out=None):
    """Return the resized version of ``arr`` with shape ``newshp``.

    In axes where ``newshp > arr.shape``, padding is applied according
    to the supplied options. Where ``newshp < arr.shape``, the array
    is cropped to the new size.

    See `the online documentation
    <https://odlgroup.github.io/odl/math/resizing_ops.html>`_
    on resizing operators for mathematical details.

    Parameters
    ----------
    arr : `array-like`
        Array to be resized.
    newshp : sequence of ints
        Desired shape of the output array.
    offset : sequence of ints, optional
        Specifies how many entries are added to/removed from the
        "left" side (corresponding to low indices) of ``arr``.
    pad_mode : string, optional
        Method to be used to fill in missing values in an enlarged
        array.

        ``'constant'``: Fill with ``pad_const``.

        ``'symmetric'``: Reflect at the boundaries, not doubling the
        outmost values. This requires left and right padding sizes
        to be strictly smaller than the original array shape.

        ``'periodic'``: Fill in values from the other side, keeping
        the order. This requires left and right padding sizes to be
        at most as large as the original array shape.

        ``'order0'``: Extend constantly with the outmost values
        (ensures continuity).

        ``'order1'``: Extend with constant slope (ensures continuity
        of the first derivative). This requires at least 2 values
        along each axis where padding is applied.

    pad_const : scalar, optional
        Value to be used in the ``'constant'`` padding mode.
    direction : {'forward', 'adjoint'}
        Determines which variant of the resizing is applied.

        'forward' : in axes where ``out`` is larger than ``arr``,
        apply padding. Otherwise, restrict to the smaller size.

        'adjoint' : in axes where ``out`` is larger than ``arr``,
        apply zero-padding. Otherwise, restrict to the smaller size
        and add the outside contributions according to ``pad_mode``.

    out : `numpy.ndarray`, optional
        Array to write the result to. Must have shape ``newshp`` and
        be able to hold the data type of the input array.

    Returns
    -------
    resized : `numpy.ndarray`
        Resized array created according to the above rules. If ``out``
        was given, the returned object is a reference to it.

    Examples
    --------
    The input can be shrunk by simply providing a smaller size.
    By default, values are removed from the right. When enlarging,
    zero-padding is applied by default, and the zeros are added to
    the right side. That behavior can be changed with the ``offset``
    parameter:

    >>> from odl.util.numerics import resize_array
    >>> resize_array([1, 2, 3], (1,))
    array([1])
    >>> resize_array([1, 2, 3], (1,), offset=2)
    array([3])
    >>> resize_array([1, 2, 3], (6,))
    array([1, 2, 3, 0, 0, 0])
    >>> resize_array([1, 2, 3], (7,), offset=2)
    array([0, 0, 1, 2, 3, 0, 0])

    The padding constant can be changed, as well as the padding mode:

    >>> resize_array([1, 2, 3], (7,), pad_const=-1, offset=2)
    array([-1, -1,  1,  2,  3, -1, -1])
    >>> resize_array([1, 2, 3], (7,), pad_mode='periodic', offset=2)
    array([2, 3, 1, 2, 3, 1, 2])
    >>> resize_array([1, 2, 3], (7,), pad_mode='symmetric', offset=2)
    array([3, 2, 1, 2, 3, 2, 1])
    >>> resize_array([1, 2, 3], (7,), pad_mode='order0', offset=2)
    array([1, 1, 1, 2, 3, 3, 3])
    >>> resize_array([1, 2, 3], (7,), pad_mode='order1', offset=2)
    array([-1,  0,  1,  2,  3,  4,  5])

    Everything works for arbitrary number of dimensions:

    >>> # Take the middle two columns and extend rows symmetrically
    >>> resize_array([[1, 2, 3, 4],
    ...               [5, 6, 7, 8],
    ...               [9, 10, 11, 12]],
    ...              (5, 2), pad_mode='symmetric', offset=[1, 1])
    array([[ 6,  7],
           [ 2,  3],
           [ 6,  7],
           [10, 11],
           [ 6,  7]])
    >>> # Take the rightmost two columns and extend rows symmetrically
    >>> # downwards
    >>> resize_array([[1, 2, 3, 4],
    ...               [5, 6, 7, 8],
    ...               [9, 10, 11, 12]], (5, 2), pad_mode='symmetric',
    ...              offset=[0, 2])
    array([[ 3,  4],
           [ 7,  8],
           [11, 12],
           [ 7,  8],
           [ 3,  4]])
    """
    # Handle arrays and shapes
    try:
        newshp = tuple(newshp)
    except TypeError:
        raise TypeError('`newshp` must be a sequence, got {!r}'.format(newshp))

    if out is not None:
        # `out` given: validate it and convert `arr` to matching dtype
        # and memory layout so the subsequent copies are cheap.
        if not isinstance(out, np.ndarray):
            raise TypeError('`out` must be a `numpy.ndarray` instance, got '
                            '{!r}'.format(out))
        if out.shape != newshp:
            raise ValueError('`out` must have shape {}, got {}'
                             ''.format(newshp, out.shape))

        order = 'C' if out.flags.c_contiguous else 'F'
        arr = np.asarray(arr, dtype=out.dtype, order=order)
        if arr.ndim != out.ndim:
            raise ValueError('number of axes of `arr` and `out` do not match '
                             '({} != {})'.format(arr.ndim, out.ndim))

    else:
        # No `out` given: allocate one matching `arr`'s dtype and layout.
        arr = np.asarray(arr)
        order = 'C' if arr.flags.c_contiguous else 'F'
        out = np.empty(newshp, dtype=arr.dtype, order=order)
        if len(newshp) != arr.ndim:
            raise ValueError('number of axes of `arr` and `len(newshp)` do '
                             'not match ({} != {})'
                             ''.format(arr.ndim, len(newshp)))

    # Handle offset: normalize to one int per axis (helper presumably
    # defined elsewhere in this module).
    if offset is None:
        offset = [0] * out.ndim
    else:
        offset = normalized_scalar_param_list(
            offset, out.ndim, param_conv=safe_int_conv, keep_none=False)

    # Handle padding
    pad_mode, pad_mode_in = str(pad_mode).lower(), pad_mode
    if pad_mode not in _SUPPORTED_RESIZE_PAD_MODES:
        raise ValueError("`pad_mode` '{}' not understood".format(pad_mode_in))

    # `pad_const` is only written to `out` if some axis is enlarged,
    # hence the safe-cast check is restricted to that case.
    if (pad_mode == 'constant' and
            not np.can_cast(pad_const, out.dtype) and
            any(n_new > n_orig
                for n_orig, n_new in zip(arr.shape, out.shape))):
        raise ValueError('`pad_const` {} cannot be safely cast to the data '
                         'type {} of the output array'
                         ''.format(pad_const, out.dtype))

    # Handle direction
    direction, direction_in = str(direction).lower(), direction
    if direction not in ('forward', 'adjoint'):
        raise ValueError("`direction` '{}' not understood"
                         "".format(direction_in))

    if direction == 'adjoint' and pad_mode == 'constant' and pad_const != 0:
        raise ValueError("`pad_const` must be 0 for 'adjoint' direction, "
                         "got {}".format(pad_const))

    # Initialize output: forward constant padding pre-fills with the
    # constant, every other case starts from zeros.
    if direction == 'forward' and pad_mode == 'constant' and pad_const != 0:
        out.fill(pad_const)
    else:
        out.fill(0)

    # Perform the resizing
    if direction == 'forward':
        if pad_mode == 'constant':
            # Constant padding does not require the helper function
            _assign_intersection(out, arr, offset)
        else:
            # First copy the inner part and use it for padding
            _assign_intersection(out, arr, offset)
            _apply_padding(out, arr, offset, pad_mode, 'forward')
    else:
        if pad_mode == 'constant':
            # Skip the padding helper
            _assign_intersection(out, arr, offset)
        else:
            # Apply adjoint padding to a copy of the input and copy the
            # inner part when finished
            tmp = arr.copy()
            _apply_padding(tmp, out, offset, pad_mode, 'adjoint')
            _assign_intersection(out, tmp, offset)

    return out
def __init__(self, domain, range=None, ran_shp=None, **kwargs):
    """Initialize a new instance.

    Parameters
    ----------
    domain : uniform `DiscreteLp`
        Uniformly discretized space, the operator can be applied
        to its elements.
    range : uniform `DiscreteLp`, optional
        Uniformly discretized space in which the result of the
        application of this operator lies.
        For the default ``None``, a space with the same attributes
        as ``domain`` is used, except for its shape, which is set
        to ``ran_shp``.
    ran_shp : sequence of ints, optional
        Shape of the range of this operator. This can be provided
        instead of ``range`` and is mandatory if ``range`` is
        ``None``.
    offset : int or sequence of ints, optional
        Number of cells to add to/remove from the left of
        ``domain.partition``. By default, the difference is
        distributed evenly, with preference for left in case of
        ambiguity.
        This option can only be used together with ``ran_shp``.
    pad_mode : string, optional
        Method to be used to fill in missing values in an enlarged
        array.

        ``'constant'``: Fill with ``pad_const``.

        ``'symmetric'``: Reflect at the boundaries, not doubling the
        outmost values. This requires left and right padding sizes
        to be strictly smaller than the original array shape.

        ``'periodic'``: Fill in values from the other side, keeping
        the order. This requires left and right padding sizes to be
        at most as large as the original array shape.

        ``'order0'``: Extend constantly with the outmost values
        (ensures continuity).

        ``'order1'``: Extend with constant slope (ensures continuity
        of the first derivative). This requires at least 2 values
        along each axis where padding is applied.

    pad_const : scalar, optional
        Value to be used in the ``'constant'`` padding mode.
    discr_kwargs: dict, optional
        Keyword arguments passed to the `uniform_discr` constructor.

    Examples
    --------
    The simplest way of initializing a resizing operator is by
    providing ``ran_shp`` and, optionally, parameters for the padding
    variant to be used. The range is inferred from ``domain`` and the
    supplied parameters. If no ``offset`` is given, the difference in
    size is evenly distributed to both sides:

    >>> space = odl.uniform_discr([0, 0], [1, 1], (2, 4))
    >>> resize_op = odl.ResizingOperator(space, ran_shp=(4, 4))
    >>> resize_op.range
    uniform_discr([-0.5, 0.0], [1.5, 1.0], [4, 4])

    Testing different padding methods in the first axis (zero padding
    is the default):

    >>> x = [[1, 2, 3, 4],
    ...      [5, 6, 7, 8]]
    >>> resize_op = odl.ResizingOperator(space, ran_shp=(4, 4))
    >>> print(resize_op(x))
    [[0.0, 0.0, 0.0, 0.0],
     [1.0, 2.0, 3.0, 4.0],
     [5.0, 6.0, 7.0, 8.0],
     [0.0, 0.0, 0.0, 0.0]]
    >>>
    >>> resize_op = odl.ResizingOperator(space, ran_shp=(4, 4),
    ...                                  offset=(0, 0),
    ...                                  pad_mode='periodic')
    >>> print(resize_op(x))
    [[1.0, 2.0, 3.0, 4.0],
     [5.0, 6.0, 7.0, 8.0],
     [1.0, 2.0, 3.0, 4.0],
     [5.0, 6.0, 7.0, 8.0]]
    >>>
    >>> resize_op = odl.ResizingOperator(space, ran_shp=(4, 4),
    ...                                  offset=(0, 0),
    ...                                  pad_mode='order0')
    >>> print(resize_op(x))
    [[1.0, 2.0, 3.0, 4.0],
     [5.0, 6.0, 7.0, 8.0],
     [5.0, 6.0, 7.0, 8.0],
     [5.0, 6.0, 7.0, 8.0]]

    Alternatively, the range of the operator can be provided directly.
    This requires that the partitions match, i.e. that the cell sizes
    are the same and there is no shift:

    >>> # Same space as in the first example, see above
    >>> large_spc = odl.uniform_discr([-0.5, 0], [1.5, 1], (4, 4))
    >>> resize_op = odl.ResizingOperator(space, large_spc,
    ...                                  pad_mode='periodic')
    >>> print(resize_op(x))
    [[5.0, 6.0, 7.0, 8.0],
     [1.0, 2.0, 3.0, 4.0],
     [5.0, 6.0, 7.0, 8.0],
     [1.0, 2.0, 3.0, 4.0]]
    """
    if not isinstance(domain, DiscreteLp):
        raise TypeError('`domain` must be a `DiscreteLp` instance, '
                        'got {!r}'.format(domain))

    if not domain.is_uniform:
        raise ValueError('`domain` is not uniformly discretized')

    offset = kwargs.pop('offset', None)
    discr_kwargs = kwargs.pop('discr_kwargs', {})

    if range is None:
        if ran_shp is None:
            raise ValueError('either `range` or `ran_shp` must be '
                             'given')

        # Normalize `offset` to one (possibly None) entry per axis
        offset = normalized_scalar_param_list(
            offset, domain.ndim, param_conv=safe_int_conv, keep_none=True)

        range = _resize_discr(domain, ran_shp, offset, discr_kwargs)
        self.__offset = tuple(_offset_from_spaces(domain, range))

    elif ran_shp is None:
        if offset is not None:
            raise ValueError('`offset` can only be combined with '
                             '`ran_shp`')

        if not np.allclose(range.cell_sides, domain.cell_sides):
            raise ValueError(
                'cell sides of domain and range differ significantly '
                '(difference {})'
                ''.format(range.cell_sides - domain.cell_sides))

        self.__offset = _offset_from_spaces(domain, range)

    else:
        # BUGFIX: the message previously said `ran_shape`, which is not
        # the name of the parameter (`ran_shp`).
        raise ValueError('cannot combine `range` with `ran_shp`')

    pad_mode = kwargs.pop('pad_mode', 'constant')
    pad_mode, pad_mode_in = str(pad_mode).lower(), pad_mode
    if pad_mode not in _SUPPORTED_RESIZE_PAD_MODES:
        raise ValueError("`pad_mode` '{}' not understood"
                         "".format(pad_mode_in))

    self.__pad_mode = pad_mode
    # Store constant in a way that ensures safe casting (one-element array)
    self.__pad_const = np.array(kwargs.pop('pad_const', 0),
                                dtype=range.dtype)

    # padding mode 'constant' with `pad_const != 0` is not linear
    linear = (self.pad_mode != 'constant' or self.pad_const == 0.0)

    super().__init__(domain, range, linear=linear)
def uniform_partition(min_pt=None, max_pt=None, shape=None, cell_sides=None,
                      nodes_on_bdry=False):
    """Return a partition with equally sized cells.

    Parameters
    ----------
    min_pt, max_pt : float or sequence of float, optional
        Vectors defining the lower/upper limits of the intervals in an
        `IntervalProd` (a rectangular box). ``None`` entries mean
        "compute the value".
    shape : int or sequence of ints, optional
        Number of nodes per axis. ``None`` entries mean "compute the
        value".
    cell_sides : float or sequence of floats, optional
        Side length of the partition cells per axis. ``None`` entries
        mean "compute the value".
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift
        it by half a cell size into the interior (``False``).
        In each axis, an entry may consist in a single bool or a
        2-tuple of bool. In the latter case, the first tuple entry
        decides for the left, the second for the right boundary. The
        length of the sequence must be ``array.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

    Notes
    -----
    In each axis, 3 of the 4 possible parameters ``min_pt``,
    ``max_pt``, ``shape`` and ``cell_sides`` must be given. If all
    four are provided, they are checked for consistency.

    See Also
    --------
    uniform_partition_fromintv : partition an existing set
    uniform_partition_fromgrid : use an existing grid as basis

    Examples
    --------
    Any combination of three of the four parameters can be used for
    creation of a partition:

    >>> part = uniform_partition(min_pt=0, max_pt=2, shape=4)
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  1. ,  1.5,  2. ]),)
    >>> part = uniform_partition(min_pt=0, shape=4, cell_sides=0.5)
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  1. ,  1.5,  2. ]),)
    >>> part = uniform_partition(max_pt=2, shape=4, cell_sides=0.5)
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  1. ,  1.5,  2. ]),)
    >>> part = uniform_partition(min_pt=0, max_pt=2, cell_sides=0.5)
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  1. ,  1.5,  2. ]),)

    In higher dimensions, the parameters can be given differently in
    each axis. Where ``None`` is given, the value will be computed:

    >>> part = uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
    ...                          shape=[4, 2])
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]), array([ 0.,  1.,  2.]))
    >>> part = uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
    ...                          shape=[None, 2], cell_sides=[0.25, None])
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]), array([ 0.,  1.,  2.]))
    >>> part = uniform_partition(min_pt=[0, None], max_pt=[None, 2],
    ...                          shape=[4, 2], cell_sides=[0.25, 1])
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]), array([ 0.,  1.,  2.]))

    By default, no grid points are placed on the boundary:

    >>> part = uniform_partition(0, 1, 4)
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]),)
    >>> part.grid.coord_vectors
    (array([ 0.125,  0.375,  0.625,  0.875]),)

    This can be changed with the nodes_on_bdry parameter:

    >>> part = uniform_partition(0, 1, 3, nodes_on_bdry=True)
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.75,  1.  ]),)
    >>> part.grid.coord_vectors
    (array([ 0. ,  0.5,  1. ]),)

    We can specify this per axis, too. In this case we choose both
    in the first axis and only the rightmost in the second:

    >>> part = uniform_partition([0, 0], [1, 1], (3, 3),
    ...                          nodes_on_bdry=(True, (False, True)))
    ...
    >>> part.cell_boundary_vecs[0]  # first axis, as above
    array([ 0.  ,  0.25,  0.75,  1.  ])
    >>> part.grid.coord_vectors[0]
    array([ 0. ,  0.5,  1. ])
    >>> part.cell_boundary_vecs[1]  # second, asymmetric axis
    array([ 0. ,  0.4,  0.8,  1. ])
    >>> part.grid.coord_vectors[1]
    array([ 0.2,  0.6,  1. ])
    """
    # Normalize partition parameters

    # np.size(None) == 1, so all-None input still yields ndim >= 1
    sizes = [np.size(p) for p in (min_pt, max_pt, shape, cell_sides)]
    ndim = int(np.max(sizes))

    min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
                                          keep_none=True)
    max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
                                          keep_none=True)
    shape = normalized_scalar_param_list(shape, ndim,
                                         param_conv=safe_int_conv,
                                         keep_none=True)
    cell_sides = normalized_scalar_param_list(cell_sides, ndim,
                                              param_conv=float,
                                              keep_none=True)

    nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)

    # Calculate the missing parameters in min_pt, max_pt, shape
    for i, (xmin, xmax, n, dx, on_bdry) in enumerate(
            zip(min_pt, max_pt, shape, cell_sides, nodes_on_bdry)):
        num_params = sum(p is not None for p in (xmin, xmax, n, dx))
        if num_params < 3:
            raise ValueError('in axis {}: expected at least 3 of the '
                             'parameters `min_pt`, `max_pt`, `shape`, '
                             '`cell_sides`, got {}'
                             ''.format(i, num_params))

        # Unpack the tuple if possible, else use bool globally for this axis
        try:
            bdry_l, bdry_r = on_bdry
        except TypeError:
            bdry_l = bdry_r = on_bdry

        # For each node on the boundary, we subtract 1/2 from the number of
        # full cells between min_pt and max_pt.
        if xmin is None:
            min_pt[i] = xmax - (n - sum([bdry_l, bdry_r]) / 2.0) * dx
        elif xmax is None:
            max_pt[i] = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
        elif n is None:
            # Here we add to n since (e-b)/s gives the reduced number of
            # cells.
            n_calc = (xmax - xmin) / dx + sum([bdry_l, bdry_r]) / 2.0
            n_round = int(round(n_calc))
            if abs(n_calc - n_round) > 1e-5:
                raise ValueError('in axis {}: calculated number of nodes '
                                 '{} = ({} - {}) / {} too far from integer'
                                 ''.format(i, n_calc, xmax, xmin, dx))
            shape[i] = n_round
        elif dx is None:
            # Nothing to do: cell size is determined by the other three
            # parameters downstream.
            pass
        else:
            # All four given: check them for consistency
            xmax_calc = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
            if not np.isclose(xmax, xmax_calc):
                raise ValueError('in axis {}: calculated endpoint '
                                 '{} = {} + {} * {} too far from given '
                                 'endpoint {}.'
                                 ''.format(i, xmax_calc, xmin, n, dx, xmax))

    return uniform_partition_fromintv(
        IntervalProd(min_pt, max_pt), shape, nodes_on_bdry)
def nonuniform_partition(*coord_vecs, **kwargs):
    """Return a partition with un-equally sized cells.

    Parameters
    ----------
    coord_vecs1, ... coord_vecsN : `array-like`
        Arrays of coordinates of the mid-points of the partition cells.
    min_pt, max_pt : float or sequence of floats, optional
        Vectors defining the lower/upper limits of the intervals in an
        `IntervalProd` (a rectangular box). ``None`` entries mean
        "compute the value".
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift
        it by half a cell size into the interior (``False``).
        In each axis, an entry may consist in a single bool or a
        2-tuple of bool. In the latter case, the first tuple entry
        decides for the left, the second for the right boundary. The
        length of the sequence must be ``array.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

        Cannot be given with both min_pt and max_pt since they
        determine the same thing.

    See Also
    --------
    uniform_partition : uniformly spaced points
    uniform_partition_fromintv : partition an existing set
    uniform_partition_fromgrid : use an existing grid as basis

    Examples
    --------
    With uniformly spaced points the result is the same as a uniform
    partition:

    >>> nonuniform_partition([0, 1, 2, 3])
    uniform_partition(-0.5, 3.5, 4)
    >>> nonuniform_partition([0, 1, 2, 3], [1, 2])
    uniform_partition([-0.5, 0.5], [3.5, 2.5], [4, 2])

    If the points are not uniformly spaced a nonuniform partition is
    created. Note that the containing interval is calculated by
    assuming that the points are in the middle of the sub-intervals:

    >>> nonuniform_partition([0, 1, 3])
    RectPartition(
        IntervalProd(-0.5, 4.0),
        TensorGrid([0.0, 1.0, 3.0]))

    Higher dimensional partitions are created by specifying the
    gridpoints along each dimension:

    >>> nonuniform_partition([0, 1, 3], [1, 2])
    RectPartition(
        IntervalProd([-0.5, 0.5], [4.0, 2.5]),
        TensorGrid([0.0, 1.0, 3.0], [1.0, 2.0]))

    If the endpoints should be on the boundary, the ``nodes_on_bdry``
    parameter can be used:

    >>> nonuniform_partition([0, 1, 3], nodes_on_bdry=True)
    RectPartition(
        IntervalProd(0.0, 3.0),
        TensorGrid([0.0, 1.0, 3.0]))

    Users can also manually specify the containing intervals
    dimensions by using the ``min_pt`` and ``max_pt`` arguments:

    >>> nonuniform_partition([0, 1, 3], min_pt=-2, max_pt=3)
    RectPartition(
        IntervalProd(-2.0, 3.0),
        TensorGrid([0.0, 1.0, 3.0]))
    """
    # Get parameters from kwargs
    min_pt = kwargs.pop('min_pt', None)
    max_pt = kwargs.pop('max_pt', None)
    nodes_on_bdry = kwargs.pop('nodes_on_bdry', False)

    # np.size(None) == 1, so scalar/None inputs do not raise ndim
    sizes = [len(coord_vecs)] + [np.size(p) for p in (min_pt, max_pt)]
    ndim = int(np.max(sizes))

    min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
                                          keep_none=True)
    max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
                                          keep_none=True)
    nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)

    # Calculate the missing parameters in min_pt, max_pt
    for i, (xmin, xmax, (bdry_l, bdry_r), coords) in enumerate(
            zip(min_pt, max_pt, nodes_on_bdry, coord_vecs)):
        # Check input for redundancy
        if xmin is not None and bdry_l:
            raise ValueError('in axis {}: got both `min_pt` and '
                             '`nodes_on_bdry=True`'.format(i))
        if xmax is not None and bdry_r:
            raise ValueError('in axis {}: got both `max_pt` and '
                             '`nodes_on_bdry=True`'.format(i))

        # Compute boundary position if not given by user: either the
        # outermost node lies on the boundary, or the boundary extends
        # half of the outermost cell beyond it.
        if xmin is None:
            if bdry_l:
                min_pt[i] = coords[0]
            else:
                min_pt[i] = coords[0] - (coords[1] - coords[0]) / 2.0
        if xmax is None:
            if bdry_r:
                max_pt[i] = coords[-1]
            else:
                max_pt[i] = coords[-1] + (coords[-1] - coords[-2]) / 2.0

    interval = IntervalProd(min_pt, max_pt)
    grid = TensorGrid(*coord_vecs)
    return RectPartition(interval, grid)