Example #1
    def test_prod(self):
        x = matrix([[1, 2, 3], [4, 5, 6]])
        assert_equal(x.prod(), 720)
        assert_equal(x.prod(0), matrix([[4, 10, 18]]))
        assert_equal(x.prod(1), matrix([[6], [120]]))

        assert_equal(np.prod(x), 720)
        assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]]))
        assert_equal(np.prod(x, axis=1), matrix([[6], [120]]))

        y = matrix([0, 1, 3])
        assert_(y.prod() == 0)
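
The same reductions work on a plain ndarray; a minimal standalone sketch using the values from the test above (only NumPy is assumed):

import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
print(np.prod(a))           # 720 -- product over all elements
print(np.prod(a, axis=0))   # [ 4 10 18] -- column-wise products
print(np.prod(a, axis=1))   # [  6 120] -- row-wise products
print(np.array([0, 1, 3]).prod())  # 0 -- any zero element zeroes the product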
Example #2
    def setup(self):
        self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
        self.b = np.empty((3, 0, 5, 6))
        self.complex_indices = ['skip', Ellipsis,
            0,
            # Boolean indices, up to 3-d for some special cases of eating up
            # dimensions; also need to test all-False
            np.array([True, False, False]),
            np.array([[True, False], [False, True]]),
            np.array([[[False, False], [False, False]]]),
            # Some slices:
            slice(-5, 5, 2),
            slice(1, 1, 100),
            slice(4, -1, -2),
            slice(None, None, -3),
            # Some fancy indices:
            np.empty((0, 1, 1), dtype=np.intp),  # empty and can be broadcast
            np.array([0, 1, -2]),
            np.array([[2], [0], [1]]),
            np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
            np.array([2, -1], dtype=np.int8),
            np.zeros([1]*31, dtype=int),  # result would have too many dimensions
            np.array([0., 1.])]  # invalid dtype for an index
        # Some simpler indices that still cover a bit more
        self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
                               'skip']
        # Very simple ones to fill the rest:
        self.fill_indices = [slice(None, None), 0]
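
In this setup, np.prod only computes the total element count for the reshape; a minimal sketch of that pattern with the same shape (only NumPy is assumed):

import numpy as np

shape = (3, 1, 5, 6)
n = int(np.prod(shape))          # 90 -- total number of elements
a = np.arange(n).reshape(shape)
assert a.shape == shape and a.size == n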
Example #3
def normalize_descr(descr):
    "Normalize a description, adding the platform byteorder."

    out = []
    for item in descr:
        dtype = item[1]
        if isinstance(dtype, str):
            if dtype[0] not in ['|', '<', '>']:
                onebyte = dtype[1:] == "1"
                if onebyte or dtype[0] in ['S', 'V', 'b']:
                    dtype = "|" + dtype
                else:
                    dtype = byteorder + dtype
            if len(item) > 2 and np.prod(item[2]) > 1:
                nitem = (item[0], dtype, item[2])
            else:
                nitem = (item[0], dtype)
            out.append(nitem)
        elif isinstance(item[1], list):
            l = []
            for j in normalize_descr(item[1]):
                l.append(j)
            out.append((item[0], l))
        else:
            raise ValueError("Expected a str or list and got %s" %
                             (type(item[1]),))
    return out
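
The function above relies on a module-level byteorder string that the snippet does not show; a hedged sketch of how it could be defined and called (the sys.byteorder mapping and the sample description are assumptions, not taken from the source):

import sys
import numpy as np

# Assumed definition: map the native byte order to a dtype prefix.
byteorder = {'little': '<', 'big': '>'}[sys.byteorder]

descr = [('x', 'i4'), ('y', 'f8', (2, 2)), ('info', [('color', 'u1')])]
print(normalize_descr(descr))
# On a little-endian platform:
# [('x', '<i4'), ('y', '<f8', (2, 2)), ('info', [('color', '|u1')])]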
Example #4
def buffer_length(arr):
    if isinstance(arr, str):
        # For a (unicode) string, report the storage needed at the smallest
        # fixed character width (1, 2 or 4 bytes) covering every code point.
        if not arr:
            charmax = 0
        else:
            charmax = max([ord(c) for c in arr])
        if charmax < 256:
            size = 1
        elif charmax < 65536:
            size = 2
        else:
            size = 4
        return size * len(arr)
    # Otherwise fall back to the buffer protocol.
    v = memoryview(arr)
    if v.shape is None:
        return len(v) * v.itemsize
    else:
        return np.prod(v.shape) * v.itemsize
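
A brief usage sketch for the function above (Python 3 str inputs; the values are illustrative):

import numpy as np

print(buffer_length('abc'))        # 3 -- every code point fits in one byte
print(buffer_length('\u0394abc'))  # 8 -- the largest code point needs two bytes
print(buffer_length(np.zeros(4, dtype=np.uint16)))  # 8 -- 4 elements * 2-byte itemsize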
Example #5
def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type by collapsing nested fields and/or fields
    with a shape.

    Note that the field names are lost.

    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse.
    flatten_base : bool, optional
        If True, transform a field with a shape into several fields. Default is
        False.

    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
    ...                ('block', int, (2, 3))])
    >>> np.lib._iotools.flatten_dtype(dt)
    [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')]
    >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
    [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'),
     dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'),
     dtype('int32')]

    """
    names = ndtype.names
    if names is None:
        if flatten_base:
            return [ndtype.base] * int(np.prod(ndtype.shape))
        return [ndtype.base]
    else:
        types = []
        for field in names:
            info = ndtype.fields[field]
            flat_dt = flatten_dtype(info[0], flatten_base)
            types.extend(flat_dt)
        return types
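
The np.prod call above just counts how many base elements a shaped field expands to; a short sketch of that step, reusing the docstring's dtype (on most 64-bit platforms the integer base prints as int64 rather than the int32 shown above):

import numpy as np

dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
               ('block', int, (2, 3))])
block = dt.fields['block'][0]      # the subarray dtype of the 'block' field
print(block.base, block.shape)     # e.g. int64 (2, 3)
print(int(np.prod(block.shape)))   # 6 -- number of flattened base fields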
Example #6
    def _get_multi_index(self, arr, indices):
        """Mimic multi dimensional indexing.

        Parameters
        ----------
        arr : ndarray
            Array to be indexed.
        indices : tuple of index objects

        Returns
        -------
        out : ndarray
            An array equivalent to the indexing operation (but always a copy).
            `arr[indices]` should be identical.
        no_copy : bool
            Whether the indexing operation requires a copy. If this is `True`,
            `np.may_share_memory(arr, arr[indices])` should be `True` (with
            some exceptions for scalars and possibly 0-d arrays).

        Notes
        -----
        While the function mostly matches the errors raised by normal
        indexing, this is not guaranteed in general.
        """
        in_indices = list(indices)
        indices = []
        # if False, this is a fancy or boolean index
        no_copy = True
        # number of fancy/scalar indexes that are not consecutive
        num_fancy = 0
        # number of dimensions indexed by a "fancy" index
        fancy_dim = 0
        # NOTE: This is a funny twist (and probably OK to change).
        # The boolean array can produce illegal indices, but this is
        # allowed if the broadcast fancy indices are 0-sized.
        # This variable is there to catch that case.

        # We need to handle Ellipsis and make arrays from indices, also
        # check if this is fancy indexing (set no_copy).
        ndim = 0
        ellipsis_pos = None  # define here mostly to replace all but first.
        for i, indx in enumerate(in_indices):
            if indx is None:
                continue
            if isinstance(indx, np.ndarray) and indx.dtype == bool:
                no_copy = False
                if indx.ndim == 0:
                    raise IndexError
                # boolean indices can have higher dimensions
                ndim += indx.ndim
                fancy_dim += indx.ndim
                continue
            if indx is Ellipsis:
                if ellipsis_pos is None:
                    ellipsis_pos = i
                    continue  # do not increment ndim counter
                raise IndexError
            if isinstance(indx, slice):
                ndim += 1
                continue
            if not isinstance(indx, np.ndarray):
                # This could be open for changes in numpy.
                # numpy should maybe raise an error if casting to intp
                # is not safe. It rejects np.array([1., 2.]) but not
                # [1., 2.] as an index (same for e.g. np.take).
                # (Note the importance of empty lists if changing this here.)
                try:
                    indx = np.array(indx, dtype=np.intp)
                except ValueError:
                    raise IndexError
                in_indices[i] = indx
            elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
                raise IndexError('arrays used as indices must be of '
                                 'integer (or boolean) type')
            if indx.ndim != 0:
                no_copy = False
            ndim += 1
            fancy_dim += 1

        if arr.ndim - ndim < 0:
            # We can't take more dimensions than we have, not even for 0-d
            # arrays, since a[()] makes sense but a[(),] does not. We raise
            # an error here, unless a broadcasting error occurs first.
            raise IndexError

        if ndim == 0 and None not in in_indices:
            # We have no indices, or only an Ellipsis; this is legal.
            return arr.copy(), no_copy

        if ellipsis_pos is not None:
            in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
                                                       (arr.ndim - ndim))

        for ax, indx in enumerate(in_indices):
            if isinstance(indx, slice):
                # convert to an index array
                indx = np.arange(*indx.indices(arr.shape[ax]))
                indices.append(['s', indx])
                continue
            elif indx is None:
                # this is like taking a slice with one element from a new axis:
                indices.append(['n', np.array([0], dtype=np.intp)])
                arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
                continue
            if isinstance(indx, np.ndarray) and indx.dtype == bool:
                if indx.shape != arr.shape[ax:ax+indx.ndim]:
                    raise IndexError

                try:
                    flat_indx = np.ravel_multi_index(np.nonzero(indx),
                                    arr.shape[ax:ax+indx.ndim], mode='raise')
                except Exception:
                    error_unless_broadcast_to_empty = True
                    # fill with 0s instead, and raise error later
                    flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
                # concatenate axis into a single one:
                if indx.ndim != 0:
                    arr = arr.reshape((arr.shape[:ax]
                                  + (np.prod(arr.shape[ax:ax+indx.ndim]),)
                                  + arr.shape[ax+indx.ndim:]))
                    indx = flat_indx
                else:
                    # This could be changed: a 0-d boolean index can make
                    # sense (even outside the 0-d indexed-array case).
                    # Note that originally this could be interpreted as an
                    # integer in the full-integer special case.
                    raise IndexError
            else:
                # If the index is a singleton, the bounds check is done
                # before broadcasting. This was different before NumPy 1.9.
                if indx.ndim == 0:
                    if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
                        raise IndexError
            if indx.ndim == 0:
                # The index is a scalar. This used to be twofold: if fancy
                # indexing was active, the check was done later, possibly
                # after broadcasting it away (NumPy 1.7 or earlier).
                # Now it is always done.
                if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
                    raise IndexError
            if (len(indices) > 0 and
                    indices[-1][0] == 'f' and
                    ax != ellipsis_pos):
                # NOTE: There could still have been a 0-sized Ellipsis
                # between them; that case is checked via ellipsis_pos.
                indices[-1].append(indx)
            else:
                # We have a fancy index that does not follow an existing one.
                # NOTE: A 0-d array triggers this as well, even though one
                # might not expect it to, since a scalar would not be
                # considered fancy indexing.
                num_fancy += 1
                indices.append(['f', indx])

        if num_fancy > 1 and not no_copy:
            # We have to move all fancy indices to the front (left).
            new_indices = indices[:]
            axes = list(range(arr.ndim))
            fancy_axes = []
            new_indices.insert(0, ['f'])
            ni = 0
            ai = 0
            for indx in indices:
                ni += 1
                if indx[0] == 'f':
                    new_indices[0].extend(indx[1:])
                    del new_indices[ni]
                    ni -= 1
                    for ax in range(ai, ai + len(indx[1:])):
                        fancy_axes.append(ax)
                        axes.remove(ax)
                ai += len(indx) - 1  # axis we are at
            indices = new_indices
            # and now we need to transpose arr:
            arr = arr.transpose(*(fancy_axes + axes))

        # We only have one 'f' index now and arr is transposed accordingly.
        # Now handle newaxis by reshaping...
        ax = 0
        for indx in indices:
            if indx[0] == 'f':
                if len(indx) == 1:
                    continue
                # First of all, reshape arr to combine fancy axes into one:
                orig_shape = arr.shape
                orig_slice = orig_shape[ax:ax + len(indx[1:])]
                arr = arr.reshape((arr.shape[:ax]
                                    + (np.prod(orig_slice).astype(int),)
                                    + arr.shape[ax + len(indx[1:]):]))

                # Check if broadcasting works
                res = np.broadcast(*indx[1:])
                # Unfortunately the indices might be out of bounds, so check
                # that first and then use mode='wrap'. This is only needed
                # if there are any indices at all...
                if res.size != 0:
                    if error_unless_broadcast_to_empty:
                        raise IndexError
                    for _indx, _size in zip(indx[1:], orig_slice):
                        if _indx.size == 0:
                            continue
                        if np.any(_indx >= _size) or np.any(_indx < -_size):
                            raise IndexError
                if len(indx[1:]) == len(orig_slice):
                    if np.prod(orig_slice) == 0:
                        # Workaround for a crash or IndexError with
                        # mode='wrap' in some 0-sized cases.
                        try:
                            mi = np.ravel_multi_index(indx[1:], orig_slice,
                                                      mode='raise')
                        except Exception:
                            # This happens with a 0-sized orig_slice
                            # (sometimes?); here it is a ValueError, but real
                            # indexing raises an IndexError:
                            raise IndexError('invalid index into 0-sized')
                    else:
                        mi = np.ravel_multi_index(indx[1:], orig_slice,
                                                  mode='wrap')
                else:
                    # Maybe never happens...
                    raise ValueError
                arr = arr.take(mi.ravel(), axis=ax)
                try:
                    arr = arr.reshape((arr.shape[:ax]
                                        + mi.shape
                                        + arr.shape[ax+1:]))
                except ValueError:
                    # too many dimensions, probably
                    raise IndexError
                ax += mi.ndim
                continue

            # If we are here, we have a 1D array for take:
            arr = arr.take(indx[1], axis=ax)
            ax += 1

        return arr, no_copy
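
The boolean-index branch above collapses the masked axes with np.ravel_multi_index and then selects rows with take; a minimal standalone sketch of that equivalence (the array and mask are illustrative, not taken from the test suite):

import numpy as np

arr = np.arange(24).reshape(2, 3, 4)
mask = np.array([[True, False, True],
                 [False, True, False]])   # covers the first two axes of arr

# Hand-rolled path: flatten the masked axes, then take the selected rows.
flat_idx = np.ravel_multi_index(np.nonzero(mask), mask.shape)
collapsed = arr.reshape(int(np.prod(mask.shape)), 4)
manual = collapsed.take(flat_idx, axis=0)

# Plain boolean indexing gives the same result.
assert np.array_equal(manual, arr[mask])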
Example #7
    def test_nanprod(self):
        tgt = np.prod(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanprod(mat), tgt)
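
For context, np.nanprod treats NaN as 1 while np.prod propagates it; a minimal sketch (the values are illustrative, the test's self.mat fixture is not shown here):

import numpy as np

x = np.array([1.0, 2.0, np.nan, 4.0])
print(np.prod(x))     # nan -- NaN propagates through the ordinary product
print(np.nanprod(x))  # 8.0 -- NaN is treated as 1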