Example #1
File: _index.py Project: carterbox/cupy
    def __setitem__(self, key, x):
        row, col = self._parse_indices(key)

        if isinstance(row, _int_scalar_types) and \
                isinstance(col, _int_scalar_types):
            x = cupy.asarray(x, dtype=self.dtype)
            if x.size != 1:
                raise ValueError('Trying to assign a sequence to an item')
            self._set_intXint(row, col, x.flat[0])
            return

        if isinstance(row, slice):
            row = cupy.arange(*row.indices(self.shape[0]))[:, None]
        else:
            row = cupy.atleast_1d(row)

        if isinstance(col, slice):
            col = cupy.arange(*col.indices(self.shape[1]))[None, :]
            if row.ndim == 1:
                row = row[:, None]
        else:
            col = cupy.atleast_1d(col)

        i, j = cupy.broadcast_arrays(row, col)
        if i.shape != j.shape:
            raise IndexError('number of row and column indices differ')

        if isspmatrix(x):
            if i.ndim == 1:
                # Inner indexing, so treat them like row vectors.
                i = i[None]
                j = j[None]
            broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
            broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
            if not ((broadcast_row or x.shape[0] == i.shape[0]) and
                    (broadcast_col or x.shape[1] == i.shape[1])):
                raise ValueError('shape mismatch in assignment')
            if x.size == 0:
                return
            x = x.tocoo(copy=True)
            x.sum_duplicates()
            self._set_arrayXarray_sparse(i, j, x)
        else:
            # Make x and i into the same shape
            x = cupy.asarray(x, dtype=self.dtype)
            x, _ = cupy.broadcast_arrays(x, i)
            if x.size == 0:
                return
            x = x.reshape(i.shape)
            self._set_arrayXarray(i, j, x)
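
A minimal usage sketch of this assignment path, assuming ``cupyx.scipy.sparse.csr_matrix`` as the concrete class that mixes in this indexing code (changing the sparsity structure may emit a ``SparseEfficiencyWarning``):

import cupy
from cupyx.scipy import sparse

A = sparse.csr_matrix((3, 3), dtype=cupy.float32)
A[0, 0] = 1.0                             # scalar path: _set_intXint
A[1, :] = cupy.asarray([4.0, 5.0, 6.0])   # slice path: row/col indices are broadcast together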
Example #2
def normal(loc=0.0, scale=1.0, size=None, dtype=float):
    """Returns an array of normally distributed samples.

    Args:
        loc (float or array_like of floats): Mean of the normal distribution.
        scale (float or array_like of floats):
            Standard deviation of the normal distribution.
        size (int or tuple of ints): The shape of the array. If ``None``, a
            zero-dimensional array is generated.
        dtype: Data type specifier. Only :class:`numpy.float32` and
            :class:`numpy.float64` types are allowed.

    Returns:
        cupy.ndarray: Normally distributed samples.

    .. seealso:: :func:`numpy.random.normal`

    """
    rs = _generator.get_random_state()
    if size is None and any(isinstance(arg, cupy.ndarray)
                            for arg in [scale, loc]):
        size = cupy.broadcast_arrays(loc, scale)[0].shape
    x = rs.normal(0, 1, size, dtype)
    cupy.multiply(x, scale, out=x)
    cupy.add(x, loc, out=x)
    return x
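
A minimal usage sketch: when ``size`` is omitted and ``loc`` or ``scale`` is a CuPy array, the output shape is inferred by the ``broadcast_arrays`` call above.

import cupy

loc = cupy.asarray([0.0, 10.0, 100.0])           # per-element means, shape (3,)
samples = cupy.random.normal(loc=loc, scale=1.0)
print(samples.shape)                             # (3,), inferred from broadcasting loc and scale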
Example #3
def _ndim_coords_from_arrays(points, ndim=None):
    """
    Convert a tuple of coordinate arrays to a (..., ndim)-shaped array.

    """
    if isinstance(points, tuple) and len(points) == 1:
        # handle argument tuple
        points = points[0]
    if isinstance(points, tuple):
        p = cp.broadcast_arrays(*points)
        n = len(p)
        for j in range(1, n):
            if p[j].shape != p[0].shape:
                raise ValueError(
                    "coordinate arrays do not have the same shape")
        points = cp.empty(p[0].shape + (len(points), ), dtype=float)
        for j, item in enumerate(p):
            points[..., j] = item
    else:
        points = cp.asarray(points)
        if points.ndim == 1:
            if ndim is None:
                points = points.reshape(-1, 1)
            else:
                points = points.reshape(-1, ndim)
    return points
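
The (..., ndim) stacking pattern above can be reproduced directly with ``broadcast_arrays``; a minimal sketch:

import cupy as cp

x = cp.arange(3)[:, None]           # shape (3, 1)
y = cp.arange(4)[None, :]           # shape (1, 4)
xb, yb = cp.broadcast_arrays(x, y)  # both become shape (3, 4)
pts = cp.stack([xb, yb], axis=-1)   # coordinate array of shape (3, 4, 2)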
Example #4
    def rvs(self, a, b, loc, scale, size):
        # Broadcast the truncation bounds, draw uniform samples, map them
        # through the truncated-normal inverse CDF, then rescale and shift.
        self.a, self.b = cp.broadcast_arrays(a, b)
        H, N = size
        U = cp.random.uniform(low=0, high=1, size=(H, N))
        x = self._truncnorm_ppf(U, N)
        x = x * scale + loc
        return x
Example #5
def broadcast_arrays(*arrays: Array) -> List[Array]:
    """
    Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`.

    See its docstring for more information.
    """
    from ._array_object import Array

    return [
        Array._new(array) for array in np.broadcast_arrays(*[a._array for a in arrays])
    ]
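
A hedged usage sketch of this wrapper through the experimental ``numpy.array_api`` namespace (whether that module is available depends on the NumPy version; treat the import path as an assumption):

import numpy.array_api as xp

a = xp.asarray([[1.0], [2.0]])       # shape (2, 1)
b = xp.asarray([10.0, 20.0, 30.0])   # shape (3,)
a2, b2 = xp.broadcast_arrays(a, b)   # both results have shape (2, 3)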
Example #6
    def __getitem__(self, key):

        # For testing: SciPy >= 1.4.0 is needed to guarantee
        # that results match.
        if scipy_available and numpy.lib.NumpyVersion(
                scipy.__version__) < '1.4.0':
            raise NotImplementedError(
                "Sparse __getitem__() requires Scipy >= 1.4.0")

        row, col = self._parse_indices(key)

        # Dispatch to specialized methods.
        if isinstance(row, _int_scalar_types):
            if isinstance(col, _int_scalar_types):
                return self._get_intXint(row, col)
            elif isinstance(col, slice):
                return self._get_intXslice(row, col)
            elif col.ndim == 1:
                return self._get_intXarray(row, col)
            raise IndexError('index results in >2 dimensions')
        elif isinstance(row, slice):
            if isinstance(col, _int_scalar_types):
                return self._get_sliceXint(row, col)
            elif isinstance(col, slice):
                if row == slice(None) and row == col:
                    return self.copy()
                return self._get_sliceXslice(row, col)
            elif col.ndim == 1:
                return self._get_sliceXarray(row, col)
            raise IndexError('index results in >2 dimensions')
        elif row.ndim == 1:
            if isinstance(col, _int_scalar_types):
                return self._get_arrayXint(row, col)
            elif isinstance(col, slice):
                return self._get_arrayXslice(row, col)
        else:  # row.ndim == 2
            if isinstance(col, _int_scalar_types):
                return self._get_arrayXint(row, col)
            elif isinstance(col, slice):
                raise IndexError('index results in >2 dimensions')
            elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1):
                # special case for outer indexing
                return self._get_columnXarray(row[:, 0], col.ravel())

        # The only remaining case is inner (fancy) indexing
        row, col = cupy.broadcast_arrays(row, col)
        if row.shape != col.shape:
            raise IndexError('number of row and column indices differ')
        if row.size == 0:
            return self.__class__(cupy.atleast_2d(row).shape, dtype=self.dtype)
        return self._get_arrayXarray(row, col)
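
A minimal sketch of the dispatch paths above, again assuming ``cupyx.scipy.sparse.csr_matrix``:

import cupy
from cupyx.scipy import sparse

A = sparse.csr_matrix(cupy.arange(9, dtype=cupy.float32).reshape(3, 3))
A[1, 2]                                         # intXint path
A[:, 1]                                         # sliceXint path
A[cupy.asarray([0, 2]), cupy.asarray([1, 2])]   # inner (fancy) indexing via broadcast_arrays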
Example #7
    def reshape_backward(self):
        # Backward pass of reshape-based max pooling: route the incoming
        # gradient to the positions of the maxima, splitting it evenly
        # among ties.
        dx_reshaped = cp.zeros_like(self.x_reshaped)
        out_newaxis = self.output_tensor[:, :, :, cp.newaxis, :, cp.newaxis]
        mask = (self.x_reshaped == out_newaxis)
        dout_newaxis = self.grads[:, :, :, cp.newaxis, :, cp.newaxis]
        dout_broadcast, _ = cp.broadcast_arrays(dout_newaxis, dx_reshaped)
        dx_reshaped[mask] = dout_broadcast[mask]
        dx_reshaped /= cp.sum(mask, axis=(3, 5), keepdims=True)
        outputs = dx_reshaped.reshape(self.input_tensor.shape)
        for layer in self.inbounds:
            if layer.require_grads:
                layer.grads += outputs
            else:
                layer.grads = self.grads
Example #8
File: _polygamma.py Project: zhaohb/cupy
def polygamma(n, x):
    """Polygamma function n.

    Args:
        n (cupy.ndarray): The order of the derivative of `psi`.
        x (cupy.ndarray): Where to evaluate the polygamma function.

    Returns:
        cupy.ndarray: The result.

    .. seealso:: :data:`scipy.special.polygamma`

    """
    n, x = cupy.broadcast_arrays(n, x)
    fac2 = (-1.0)**(n + 1) * _gamma.gamma(n + 1.0) * _zeta.zeta(n + 1.0, x)
    return cupy.where(n == 0, _digamma.digamma(x), fac2)
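
A minimal usage sketch; ``n`` and ``x`` of different shapes are broadcast against each other:

import cupy
from cupyx.scipy import special

n = cupy.asarray([0, 1, 2])   # derivative orders
x = cupy.asarray(2.0)         # evaluation point, broadcast against n
special.polygamma(n, x)       # psi(2), psi'(2), psi''(2)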
Example #9
def zeta(x, q):
    """Hurwitz zeta function.

    Args:
        x (cupy.ndarray): Input data, must be real.
        q (cupy.ndarray): Input data, must be real.

    Returns:
        cupy.ndarray: Values of zeta(x, q).

    .. seealso:: :data:`scipy.special.zeta`

    """
    if x.dtype.char in '?ebBhH':
        x = x.astype(cupy.float32)
    elif x.dtype.char in 'iIlLqQ':
        x = x.astype(cupy.float64)
    q = q.astype(x.dtype)
    x, q = cupy.broadcast_arrays(x, q)
    y = cupy.zeros_like(x)
    _get_zeta_kernel()(x, q, y)
    return y
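
A minimal usage sketch (zeta(2, 1) equals pi**2 / 6):

import cupy
from cupyx.scipy import special

x = cupy.asarray([2.0, 4.0])
q = cupy.asarray(1.0)
special.zeta(x, q)   # Hurwitz zeta at (2, 1) and (4, 1)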
Example #10
def cupy_ravel_multi_index(multi_index, dims):
    """
    Sadly  CuPy 7.6.0 does not have ravel_multi_index function (only from 8 version)
    Implement simple version of it.

    :param multi_index: multi indexes
    :param dims: dimension of target
    """
    ndim = len(dims)

    s = 1
    ravel_strides = [1] * ndim

    for i in range(ndim - 2, -1, -1):
        s = s * dims[i + 1]
        ravel_strides[i] = s

    multi_index = cp.broadcast_arrays(*multi_index)
    raveled_indices = cp.zeros(multi_index[0].shape, dtype=cp.int64)
    for d, stride, idx in zip(dims, ravel_strides, multi_index):
        idx = idx.astype(cp.int64, copy=False)

        raveled_indices += stride * idx
    return raveled_indices
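
A usage sketch of this helper; for in-bounds indices it matches numpy.ravel_multi_index (the expected values mirror the doctest of cupy.ravel_multi_index in Example #19):

import cupy as cp

multi_index = (cp.asarray([3, 6, 6]), cp.asarray([4, 5, 1]))
cupy_ravel_multi_index(multi_index, (7, 6))   # -> array([22, 41, 37])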
Example #11
def _select(input, labels=None, index=None, find_min=False, find_max=False,
            find_min_positions=False, find_max_positions=False,
            find_median=False):
    """Return one or more of: min, max, min position, max position, median.

    If neither `labels` nor `index` is provided, these are the global values
    in `input`. If `index` is None, but `labels` is provided, a global value
    across all non-zero labels is given. When both `labels` and `index` are
    provided, lists of values are provided for each labeled region specified
    in `index`. See further details in :func:`cupyx.scipy.ndimage.minimum`,
    etc.

    Used by minimum, maximum, minimum_position, maximum_position, extrema.
    """
    find_positions = find_min_positions or find_max_positions
    positions = None
    if find_positions:
        positions = cupy.arange(input.size).reshape(input.shape)

    def single_group(vals, positions):
        result = []
        if find_min:
            result += [vals.min()]
        if find_min_positions:
            result += [positions[vals == vals.min()][0]]
        if find_max:
            result += [vals.max()]
        if find_max_positions:
            result += [positions[vals == vals.max()][0]]
        if find_median:
            result += [cupy.median(vals)]
        return result

    if labels is None:
        return single_group(input, positions)

    # ensure input and labels match sizes
    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        mask = labels > 0
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    if cupy.isscalar(index):
        mask = labels == index
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    index = cupy.asarray(index)

    safe_int = _safely_castable_to_int(labels.dtype)
    min_label = labels.min()
    max_label = labels.max()

    # Remap labels to unique integers if necessary, or if the largest label is
    # larger than the number of values.
    if (not safe_int or min_label < 0 or max_label > labels.size):
        # Remap labels, and indexes
        unique_labels, labels = cupy.unique(labels, return_inverse=True)
        idxs = cupy.searchsorted(unique_labels, index)

        # Make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = unique_labels[idxs] == index
    else:
        # Labels are an integer type, and there aren't too many
        idxs = cupy.asanyarray(index, int).copy()
        found = (idxs >= 0) & (idxs <= max_label)

    idxs[~found] = max_label + 1

    input = input.ravel()
    labels = labels.ravel()
    if find_positions:
        positions = positions.ravel()

    using_cub = _core._accelerator.ACCELERATOR_CUB in \
        cupy._core.get_routine_accelerators()

    if using_cub:
        # Cutoff values below were determined empirically for relatively large
        # input arrays.
        if find_positions or find_median:
            n_label_cutoff = 15
        else:
            n_label_cutoff = 30
    else:
        n_label_cutoff = 0

    if n_label_cutoff and len(idxs) <= n_label_cutoff:
        return _select_via_looping(
            input, labels, idxs, positions, find_min, find_min_positions,
            find_max, find_max_positions, find_median
        )

    order = cupy.lexsort(cupy.stack((input.ravel(), labels.ravel())))
    input = input[order]
    labels = labels[order]
    if find_positions:
        positions = positions[order]

    # Determine indices corresponding to the min or max value for each label
    label_change_index = cupy.searchsorted(labels,
                                           cupy.arange(1, max_label + 2))
    if find_min or find_min_positions or find_median:
        # index corresponding to the minimum value at each label
        min_index = label_change_index[:-1]
    if find_max or find_max_positions or find_median:
        # index corresponding to the maximum value at each label
        max_index = label_change_index[1:] - 1

    result = []
    # the order below matches the order expected by cupyx.scipy.ndimage.extrema
    if find_min:
        mins = cupy.zeros(int(labels.max()) + 2, input.dtype)
        mins[labels[min_index]] = input[min_index]
        result += [mins[idxs]]
    if find_min_positions:
        minpos = cupy.zeros(labels.max().item() + 2, int)
        minpos[labels[min_index]] = positions[min_index]
        result += [minpos[idxs]]
    if find_max:
        maxs = cupy.zeros(int(labels.max()) + 2, input.dtype)
        maxs[labels[max_index]] = input[max_index]
        result += [maxs[idxs]]
    if find_max_positions:
        maxpos = cupy.zeros(labels.max().item() + 2, int)
        maxpos[labels[max_index]] = positions[max_index]
        result += [maxpos[idxs]]
    if find_median:
        locs = cupy.arange(len(labels))
        lo = cupy.zeros(int(labels.max()) + 2, int)
        lo[labels[min_index]] = locs[min_index]
        hi = cupy.zeros(int(labels.max()) + 2, int)
        hi[labels[max_index]] = locs[max_index]
        lo = lo[idxs]
        hi = hi[idxs]
        # lo is an index to the lowest value in input for each label,
        # hi is an index to the largest value.
        # move them to be either the same ((hi - lo) % 2 == 0) or next
        # to each other ((hi - lo) % 2 == 1), then average.
        step = (hi - lo) // 2
        lo += step
        hi -= step
        if input.dtype.kind in 'iub':
            # fix for https://github.com/scipy/scipy/issues/12836
            result += [(input[lo].astype(float) + input[hi].astype(float)) /
                       2.0]
        else:
            result += [(input[lo] + input[hi]) / 2.0]

    return result
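
This private helper backs the public per-label reductions; a hedged usage sketch through ``cupyx.scipy.ndimage.minimum`` and ``maximum`` (assuming those wrappers are available in the installed CuPy version):

import cupy
from cupyx.scipy import ndimage

a = cupy.asarray([[1, 2, 0],
                  [5, 3, 0]])
lbl = cupy.asarray([[1, 1, 0],
                    [2, 2, 0]])
ndimage.minimum(a, lbl, index=cupy.asarray([1, 2]))   # per-label minima: 1 and 3
ndimage.maximum(a, lbl, index=cupy.asarray([1, 2]))   # per-label maxima: 2 and 5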
Example #12
def mean(input, labels=None, index=None):
    """Calculates the mean of the values of an n-D image array, optionally
       at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        mean (cupy.ndarray): mean of values, for each sub-region if
        `labels` and `index` are specified.


    .. seealso:: :func:`scipy.ndimage.mean`
    """
    if not isinstance(input, cupy.ndarray):
        raise TypeError('input must be cupy.ndarray')

    if input.dtype in (cupy.complex64, cupy.complex128):
        raise TypeError("cupyx.scipy.ndimage.mean does not support {}".format(
            input.dtype.type))

    use_kern = False
    # There are constraints on types because of atomicAdd() in CUDA.
    if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
                           cupy.float64, cupy.uint32, cupy.uint64,
                           cupy.ulonglong]:
        warnings.warn(
            'Using the slower implementation as '
            'cupyx.scipy.ndimage.mean supports int32, float16, '
            'float32, float64, uint32, uint64 as data types '
            'for the fast implementation', _util.PerformanceWarning)
        use_kern = True

    def calc_mean_with_intermediate_float(input):
        sum = input.sum()
        count = input.size
        # Does not use `ndarray.mean()` here to return the same results as
        # SciPy does, especially in case `input`'s dtype is float16.
        return sum / cupy.asanyarray(count).astype(float)

    if labels is None:
        return calc_mean_with_intermediate_float(input)

    if not isinstance(labels, cupy.ndarray):
        raise TypeError('label must be cupy.ndarray')

    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        return calc_mean_with_intermediate_float(input[labels > 0])

    if cupy.isscalar(index):
        return calc_mean_with_intermediate_float(input[labels == index])

    if not isinstance(index, cupy.ndarray):
        if not isinstance(index, int):
            raise TypeError('index must be cupy.ndarray or a scalar int')
        else:
            return (input[labels == index]).mean(dtype=cupy.float64)

    return _mean_driver(input, labels, index, use_kern=use_kern)
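
A minimal usage sketch:

import cupy
from cupyx.scipy import ndimage

a = cupy.asarray([[1.0, 2.0],
                  [3.0, 4.0]])
lbl = cupy.asarray([[1, 1],
                    [2, 2]])
ndimage.mean(a, lbl, index=cupy.asarray([1, 2]))   # per-label means: 1.5 and 3.5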
Example #13
def sum_labels(input, labels=None, index=None):
    """Calculates the sum of the values of an n-D image array, optionally
       at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
       sum (cupy.ndarray): sum of values, for each sub-region if
       `labels` and `index` are specified.

    .. seealso:: :func:`scipy.ndimage.sum_labels`
    """
    if not isinstance(input, cupy.ndarray):
        raise TypeError('input must be cupy.ndarray')

    if input.dtype in (cupy.complex64, cupy.complex128):
        raise TypeError("cupyx.scipy.ndimage.sum does not support {}".format(
            input.dtype.type))

    use_kern = False
    # There are constraints on types because of atomicAdd() in CUDA.
    if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
                           cupy.float64, cupy.uint32, cupy.uint64,
                           cupy.ulonglong]:
        warnings.warn(
            'Using the slower implementation as '
            'cupyx.scipy.ndimage.sum supports int32, float16, '
            'float32, float64, uint32, uint64 as data types '
            'for the fast implementation', _util.PerformanceWarning)
        use_kern = True

    if labels is None:
        return input.sum()

    if not isinstance(labels, cupy.ndarray):
        raise TypeError('label must be cupy.ndarray')

    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        return input[labels != 0].sum()

    if not isinstance(index, cupy.ndarray):
        if not isinstance(index, int):
            raise TypeError('index must be cupy.ndarray or a scalar int')
        else:
            return (input[labels == index]).sum()

    if index.size == 0:
        return cupy.array([], dtype=cupy.int64)

    out = cupy.zeros_like(index, dtype=cupy.float64)

    # The following parameters for sum were determined using a Tesla P100.
    if (input.size >= 262144 and index.size <= 4) or use_kern:
        return _ndimage_sum_kernel_2(input, labels, index, out)
    return _ndimage_sum_kernel(input, labels, index, index.size, out)
Example #14
def variance(input, labels=None, index=None):
    """Calculates the variance of the values of an n-D image array, optionally
    at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        cupy.ndarray: Values of variance, for each sub-region if
        `labels` and `index` are specified.

    .. seealso:: :func:`scipy.ndimage.variance`
    """
    if not isinstance(input, cupy.ndarray):
        raise TypeError('input must be cupy.ndarray')

    if input.dtype in (cupy.complex64, cupy.complex128):
        raise TypeError("cupyx.scipy.ndimage.variance doesn't support {}"
                        "".format(input.dtype.type))

    use_kern = False
    # There are constraints on types because of atomicAdd() in CUDA.
    if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
                           cupy.float64, cupy.uint32, cupy.uint64,
                           cupy.ulonglong]:
        warnings.warn(
            'Using the slower implementation because the provided '
            f'type {input.dtype} is not supported by cupyx.scipy.ndimage.sum. '
            'Consider using an array of type int32, float16, '
            'float32, float64, uint32, uint64 as data types '
            'for the fast implementation', _util.PerformanceWarning)
        use_kern = True

    def calc_var_with_intermediate_float(input):
        vals_c = input - input.mean()
        count = vals_c.size
        # Does not use `ndarray.mean()` here to return the same results as
        # SciPy does, especially in case `input`'s dtype is float16.
        return cupy.square(vals_c).sum() / cupy.asanyarray(count).astype(float)

    if labels is None:
        return calc_var_with_intermediate_float(input)

    if not isinstance(labels, cupy.ndarray):
        raise TypeError('label must be cupy.ndarray')

    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        return calc_var_with_intermediate_float(input[labels > 0])

    if cupy.isscalar(index):
        return calc_var_with_intermediate_float(input[labels == index])

    if not isinstance(index, cupy.ndarray):
        if not isinstance(index, int):
            raise TypeError('index must be cupy.ndarray or a scalar int')
        else:
            return (input[labels == index]).var().astype(cupy.float64,
                                                         copy=False)

    mean_val, count = _mean_driver(input, labels, index, True, use_kern)
    if use_kern:
        new_axis = (..., *(cupy.newaxis for _ in range(input.ndim)))
        return cupy.where(labels[None, ...] == index[new_axis],
                          cupy.square(input - mean_val[new_axis]),
                          0).sum(tuple(range(1, input.ndim + 1))) / count
    out = cupy.zeros_like(index, dtype=cupy.float64)
    return _ndimage_variance_kernel(input, labels, index, index.size, mean_val,
                                    out) / count
Example #15
def labeled_comprehension(
    input, labels, index, func, out_dtype, default, pass_positions=False
):
    """Array resulting from applying ``func`` to each labeled region.

    Roughly equivalent to [func(input[labels == i]) for i in index].

    Sequentially applies an arbitrary function (that works on array_like input)
    to subsets of an N-D image array specified by `labels` and `index`.
    The option exists to provide the function with positional parameters as the
    second argument.

    Args:
        input (cupy.ndarray): Data from which to select `labels` to process.
        labels (cupy.ndarray or None):  Labels to objects in `input`. If not
            None, array must be same shape as `input`. If None, `func` is
            applied to raveled `input`.
        index (int, sequence of ints or None): Subset of `labels` to which to
            apply `func`. If a scalar, a single value is returned. If None,
            `func` is applied to all non-zero values of `labels`.
        func (callable): Python function to apply to `labels` from `input`.
        out_dtype (dtype): Dtype to use for `result`.
        default (int, float or None): Default return value when an element of
            `index` does not exist in `labels`.
        pass_positions (bool, optional): If True, pass linear indices to `func`
            as a second argument.

    Returns:
        cupy.ndarray: Result of applying `func` to each of the regions of
        `input` labeled by the values in `index`.

    .. seealso:: :func:`scipy.ndimage.labeled_comprehension`
    """
    as_scalar = cupy.isscalar(index)
    input = cupy.asarray(input)

    if pass_positions:
        positions = cupy.arange(input.size).reshape(input.shape)

    if labels is None:
        if index is not None:
            raise ValueError('index without defined labels')
        if not pass_positions:
            return func(input.ravel())
        else:
            return func(input.ravel(), positions.ravel())

    try:
        input, labels = cupy.broadcast_arrays(input, labels)
    except ValueError:
        raise ValueError(
            'input and labels must have the same shape '
            '(excepting dimensions with width 1)'
        )

    if index is None:
        if not pass_positions:
            return func(input[labels > 0])
        else:
            return func(input[labels > 0], positions[labels > 0])

    index = cupy.atleast_1d(index)
    if cupy.any(index.astype(labels.dtype).astype(index.dtype) != index):
        raise ValueError(
            'Cannot convert index values from <%s> to <%s> '
            '(labels.dtype) without loss of precision'
            % (index.dtype, labels.dtype)
        )

    index = index.astype(labels.dtype)

    # optimization: find min/max in index, and select those parts of labels,
    #               input, and positions
    lo = index.min()
    hi = index.max()
    mask = (labels >= lo) & (labels <= hi)

    # this also ravels the arrays
    labels = labels[mask]
    input = input[mask]
    if pass_positions:
        positions = positions[mask]

    # sort everything by labels
    label_order = labels.argsort()
    labels = labels[label_order]
    input = input[label_order]
    if pass_positions:
        positions = positions[label_order]

    index_order = index.argsort()
    sorted_index = index[index_order]

    def do_map(inputs, output):
        """labels must be sorted"""
        nidx = sorted_index.size

        # Find boundaries for each stretch of constant labels
        # This could be faster, but we already paid N log N to sort labels.
        lo = cupy.searchsorted(labels, sorted_index, side='left')
        hi = cupy.searchsorted(labels, sorted_index, side='right')

        for i, low, high in zip(range(nidx), lo, hi):
            if low == high:
                continue
            output[i] = func(*[inp[low:high] for inp in inputs])

    if out_dtype == object:
        temp = {i: default for i in range(index.size)}
    else:
        temp = cupy.empty(index.shape, out_dtype)
        if default is None and temp.dtype.kind in 'fc':
            default = numpy.nan  # match NumPy floating-point None behavior
        temp[:] = default

    if not pass_positions:
        do_map([input], temp)
    else:
        do_map([input, positions], temp)

    if out_dtype == object:
        # use a list of arrays since object arrays are not supported
        index_order = cupy.asnumpy(index_order)
        output = [temp[i] for i in index_order.argsort()]
    else:
        output = cupy.zeros(index.shape, out_dtype)
        output[cupy.asnumpy(index_order)] = temp
    if as_scalar:
        output = output[0]
    return output
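
A usage sketch adapted from the doctest shown in Example #20 below, assuming ``cupyx.scipy.ndimage`` provides both ``label`` and ``labeled_comprehension``:

import cupy
from cupyx.scipy import ndimage

a = cupy.asarray([[1, 2, 0, 0],
                  [5, 3, 0, 4],
                  [0, 0, 0, 7],
                  [9, 3, 0, 0]])
lbl, nlbl = ndimage.label(a)
lbls = cupy.arange(1, nlbl + 1)
ndimage.labeled_comprehension(a, lbl, lbls, cupy.mean, float, 0)   # -> array([2.75, 5.5, 6.])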
Example #16
    def _set_arrayXarray_sparse(self, row, col, x):
        # Fall back to densifying x
        x = cupy.asarray(x.toarray(), dtype=self.dtype)
        x, _ = cupy.broadcast_arrays(x, row)
        self._set_arrayXarray(row, col, x)
Example #17
File: ranges.py Project: viantirreau/cupy
def meshgrid(*xi, **kwargs):
    """Return coordinate matrices from coordinate vectors.

    Given one-dimensional coordinate arrays ``x1, x2, ... , xn`` this
    function makes N-D grids.

    For one-dimensional arrays ``x1, x2, ... , xn`` with lengths
    ``Ni = len(xi)``, this function returns ``(N1, N2, N3, ..., Nn)`` shaped
    arrays if indexing='ij' or ``(N2, N1, N3, ..., Nn)`` shaped arrays
    if indexing='xy'.

    Unlike NumPy, CuPy currently only supports 1-D arrays as inputs.

    Args:
        xi (tuple of ndarrays): 1-D arrays representing the coordinates
            of a grid.
        indexing ({'xy', 'ij'}, optional): Cartesian ('xy', default) or
            matrix ('ij') indexing of output.
        sparse (bool, optional): If ``True``, a sparse grid is returned in
            order to conserve memory. Default is ``False``.
        copy (bool, optional): If ``False``, views into the original arrays
            are returned. Default is ``True``.

    Returns:
        list of cupy.ndarray

    .. seealso:: :func:`numpy.meshgrid`

    """
    indexing = kwargs.pop('indexing', 'xy')
    copy = bool(kwargs.pop('copy', True))
    sparse = bool(kwargs.pop('sparse', False))
    if kwargs:
        raise TypeError(
            'meshgrid() got an unexpected keyword argument \'{}\''.format(
                list(kwargs)[0]))
    if indexing not in ['xy', 'ij']:
        raise ValueError('Valid values for `indexing` are \'xy\' and \'ij\'.')

    for x in xi:
        if x.ndim != 1:
            raise ValueError('input has to be 1d')
        if not isinstance(x, cupy.ndarray):
            raise ValueError('input has to be cupy.ndarray')
    if len(xi) <= 1:
        return list(xi)

    meshes = []
    for i, x in enumerate(xi):
        if indexing == 'xy' and i == 0:
            left_none = 1
        elif indexing == 'xy' and i == 1:
            left_none = 0
        else:
            left_none = i

        expand_slices = ((None, ) * left_none + (slice(None), ) + (None, ) *
                         (len(xi) - (left_none + 1)))
        meshes.append(x[expand_slices])

    if sparse:
        meshes_br = meshes
    else:
        meshes_br = list(cupy.broadcast_arrays(*meshes))

    if copy:
        for i in range(len(meshes_br)):
            meshes_br[i] = meshes_br[i].copy()
    return meshes_br
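
A minimal usage sketch; with the default 'xy' indexing the first two axes are swapped relative to 'ij':

import cupy

x = cupy.arange(3)
y = cupy.arange(2)
xx, yy = cupy.meshgrid(x, y)                  # 'xy' indexing: both have shape (2, 3)
xi, yi = cupy.meshgrid(x, y, indexing='ij')   # 'ij' indexing: both have shape (3, 2)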
Example #18
def meshgrid(*xi, **kwargs):
    """Return coordinate matrices from coordinate vectors.

    Given one-dimensional coordinate arrays x1, x2, ..., xn, this function
    makes N-D grids.

    For one-dimensional arrays x1, x2, ..., xn with lengths ``Ni = len(xi)``,
    this function returns ``(N1, N2, N3, ..., Nn)`` shaped arrays
    if indexing='ij' or ``(N2, N1, N3, ..., Nn)`` shaped arrays
    if indexing='xy'.

    Unlike NumPy, CuPy currently only supports 1-D arrays as inputs.
    Also, CuPy does not support the ``sparse`` option yet.

    Args:
        xi (tuple of ndarrays): 1-D arrays representing the coordinates
            of a grid.
        indexing ({'xy', 'ij'}, optional): Cartesian ('xy', default) or
            matrix ('ij') indexing of output.
        copy (bool, optional): If ``False``, views into the original arrays
            are returned. Default is ``True``.

    Returns:
        list of cupy.ndarray

    .. seealso:: :func:`numpy.meshgrid`

    """

    indexing = kwargs.pop('indexing', 'xy')
    copy = bool(kwargs.pop('copy', True))
    if kwargs:
        raise TypeError(
            'meshgrid() got an unexpected keyword argument \'{}\''.format(
                list(kwargs)[0]))
    if indexing not in ['xy', 'ij']:
        raise ValueError('Valid values for `indexing` are \'xy\' and \'ij\'.')

    for x in xi:
        if x.ndim != 1:
            raise ValueError('input has to be 1d')
        if not isinstance(x, cupy.ndarray):
            raise ValueError('input has to be cupy.ndarray')
    if len(xi) <= 1:
        return list(xi)

    meshes = []
    for i, x in enumerate(xi):
        if indexing == 'xy' and i == 0:
            left_none = 1
        elif indexing == 'xy' and i == 1:
            left_none = 0
        else:
            left_none = i

        expand_slices = ((None,) * left_none +
                         (slice(None),) +
                         (None,) * (len(xi) - (left_none + 1)))
        meshes.append(x[expand_slices])
    meshes_br = list(cupy.broadcast_arrays(*meshes))

    if copy:
        for i in range(len(meshes_br)):
            meshes_br[i] = meshes_br[i].copy()
    return meshes_br
Example #19
File: generate.py Project: zivzone/cupy
def ravel_multi_index(multi_index, dims, mode='wrap', order='C'):
    """
    Converts a tuple of index arrays into an array of flat indices, applying
    boundary modes to the multi-index.

    Args:
        multi_index (tuple of cupy.ndarray) : A tuple of integer arrays, one
            array for each dimension.
        dims (tuple of ints): The shape of array into which the indices from
            ``multi_index`` apply.
        mode ('raise', 'wrap' or 'clip'), optional: Specifies how out-of-bounds
            indices are handled.  Can specify either one mode or a tuple of
            modes, one mode per index:

            - *'raise'* -- raise an error
            - *'wrap'* -- wrap around (default)
            - *'clip'* -- clip to the range

            In 'clip' mode, a negative index which would normally wrap will
            clip to 0 instead.
        order ('C' or 'F'), optional: Determines whether the multi-index should
            be viewed as indexing in row-major (C-style) or column-major
            (Fortran-style) order.

    Returns:
        raveled_indices (cupy.ndarray): An array of indices into the flattened
            version of an array of dimensions ``dims``.

    .. warning::

        This function may synchronize the device when ``mode == 'raise'``.

    Notes
    -----
    Note that the default `mode` (``'wrap'``) is different than in NumPy. This
    is done to avoid potential device synchronization.

    Examples
    --------
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (7,6))
    array([22, 41, 37])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (7,6),
    ...                        order='F')
    array([31, 41, 13])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (4,6),
    ...                        mode='clip')
    array([22, 23, 19])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (4,4),
    ...                        mode=('clip', 'wrap'))
    array([12, 13, 13])
    >>> cupy.ravel_multi_index(cupy.asarray((3,1,4,1)), (6,7,8,9))
    array(1621)

    .. seealso:: :func:`numpy.ravel_multi_index`, :func:`unravel_index`
    """

    ndim = len(dims)
    if len(multi_index) != ndim:
        raise ValueError(
            "parameter multi_index must be a sequence of "
            "length {}".format(ndim))

    for d in dims:
        if not isinstance(d, numbers.Integral):
            raise TypeError(
                "{} object cannot be interpreted as an integer".format(
                    type(d)))

    if isinstance(mode, str):
        mode = (mode, ) * ndim

    if functools.reduce(operator.mul, dims) > cupy.iinfo(cupy.int64).max:
        raise ValueError("invalid dims: array size defined by dims is larger "
                         "than the maximum possible size")

    s = 1
    ravel_strides = [1] * ndim
    if order is None:
        order = "C"
    if order == "C":
        for i in range(ndim - 2, -1, -1):
            s = s * dims[i + 1]
            ravel_strides[i] = s
    elif order == "F":
        for i in range(1, ndim):
            s = s * dims[i - 1]
            ravel_strides[i] = s
    else:
        raise TypeError("order not understood")

    multi_index = cupy.broadcast_arrays(*multi_index)
    raveled_indices = cupy.zeros(multi_index[0].shape, dtype=cupy.int64)
    for d, stride, idx, _mode in zip(dims, ravel_strides, multi_index, mode):

        if not isinstance(idx, cupy.ndarray):
            raise TypeError("elements of multi_index must be cupy arrays")
        if not cupy.can_cast(idx, cupy.int64, 'same_kind'):
            raise TypeError(
                'multi_index entries could not be cast from dtype(\'{}\') to '
                'dtype(\'{}\') according to the rule \'same_kind\''.format(
                    idx.dtype, cupy.int64().dtype))
        idx = idx.astype(cupy.int64, copy=False)

        if _mode == "raise":
            if cupy.any(cupy.logical_or(idx >= d, idx < 0)):
                raise ValueError("invalid entry in coordinates array")
        elif _mode == "clip":
            idx = cupy.clip(idx, 0, d - 1)
        elif _mode == 'wrap':
            idx = idx % d
        else:
            raise TypeError("Unrecognized mode: {}".format(_mode))
        raveled_indices += stride * idx
    return raveled_indices
Example #20
def labeled_comprehension(input,
                          labels,
                          index,
                          func,
                          out_dtype,
                          default,
                          pass_positions=False):
    """
    Roughly equivalent to [func(input[labels == i]) for i in index].

    Sequentially applies an arbitrary function (that works on array_like input)
    to subsets of an N-D image array specified by `labels` and `index`.
    The option exists to provide the function with positional parameters as the
    second argument.

    Parameters
    ----------
    input : array_like
        Data from which to select `labels` to process.
    labels : array_like or None
        Labels to objects in `input`.
        If not None, array must be same shape as `input`.
        If None, `func` is applied to raveled `input`.
    index : int, sequence of ints or None
        Subset of `labels` to which to apply `func`.
        If a scalar, a single value is returned.
        If None, `func` is applied to all non-zero values of `labels`.
    func : callable
        Python function to apply to `labels` from `input`.
    out_dtype : dtype
        Dtype to use for `result`.
    default : int, float or None
        Default return value when an element of `index` does not exist
        in `labels`.
    pass_positions : bool, optional
        If True, pass linear indices to `func` as a second argument.
        Default is False.

    Returns
    -------
    result : ndarray
        Result of applying `func` to each of the regions of `input` labeled
        by the values in `index`.

    Examples
    --------
    >>> import cupy as cp
    >>> a = cp.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from cupyimg.scipy import  ndimage
    >>> lbl, nlbl = ndimage.label(a)
    >>> lbls = cp.arange(1, nlbl+1)
    >>> ndimage.labeled_comprehension(a, lbl, lbls, cp.mean, float, 0)
    array([ 2.75,  5.5 ,  6.  ])

    Falling back to `default`:

    >>> lbls = cp.arange(1, nlbl+2)
    >>> ndimage.labeled_comprehension(a, lbl, lbls, cp.mean, float, -1)
    array([ 2.75,  5.5 ,  6.  , -1.  ])

    Passing positions:

    >>> def fn(val, pos):
    ...     print("fn says: %s : %s" % (val, pos))
    ...     return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
    ...
    >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
    fn says: [1 2 5 3] : [0 1 4 5]
    fn says: [4 7] : [ 7 11]
    fn says: [9 3] : [12 13]
    array([ 11.,  11., -12.,   0.])

    """

    as_scalar = cupy.isscalar(index)
    input = cupy.asarray(input)

    if pass_positions:
        positions = cupy.arange(input.size).reshape(input.shape)

    if labels is None:
        if index is not None:
            raise ValueError("index without defined labels")
        if not pass_positions:
            return func(input.ravel())
        else:
            return func(input.ravel(), positions.ravel())

    try:
        input, labels = cupy.broadcast_arrays(input, labels)
    except ValueError:
        raise ValueError("input and labels must have the same shape "
                         "(excepting dimensions with width 1)")

    if index is None:
        if not pass_positions:
            return func(input[labels > 0])
        else:
            return func(input[labels > 0], positions[labels > 0])

    index = cupy.atleast_1d(index)
    if cupy.any(index.astype(labels.dtype).astype(index.dtype) != index):
        raise ValueError("Cannot convert index values from <%s> to <%s> "
                         "(labels' type) without loss of precision" %
                         (index.dtype, labels.dtype))

    index = index.astype(labels.dtype)

    # optimization: find min/max in index, and select those parts of
    # labels, input, and positions
    lo = index.min()
    hi = index.max()
    mask = (labels >= lo) & (labels <= hi)

    # this also ravels the arrays
    labels = labels[mask]
    input = input[mask]
    if pass_positions:
        positions = positions[mask]

    # sort everything by labels
    label_order = labels.argsort()
    labels = labels[label_order]
    input = input[label_order]
    if pass_positions:
        positions = positions[label_order]

    index_order = index.argsort()
    sorted_index = index[index_order]

    def do_map(inputs, output):
        """labels must be sorted"""
        nidx = sorted_index.size

        # Find boundaries for each stretch of constant labels
        # This could be faster, but we already paid N log N to sort labels.
        lo = cupy.searchsorted(labels, sorted_index, side="left")
        hi = cupy.searchsorted(labels, sorted_index, side="right")

        for i, l, h in zip(range(nidx), lo, hi):
            if l == h:
                continue
            output[i] = cupy.asnumpy(func(*[inp[l:h] for inp in inputs]))

    temp = numpy.empty(index.shape, out_dtype)
    temp[:] = default
    if not pass_positions:
        do_map([input], temp)
    else:
        do_map([input, positions], temp)

    output = numpy.zeros(index.shape, out_dtype)
    output[cupy.asnumpy(index_order)] = temp
    if as_scalar:
        output = output[0]

    return output
Example #21
def select(condlist, choicelist, default=0):
    """Return an array drawn from elements in choicelist, depending on conditions.

    Args:
        condlist (list of bool arrays): The list of conditions which determine
            from which array in `choicelist` the output elements are taken.
            When multiple conditions are satisfied, the first one encountered
            in `condlist` is used.
        choicelist (list of cupy.ndarray): The list of arrays from which the
            output elements are taken. It has to be of the same length
            as `condlist`.
        default (scalar): If provided, fills the elements of the output where
            all conditions evaluate to ``False``. Default value is 0.

    Returns:
        cupy.ndarray: The output at position m is the m-th element of the
        array in `choicelist` where the m-th element of the corresponding
        array in `condlist` is True.

    .. seealso:: :func:`numpy.select`
    """

    if len(condlist) != len(choicelist):
        raise ValueError(
            'list of cases must be same length as list of conditions')

    if len(condlist) == 0:
        raise ValueError("select with an empty condition list is not possible")

    if not cupy.isscalar(default):
        raise TypeError("default only accepts scalar values")

    for i in range(len(choicelist)):
        if not isinstance(choicelist[i], cupy.ndarray):
            raise TypeError("choicelist only accepts lists of cupy ndarrays")
        cond = condlist[i]
        if cond.dtype.type is not cupy.bool_:
            raise ValueError(
                'invalid entry {} in condlist: should be boolean ndarray'.
                format(i))

    dtype = cupy.result_type(*choicelist)

    condlist = cupy.broadcast_arrays(*condlist)
    choicelist = cupy.broadcast_arrays(*choicelist, default)

    if choicelist[0].ndim == 0:
        result_shape = condlist[0].shape
    else:
        result_shape = cupy.broadcast_arrays(condlist[0],
                                             choicelist[0])[0].shape

    result = cupy.empty(result_shape, dtype)
    cupy.copyto(result, default)

    choicelist = choicelist[-2::-1]
    condlist = condlist[::-1]
    for choice, cond in zip(choicelist, condlist):
        cupy.copyto(result, choice, where=cond)

    return result
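
A minimal usage sketch:

import cupy

x = cupy.arange(6)
condlist = [x < 3, x > 3]
choicelist = [x, x ** 2]
cupy.select(condlist, choicelist, default=42)   # -> array([ 0,  1,  2, 42, 16, 25])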