def __mul__(self, other):
    """Multiplication: scalar scaling, sparse @ sparse, or sparse @ dense.

    Dispatches on the type of ``other``:

    * scalar          -> scale the stored nonzeros.
    * CSR matrix      -> cusparse.csrgemm (sparse-sparse product).
    * CSC matrix      -> csrgemm against ``other.T`` with ``transb=True``,
      which avoids an explicit CSC->CSR conversion.
    * other sparse    -> convert to CSR and retry.
    * dense ndarray   -> 0-d scales, 1-d uses csrmv, 2-d uses csrmm2.

    Returns ``NotImplemented`` for unsupported operand types so Python can
    try the reflected operation.
    """
    if cupy.isscalar(other):
        self.sum_duplicates()
        return self._with_data(self.data * other)
    elif isspmatrix_csr(other):
        # cusparse kernels assume canonical form (no duplicate entries).
        self.sum_duplicates()
        other.sum_duplicates()
        return cusparse.csrgemm(self, other)
    elif csc.isspmatrix_csc(other):
        self.sum_duplicates()
        other.sum_duplicates()
        return cusparse.csrgemm(self, other.T, transb=True)
    elif base.isspmatrix(other):
        # Generic sparse type: convert and retry via the CSR path above.
        return self * other.tocsr()
    elif base.isdense(other):
        if other.ndim == 0:
            self.sum_duplicates()
            return self._with_data(self.data * other)
        elif other.ndim == 1:
            self.sum_duplicates()
            # cusparse expects Fortran-ordered dense operands.
            return cusparse.csrmv(self, cupy.asfortranarray(other))
        elif other.ndim == 2:
            self.sum_duplicates()
            return cusparse.csrmm2(self, cupy.asfortranarray(other))
        else:
            raise ValueError('could not interpret dimensions')
    else:
        return NotImplemented
def test_column_dtypes_correct():
    """Check every regionprops column matches the dtype declared in
    COL_DTYPES (object columns as-is, numeric columns as float/int)."""
    msg = "mismatch with expected type,"
    region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0]
    for col in COL_DTYPES:
        r = region[col]

        if col in OBJECT_COLUMNS:
            # Object columns are stored verbatim; no numeric check applies.
            assert COL_DTYPES[col] == object
            continue

        # TODO: grlee77: check desired types for returned.
        # e.g. currently inertia_tensor_eigvals returns a list of 0-dim
        # arrays

        # Determine the scalar element type of the property value.
        if isinstance(r, (tuple, list)):
            r0 = r[0]
            if isinstance(r0, cp.ndarray) and r0.ndim == 0:
                r0 = r0.item()
            t = type(r0)
        elif cp.isscalar(r):
            t = type(r)
        else:
            t = type(r.ravel()[0].item())

        if cp.issubdtype(t, cp.floating):
            assert (COL_DTYPES[col] == float
                    ), f"{col} dtype {t} {msg} {COL_DTYPES[col]}"
        elif cp.issubdtype(t, cp.integer):
            assert (COL_DTYPES[col] == int
                    ), f"{col} dtype {t} {msg} {COL_DTYPES[col]}"
        else:
            assert False, f"{col} dtype {t} {msg} {COL_DTYPES[col]}"
def _add(self, other, lhs_negative, rhs_negative):
    """Shared addition/subtraction kernel.

    Computes ``(-self if lhs_negative else self) +
    (-other if rhs_negative else other)`` for scalar, sparse, or dense
    ``other``; returns ``NotImplemented`` for anything else.
    """
    if cupy.isscalar(other):
        if other != 0:
            raise NotImplementedError(
                'adding a nonzero scalar to a sparse matrix is not '
                'supported')
        # Adding zero: just honor the sign of the left-hand side.
        return -self if lhs_negative else self.copy()
    if base.isspmatrix(other):
        sign_lhs = -1 if lhs_negative else 1
        sign_rhs = -1 if rhs_negative else 1
        return self._add_sparse(other, sign_lhs, sign_rhs)
    if base.isdense(other):
        dense_self = self.todense()
        if lhs_negative and rhs_negative:
            return -dense_self - other
        if lhs_negative:
            return other - dense_self
        if rhs_negative:
            return dense_self - other
        return dense_self + other
    return NotImplemented
def __mul__(self, other):
    """Multiplication for a CSC matrix: scalar, sparse-sparse, or
    sparse-dense.

    Because the fast kernels operate on CSR storage, most branches either
    transpose (``self.T`` of a CSC matrix is a CSR view) or convert with
    ``tocsr()``, choosing between the ``csrgemm``/``csrgemm2`` and
    ``csrmv``/``spmv`` / ``csrmm2``/``spmm`` backends by runtime
    availability. Raises ``NotImplementedError`` if no suitable cusparse
    routine is available.
    """
    if cupy.isscalar(other):
        self.sum_duplicates()
        return self._with_data(self.data * other)
    elif cupyx.scipy.sparse.isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        if cusparse.check_availability('csrgemm'):
            # self.T is CSR; csrgemm can multiply it transposed in place.
            a = self.T
            return cusparse.csrgemm(a, other, transa=True)
        elif cusparse.check_availability('csrgemm2'):
            # csrgemm2 has no transpose flags, so convert to CSR first.
            a = self.tocsr()
            a.sum_duplicates()
            return cusparse.csrgemm2(a, other)
        else:
            raise NotImplementedError
    elif isspmatrix_csc(other):
        self.sum_duplicates()
        other.sum_duplicates()
        if cusparse.check_availability('csrgemm'):
            a = self.T
            b = other.T
            return cusparse.csrgemm(a, b, transa=True, transb=True)
        elif cusparse.check_availability('csrgemm2'):
            a = self.tocsr()
            b = other.tocsr()
            a.sum_duplicates()
            b.sum_duplicates()
            return cusparse.csrgemm2(a, b)
        else:
            raise NotImplementedError
    elif cupyx.scipy.sparse.isspmatrix(other):
        # Generic sparse type: convert and retry via the CSR path above.
        return self * other.tocsr()
    elif base.isdense(other):
        if other.ndim == 0:
            self.sum_duplicates()
            return self._with_data(self.data * other)
        elif other.ndim == 1:
            self.sum_duplicates()
            if cusparse.check_availability('csrmv'):
                csrmv = cusparse.csrmv
            elif cusparse.check_availability('spmv'):
                csrmv = cusparse.spmv
            else:
                raise NotImplementedError
            # Again exploit that self.T is CSR and ask for the transpose.
            return csrmv(self.T, cupy.asfortranarray(other), transa=True)
        elif other.ndim == 2:
            self.sum_duplicates()
            if cusparse.check_availability('csrmm2'):
                csrmm = cusparse.csrmm2
            elif cusparse.check_availability('spmm'):
                csrmm = cusparse.spmm
            else:
                raise NotImplementedError
            return csrmm(self.T, cupy.asfortranarray(other), transa=True)
        else:
            raise ValueError('could not interpret dimensions')
    else:
        return NotImplemented
def solveAnomaly(self, ex_mat=None, step=1, perm=None, parser=None):
    """Forward-solve the EIT problem for a set of excitation lines.

    Parameters
    ----------
    ex_mat : array-like or None
        Excitation pattern, one row per stimulation line. If None, a
        default ``eit_scan_lines(20, 8)`` pattern is used.
    step : int or np.ndarray
        Measurement step per excitation line. A scalar is broadcast to
        every line; an array must have at least one entry per line.
    perm : scalar, array of shape (n_tri,), or None
        Element permittivity. None uses ``self.tri_perm``.
    parser : unused
        Kept for interface compatibility.

    Returns
    -------
    (V, meas, new_ind) : tuple of numpy arrays
        Measured voltages, the (excitation, measurement) index matrix,
        and the reindexing array from ``voltMeter``.
    """
    if ex_mat is None:
        ex_mat = cp.array(eit_scan_lines(20, 8))
    else:
        ex_mat = cp.array(ex_mat)

    n_lines = ex_mat.shape[0]
    if type(step) is int:
        # Bug fix: the step array was previously hard-coded to length 20,
        # which broke whenever a caller supplied an ex_mat with a
        # different number of excitation lines. Size it from ex_mat.
        step_arr = step * cp.ones(n_lines)
    elif type(step) is np.ndarray:
        if np.shape(step)[0] >= n_lines:
            step_arr = cp.array(step)
        else:
            raise ValueError('Array is not long enough!')
    else:
        raise TypeError('Type of step is not int or ndarray!')

    # initialize the permittivity on element
    if perm is None:
        perm0 = self.tri_perm
    elif cp.isscalar(perm):
        # NOTE(review): the scalar value of `perm` is ignored here and a
        # uniform permittivity of 1 is used — confirm this is intended.
        perm0 = cp.ones(self.n_tri, dtype=float)
    else:
        assert perm.shape == (self.n_tri, )
        perm0 = cp.array(perm)

    # Assemble the global system and invert it to obtain nodal potentials
    # for every excitation line at once.
    ke = self.calculate_ke()
    Ag = self.assemble_sparse(ke, self.tri, perm0, self.n_pts, ref=self.ref)
    r_matrix = cp.linalg.inv(Ag)
    b = self._natural_boundary(ex_mat)
    f = cp.dot(r_matrix, b).T
    # Potentials at the electrode nodes only.
    f_el = f[:, self.n_pts:]

    # Build measurement pairs and difference the electrode potentials.
    volt_mat, ex_mat, new_ind = self.voltMeter(ex_mat, step_arr)
    V = self.substractRow(f_el, volt_mat, new_ind)
    meas = cp.concatenate((ex_mat, volt_mat), axis=1)
    return cp.asnumpy(V), cp.asnumpy(meas), cp.asnumpy(new_ind)
def __init__(self, A, alpha):
    """Build an operator representing ``alpha * A``.

    ``A`` must be a ``LinearOperator`` and ``alpha`` a scalar; the result
    dtype is inferred from both operands.
    """
    # Validate both operands before touching state.
    if not isinstance(A, LinearOperator):
        raise ValueError('LinearOperator expected as A')
    if not cupy.isscalar(alpha):
        raise ValueError('scalar expected as alpha')
    result_dtype = _get_dtype([A], [type(alpha)])
    super(_ScaledLinearOperator, self).__init__(result_dtype, A.shape)
    # Keep the operands for lazy evaluation by matvec/matmat.
    self.args = (A, alpha)
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """Apply a function to 1-D slices along the given axis.

    Args:
        func1d (function (M,) -> (Nj...)): This function should accept 1-D
            arrays. It is applied to 1-D slices of ``arr`` along the
            specified axis. It must return a 1-D ``cupy.ndarray``.
        axis (integer): Axis along which ``arr`` is sliced.
        arr (cupy.ndarray (Ni..., M, Nk...)): Input array.
        args: Additional arguments for ``func1d``.
        kwargs: Additional keyword arguments for ``func1d``.

    Returns:
        cupy.ndarray: The output array. The shape of ``out`` is identical to
        the shape of ``arr``, except along the ``axis`` dimension. This axis
        is removed, and replaced with new dimensions equal to the shape of
        the return value of ``func1d``. So if ``func1d`` returns a scalar
        ``out`` will have one fewer dimensions than ``arr``.

    .. seealso:: :func:`numpy.apply_along_axis`
    """
    ndim = arr.ndim
    axis = internal._normalize_axis_index(axis, ndim)
    # Move the sliced axis to the end so each slice is inarr_view[ind, :].
    inarr_view = cupy.moveaxis(arr, axis, -1)

    # compute indices for the iteration axes, and append a trailing ellipsis
    # to prevent 0d arrays decaying to scalars
    inds = index_tricks.ndindex(inarr_view.shape[:-1])
    inds = (ind + (Ellipsis,) for ind in inds)

    # invoke the function on the first item (also determines the output
    # shape and dtype for the buffer allocated below)
    try:
        ind0 = next(inds)
    except StopIteration:
        raise ValueError(
            'Cannot apply_along_axis when any iteration dimensions are 0'
        )
    res = func1d(inarr_view[ind0], *args, **kwargs)
    if cupy.isscalar(res):
        # scalar outputs need to be transferred to a device ndarray
        res = cupy.asarray(res)

    # build a buffer for storing evaluations of func1d.
    # remove the requested axis, and add the new ones on the end.
    # laid out so that each write is contiguous.
    # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
    buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)

    # save the first result, then compute and save all remaining results
    buff[ind0] = res
    for ind in inds:
        buff[ind] = func1d(inarr_view[ind], *args, **kwargs)

    # restore the inserted axes back to where they belong
    for i in range(res.ndim):
        buff = cupy.moveaxis(buff, -1, axis)

    return buff
def __rmul__(self, other):
    """Reflected multiplication: compute ``other * self``.

    Scalars commute, so they are routed through ``__mul__``. Anything
    else is handled via the identity ``(A @ B) == (B.T @ A.T).T``.
    """
    # Python scalars and 0-d dense arrays commute with matrix products.
    if cupy.isscalar(other) or (isdense(other) and other.ndim == 0):
        return self * other
    try:
        other_t = other.T
    except AttributeError:
        # No transpose available: let Python report the type error.
        return NotImplemented
    return (self.T * other_t).T
def _list_indexing(X, key, key_dtype): """Index a Python list.""" if np.isscalar(key) or isinstance(key, slice): # key is a slice or a scalar return X[key] if key_dtype == 'bool': # key is a boolean array-like return list(compress(X, key)) # key is a integer array-like of key return [X[idx] for idx in key]
def maximum_position(input, labels=None, index=None):
    """Find the positions of the maximums of the values of an array at labels.

    For each region specified by `labels`, the position of the maximum
    value of `input` within the region is returned.

    Args:
        input (cupy.ndarray):
            Array of values. For each region specified by `labels`, the
            maximal values of `input` over the region is computed.
        labels (cupy.ndarray, optional): An array of integers marking
            different regions over which the position of the maximum value
            of `input` is to be computed. `labels` must have the same shape
            as `input`. If `labels` is not specified, the location of the
            first maximum over the whole array is returned.

            The `labels` argument only works when `index` is specified.
        index (array_like, optional): A list of region labels that are taken
            into account for finding the location of the maxima. If `index`
            is None, the ``first`` maximum over all elements where `labels`
            is non-zero is returned.

            The `index` argument only works when `labels` is specified.

    Returns:
        Tuple of ints or list of tuples of ints that specify the location of
        maxima of `input` over the regions determined by `labels` and whose
        index is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is
        returned specifying the location of the first maximal value of
        `input`.

    .. note::
        When `input` has multiple identical maxima within a labeled region,
        the coordinates returned are not guaranteed to match those returned
        by SciPy.

    .. seealso:: :func:`scipy.ndimage.maximum_position`
    """
    dims = numpy.asarray(input.shape)
    # see numpy.unravel_index to understand this line.
    dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]

    # _select returns flat (raveled) indices of the maxima.
    result = _select(input, labels, index, find_max_positions=True)[0]

    # have to transfer result back to the CPU to return index tuples
    if result.ndim == 0:
        result = int(result)
    else:
        result = cupy.asnumpy(result)

    if cupy.isscalar(result):
        # Single flat index -> one coordinate tuple.
        return tuple((result // dim_prod) % dims)

    # One coordinate tuple per requested label.
    return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
def _get_coeffs(x):
    """Return the 1-D coefficient array underlying *x*.

    Accepts a ``cupy.poly1d``, a scalar, or a 0/1-d ``cupy.ndarray``;
    rejects multidimensional arrays and any other type.
    """
    if isinstance(x, cupy.poly1d):
        # poly1d already stores its coefficients.
        return x._coeffs
    if cupy.isscalar(x):
        # Promote a bare scalar to a length-1 coefficient vector.
        return cupy.atleast_1d(x)
    if not isinstance(x, cupy.ndarray):
        raise TypeError('Unsupported type')
    coeffs = cupy.atleast_1d(x)
    if coeffs.ndim != 1:
        raise ValueError('Multidimensional inputs are not supported')
    return coeffs
def _infer_regionprop_dtype(func, *, intensity, ndim):
    """Infer the dtype of a region property calculated by func.

    If a region property function always returns the same shape and type of
    output regardless of input size, then the dtype is the dtype of the
    returned array. Otherwise, the property has object dtype.

    Parameters
    ----------
    func : callable
        Function to be tested. The signature should be
        array[bool] -> Any if intensity is False, or
        *(array[bool], array[float]) -> Any otherwise.
    intensity : bool
        Whether the regionprop is calculated on an intensity image.
    ndim : int
        The number of dimensions for which to check func.

    Returns
    -------
    dtype : NumPy data type
        The data type of the returned property.
    """
    # Build a tiny sample image with two regions of different sizes:
    # a single corner pixel (label 1) and the rest of a block (label 2).
    labels = [1, 2]
    sample = cp.zeros((3, ) * ndim, dtype=np.intp)
    sample[(0, ) * ndim] = labels[0]
    sample[(slice(1, None), ) * ndim] = labels[1]
    propmasks = [(sample == n) for n in labels]
    if intensity and _infer_number_of_required_args(func) == 2:

        def _func(mask):
            # Supply a synthetic intensity image alongside the mask.
            return func(mask, cp.random.random(sample.shape))

    else:
        _func = func
    props1, props2 = map(_func, propmasks)
    if (cp.isscalar(props1) and cp.isscalar(props2)
            or cp.asarray(props1).shape == cp.asarray(props2).shape):
        # Output shape is independent of region size: use the value dtype.
        dtype = cp.asarray(props1).dtype.type
    else:
        # Output size varies with the region: must store as objects.
        dtype = np.object_
    return dtype
def update(self, z, R=None, H=None):
    """
    Add a new measurement (z) to the Kalman filter.

    Parameters
    ----------
    z : array(points, dim_z, 1)
        measurement for this update. z can be a scalar if dim_z is 1,
        otherwise it must be convertible to a column vector.

        If you pass in a value of H, z must be a column vector the
        of the correct size.
    R : array(points, dim_z, dim_z), scalar, or None
        Optionally provide R to override the measurement noise for this
        one call, otherwise self.R will be used.
    H : array(points, dim_z, dim_x), or None
        Optionally provide H to override the measurement function for
        this one call, otherwise self.H will be used.
    """
    # A missing measurement is a no-op.
    if z is None:
        return

    if R is None:
        R = self.R
    elif cp.isscalar(R):
        # Expand the scalar noise into one (dim_z, dim_z) diagonal matrix
        # per filter point.
        R = cp.repeat(
            (cp.identity(self.dim_z, dtype=self.x.dtype) *
             R)[cp.newaxis, :, :],
            self.points,
            axis=0,
        )
    else:
        R = cp.asarray(R)

    if H is None:
        H = self.H
    else:
        H = cp.asarray(H)

    z = cp.asarray(z)

    # Delegate the actual state/covariance update to the fused kernel;
    # self.x and self.P are updated in place.
    self.update_kernel(
        self.x,
        z,
        H,
        self.P,
        R,
    )
def dot(self, x):
    """Matrix-matrix or matrix-vector multiplication.

    Composes lazily with another ``LinearOperator`` or a scalar;
    otherwise applies the operator to a 1-d vector or 2-d matrix.
    """
    if isinstance(x, LinearOperator):
        # Operator-times-operator: build a lazy product.
        return _ProductLinearOperator(self, x)
    if cupy.isscalar(x):
        # Scalar: build a lazy scaling.
        return _ScaledLinearOperator(self, x)
    ndim = x.ndim
    if ndim == 1 or (ndim == 2 and x.shape[1] == 1):
        # Column vectors go through the matvec path.
        return self.matvec(x)
    if ndim == 2:
        return self.matmat(x)
    raise ValueError('expected 1-d or 2-d array, got %r' % x)
def piecewise(x, condlist, funclist):
    """Evaluate a piecewise-defined function.

    Args:
        x (cupy.ndarray): input domain
        condlist (list of cupy.ndarray):
            Each boolean array/ scalar corresponds to a function
            in funclist. Length of funclist is equal to that of condlist.
            If one extra function is given, it is used as the default
            value when the otherwise condition is met
        funclist (list of scalars): list of scalar functions.

    Returns:
        cupy.ndarray: the scalar values in funclist on portions of x
        defined by condlist.

    .. warning::

        This function currently doesn't support callable functions,
        args and kw parameters.

    .. seealso:: :func:`numpy.piecewise`
    """
    if cupy.isscalar(condlist):
        condlist = [condlist]

    condlen = len(condlist)
    funclen = len(funclist)
    if condlen == funclen:
        # No default: uncovered elements stay zero.
        out = cupy.zeros(x.shape, x.dtype)
    elif condlen + 1 == funclen:
        # One extra function serves as the "otherwise" default value.
        func = funclist[-1]
        funclist = funclist[:-1]
        if callable(func):
            raise NotImplementedError(
                'Callable functions are not supported currently')
        out = cupy.empty(x.shape, x.dtype)
        out[...] = func
    else:
        raise ValueError('with {} condition(s), either {} or {} functions'
                         ' are expected'.format(condlen, condlen,
                                                condlen + 1))
    # Apply each condition/value pair; later conditions overwrite earlier
    # ones where they overlap.
    for condition, func in zip(condlist, funclist):
        if callable(func):
            raise NotImplementedError(
                'Callable functions are not supported currently')
        if isinstance(func, cupy.ndarray):
            func = func.astype(x.dtype)
        _piecewise_krnl(condition, func, out)
    return out
def polyval(p, x):
    """Evaluates a polynomial at specific values.

    Args:
        p (cupy.ndarray or cupy.poly1d): input polynomial.
        x (scalar, cupy.ndarray): values at which the polynomial
        is evaluated.

    Returns:
        cupy.ndarray or cupy.poly1d: polynomial evaluated at x.

    .. warning::

        This function doesn't currently support poly1d values to evaluate.

    .. seealso:: :func:`numpy.polyval`
    """
    if isinstance(p, cupy.poly1d):
        p = p.coeffs
    if not isinstance(p, cupy.ndarray) or p.ndim == 0:
        raise TypeError('p can be 1d ndarray or poly1d object only')
    if p.ndim != 1:
        # to be consistent with polyarithmetic routines' behavior of
        # not allowing multidimensional polynomial inputs.
        raise ValueError('p can be 1d ndarray or poly1d object only')
    # TODO(Dahlia-Chehata): Support poly1d x
    if (isinstance(x, cupy.ndarray) and x.ndim <= 1) or numpy.isscalar(x):
        # Column vector of evaluation points, so powers broadcast below.
        val = cupy.asarray(x).reshape(-1, 1)
    else:
        raise NotImplementedError(
            'poly1d or non 1d values are not currently supported')
    # Evaluate via an explicit power table: out[i] = sum_k p[k] * x[i]**k.
    out = p[::-1] * cupy.power(val, cupy.arange(p.size))
    out = out.sum(axis=1)
    dtype = cupy.result_type(p, val)
    if cupy.isscalar(x) or x.ndim == 0:
        # reshape() with no arguments collapses the length-1 result
        # to a 0-d array.
        return out.astype(dtype, copy=False).reshape()
    # The remaining branches reproduce NumPy's output-dtype conventions.
    if p.dtype == numpy.complex128 and val.dtype in [
            numpy.float16, numpy.float32, numpy.complex64
    ]:
        return out.astype(numpy.complex64, copy=False)
    p_kind_score = numpy.dtype(p.dtype.char.lower()).kind
    x_kind_score = numpy.dtype(val.dtype.char.lower()).kind
    if (p.dtype.kind not in 'c' and
            (p_kind_score == x_kind_score or val.dtype.kind in 'c')) or (
                issubclass(p.dtype.type, numpy.integer) and
                issubclass(val.dtype.type, numpy.floating)):
        return out.astype(val.dtype, copy=False)
    return out.astype(dtype, copy=False)
def multiply(self, other):
    """Point-wise multiplication by another matrix, vector or scalar"""
    if cupy.isscalar(other):
        return multiply_by_scalar(self, other)
    if _util.isdense(other):
        # Canonicalize and promote the dense operand to 2-D.
        self.sum_duplicates()
        return multiply_by_dense(self, cupy.atleast_2d(other))
    if isspmatrix_csr(other):
        # Both operands must be free of duplicate entries.
        self.sum_duplicates()
        other.sum_duplicates()
        return multiply_by_csr(self, other)
    raise TypeError('expected scalar, dense matrix/vector or csr matrix')
def multigammaln(a, d):
    r"""Returns the log of multivariate gamma, also sometimes called the
    generalized gamma.

    Parameters
    ----------
    a : cupy.ndarray
        The multivariate gamma is computed for each item of `a`.
    d : int
        The dimension of the space of integration.

    Returns
    -------
    res : ndarray
        The values of the log multivariate gamma at the given points `a`.

    See Also
    --------
    :func:`scipy.special.multigammaln`
    """
    # d must be a host-side integral scalar.
    if not cupy.isscalar(d) or (math.floor(d) != d):
        raise ValueError("d should be a positive integer (dimension)")
    if cupy.isscalar(a):
        a = cupy.asarray(a, dtype=float)
    if int(cupy.any(a <= 0.5 * (d - 1))):
        raise ValueError("condition a > 0.5 * (d-1) not met")

    # Constant prefactor: d*(d-1)/4 * log(pi).
    prefactor = (d * (d - 1) * 0.25) * math.log(math.pi)

    # j = 1 term of the sum.
    total = gammaln(a)
    if total.dtype.kind != 'f' or a.dtype.kind != 'f':
        # make sure all integer dtypes do the summation with float64
        if a.dtype.kind != 'f':
            total = total.astype(cupy.float64)
    total = prefactor + total
    # Accumulate the remaining terms j = 2 .. d.
    for j in range(2, d + 1):
        total += gammaln(a - (j - 1.0) / 2)
    return total
def __truediv__(self, other):
    """Point-wise division by scalar"""
    if util.isscalarlike(other):
        # Division is implemented as multiplication by the reciprocal,
        # stored as a 0-d device array of the appropriate dtype.
        if self.dtype == numpy.complex64:
            # Note: This is a work-around to make the output dtype the same
            # as SciPy. It might be SciPy version dependent.
            dtype = numpy.float32
        else:
            if cupy.isscalar(other):
                dtype = numpy.float64
            else:
                # 0-d array divisor: promote against float64.
                dtype = numpy.promote_types(numpy.float64, other.dtype)
        d = cupy.array(1. / other, dtype=dtype)
        return multiply_by_scalar(self, d)
    # TODO(anaruse): Implement divide by dense or sparse matrix
    raise NotImplementedError
def polyval(p, x):
    """Evaluates a polynomial at specific values.

    Args:
        p (cupy.ndarray or cupy.poly1d): input polynomial.
        x (scalar, cupy.ndarray): values at which the polynomial
        is evaluated.

    Returns:
        cupy.ndarray or cupy.poly1d: polynomial evaluated at x.

    .. warning::

        This function doesn't currently support poly1d values to evaluate.

    .. seealso:: :func:`numpy.polyval`
    """
    if isinstance(p, cupy.poly1d):
        p = p.coeffs
    if not isinstance(p, cupy.ndarray) or p.ndim == 0:
        raise TypeError('p must be 1d ndarray or poly1d object')
    if p.ndim > 1:
        raise ValueError('p must be 1d array')
    if isinstance(x, cupy.poly1d):
        # TODO(asi1024): Needs performance improvement.
        # Horner-style accumulation in the poly1d domain: the result is a
        # polynomial, not an array.
        dtype = numpy.result_type(x.coeffs, 1)
        res = cupy.poly1d(cupy.array([0], dtype=dtype))
        prod = cupy.poly1d(cupy.array([1], dtype=dtype))
        for c in p[::-1]:
            res = res + prod * c
            prod = prod * x
        return res
    dtype = numpy.result_type(p.dtype.type(0), x)
    p = p.astype(dtype, copy=False)
    if p.size == 0:
        # Empty polynomial evaluates to zero everywhere.
        return cupy.zeros(x.shape, dtype)
    if dtype == numpy.bool_:
        # Boolean arithmetic degenerates: any nonzero high-order
        # coefficient behaves like 1.
        return p.any() * x + p[-1]
    if not cupy.isscalar(x):
        # Append an axis so the power table broadcasts per element.
        x = cupy.asarray(x, dtype=dtype)[..., None]
    x = x ** cupy.arange(p.size, dtype=dtype)
    # out = sum_k p[k] * x**k over the trailing (power) axis.
    return (p[::-1] * x).sum(axis=-1, dtype=dtype)
def maximum(self, other):
    """In-place element-wise maximum with a scalar or another VectorCupy.

    Overwrites this vector's data with ``max(self, other)`` and returns
    ``self`` for chaining.
    """
    if cp.isscalar(other):
        self.getNdArray()[:] = cp.maximum(self.getNdArray(), other)
        return self
    if isinstance(other, VectorCupy):
        # Both shape and device must agree before mixing buffers.
        if not self.checkSame(other):
            raise ValueError(
                'Dimensionality not equal: self = %s; other = %s'
                % (self.shape, other.shape))
        if not self._check_same_device(other):
            raise ValueError(
                'Provided input has to live in the same device')
        self.getNdArray()[:] = cp.maximum(self.getNdArray(),
                                          other.getNdArray())
        return self
    raise TypeError(
        "Provided input has to be either a scalar or a %s!" % self.whoami)
def __getitem__(self, index):
    """Map ``index`` values through the in/out value tables.

    Accepts a scalar, a slice, an integer array, or a boolean mask; a
    scalar input yields a single mapped value, everything else an array.
    """
    scalar = cp.isscalar(index)
    if scalar:
        # Wrap so map_array always receives an array; unwrapped at the end.
        index = cp.asarray([index])
    elif isinstance(index, slice):
        start = index.start or 0  # treat None or 0 the same way
        stop = index.stop if index.stop is not None else len(self)
        step = index.step
        index = cp.arange(start, stop, step)
    if index.dtype == bool:
        # Boolean mask: convert to the selected integer positions.
        index = cp.flatnonzero(index)
    out = map_array(
        index,
        self.in_values.astype(index.dtype, copy=False),
        self.out_values,
    )
    if scalar:
        out = out[0]
        # TODO: call .item() to transfer 0-dim array to host?
    return out
def __mul__(self, other):
    """Multiplication: scalar scaling, sparse @ sparse, or sparse @ dense.

    The dense-vector branch additionally picks between three SpMV
    implementations: a CUB device kernel, ``csrmvEx`` (when the operands
    are suitably aligned and nnz > 0), or plain ``csrmv``.
    """
    if cupy.isscalar(other):
        self.sum_duplicates()
        return self._with_data(self.data * other)
    elif isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        return cusparse.csrgemm(self, other)
    elif csc.isspmatrix_csc(other):
        # Multiply against other.T (CSR) with transb to skip conversion.
        self.sum_duplicates()
        other.sum_duplicates()
        return cusparse.csrgemm(self, other.T, transb=True)
    elif base.isspmatrix(other):
        # Generic sparse type: convert and retry via the CSR path above.
        return self * other.tocsr()
    elif base.isdense(other):
        if other.ndim == 0:
            self.sum_duplicates()
            return self._with_data(self.data * other)
        elif other.ndim == 1:
            self.sum_duplicates()
            other = cupy.asfortranarray(other)
            # csrmvEx does not work if nnz == 0
            if self.nnz > 0 and cusparse.csrmvExIsAligned(self, other):
                if cupy.cuda.cub_enabled and other.flags.c_contiguous:
                    # CUB-accelerated SpMV path.
                    return device_csrmv(self.shape[0], self.shape[1],
                                        self.nnz, self.data, self.indptr,
                                        self.indices, other)
                else:
                    return cusparse.csrmvEx(self, other)
            else:
                return cusparse.csrmv(self, other)
        elif other.ndim == 2:
            self.sum_duplicates()
            return cusparse.csrmm2(self, cupy.asfortranarray(other))
        else:
            raise ValueError('could not interpret dimensions')
    else:
        return NotImplemented
def isscalarlike(x):
    """Return True for Python scalars and 0-dimensional dense arrays."""
    if cupy.isscalar(x):
        return True
    return cupy.sparse.base.isdense(x) and x.ndim == 0
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
             axis=0):
    """Returns an array with evenly-spaced values within a given interval.

    Instead of specifying the step width like :func:`cupy.arange`, this
    function requires the total number of elements specified.

    Args:
        start (scalar or array_like): Starting value(s) of the sequence.
        stop (scalar or array_like): Ending value(s) of the sequence, unless
            ``endpoint`` is set to ``False``. In that case, the sequence
            consists of all but the last of ``num + 1`` evenly spaced
            samples, so that ``stop`` is excluded. Note that the step size
            changes when ``endpoint`` is ``False``.
        num: Number of elements.
        endpoint (bool): If ``True``, the stop value is included as the last
            element. Otherwise, the stop value is omitted.
        retstep (bool): If ``True``, this function returns (array, step).
            Otherwise, it returns only the array.
        dtype: Data type specifier. It is inferred from the start and stop
            arguments by default.
        axis (int): The axis in the result to store the samples. Relevant
            only if start or stop are array-like. By default ``0``, the
            samples will be along a new axis inserted at the beginning.
            Use ``-1`` to get an axis at the end.

    Returns:
        cupy.ndarray: The 1-D array of ranged values.

    .. seealso:: :func:`numpy.linspace`
    """
    if num < 0:
        raise ValueError('linspace with num<0 is not supported')
    div = (num - 1) if endpoint else num

    scalar_start = cupy.isscalar(start)
    scalar_stop = cupy.isscalar(stop)
    if scalar_start and scalar_stop:
        # Fast path for the common all-scalar case.
        return _linspace_scalar(start, stop, num, endpoint, retstep, dtype)

    # Promote array-like endpoints to floating-point device arrays.
    if not scalar_start:
        if not (isinstance(start, cupy.ndarray) and start.dtype.kind == 'f'):
            start = cupy.asarray(start) * 1.0

    if not scalar_stop:
        if not (isinstance(stop, cupy.ndarray) and stop.dtype.kind == 'f'):
            stop = cupy.asarray(stop) * 1.0

    dt = cupy.result_type(start, stop, float(num))
    if dtype is None:
        # In actual implementation, only float is used
        dtype = dt

    delta = stop - start

    # ret = cupy.arange(0, num, dtype=dt).reshape((-1,) + (1,) * delta.ndim)
    ret = cupy.empty((num, ), dtype=dt)
    _arange_ufunc(0.0, 1.0, ret, dtype=dt)
    ret = ret.reshape((-1, ) + (1, ) * delta.ndim)

    # In-place multiplication y *= delta/div is faster, but prevents the
    # multiplicand from overriding what class is produced, and thus
    # prevents, e.g. use of Quantities, see numpy#7142. Hence, we multiply
    # in place only for standard scalar types.
    if num > 1:
        step = delta / div
        if cupy.any(step == 0):
            # Special handling for denormal numbers, numpy#5437
            ret /= div
            ret = ret * delta
        else:
            ret = ret * step
    else:
        # 0 and 1 item long sequences have an undefined step
        step = float('nan')
        # Multiply with delta to allow possible override of output class.
        ret = ret * delta

    ret += start

    if endpoint and num > 1:
        # Force the exact stop value to avoid floating-point drift.
        ret[-1] = stop

    if axis != 0:
        ret = cupy.moveaxis(ret, 0, axis)

    if cupy.issubdtype(dtype, cupy.integer):
        # Truncate toward -inf before the integer cast, as NumPy does.
        cupy.floor(ret, out=ret)

    ret = ret.astype(dtype, copy=False)

    if retstep:
        return ret, step
    else:
        return ret
def _props_to_dict(regions, properties=("label", "bbox"), separator="-"):
    """Convert image region properties list into a column dictionary.

    Parameters
    ----------
    regions : (N,) list
        List of RegionProperties objects as returned by :func:`regionprops`.
    properties : tuple or list of str, optional
        Properties that will be included in the resulting dictionary
        For a list of available properties, please see :func:`regionprops`.
        Users should remember to add "label" to keep track of region
        identities.
    separator : str, optional
        For non-scalar properties not listed in OBJECT_COLUMNS, each element
        will appear in its own column, with the index of that element
        separated from the property name by this separator. For example,
        the inertia tensor of a 2D region will appear in four columns:
        ``inertia_tensor-0-0``, ``inertia_tensor-0-1``,
        ``inertia_tensor-1-0``, and ``inertia_tensor-1-1`` (where the
        separator is ``-``).

        Object columns are those that cannot be split in this way because
        the number of columns would change depending on the object. For
        example, ``image`` and ``coords``.

    Returns
    -------
    out_dict : dict
        Dictionary mapping property names to an array of values of that
        property, one value per region. This dictionary can be used as
        input to pandas ``DataFrame`` to map property names to columns in
        the frame and regions to rows.

    Notes
    -----
    Each column contains either a scalar property, an object property, or
    an element in a multidimensional array.

    Properties with scalar values for each region, such as "eccentricity",
    will appear as a float or int array with that property name as key.

    Multidimensional properties *of fixed size* for a given image dimension,
    such as "centroid" (every centroid will have three elements in a 3D
    image, no matter the region size), will be split into that many columns,
    with the name {property_name}{separator}{element_num} (for 1D
    properties), {property_name}{separator}{elem_num0}{separator}{elem_num1}
    (for 2D properties), and so on.

    For multidimensional properties that don't have a fixed size, such as
    "image" (the image of a region varies in size depending on the region
    size), an object array will be used, with the corresponding property
    name as the key.

    Examples
    --------
    >>> from skimage import data, util, measure
    >>> image = data.coins()
    >>> label_image = measure.label(image > 110, connectivity=image.ndim)
    >>> proplist = regionprops(label_image, image)
    >>> props = _props_to_dict(proplist, properties=['label',
    ...                        'inertia_tensor', 'inertia_tensor_eigvals'])
    >>> props  # doctest: +ELLIPSIS +SKIP
    {'label': array([ 1, 2, ...]), ...
     'inertia_tensor-0-0': array([ 4.012...e+03, 8.51..., ...]), ...
     ...,
     'inertia_tensor_eigvals-1': array([ 2.67...e+02, 2.83..., ...])}

    The resulting dictionary can be directly passed to pandas, if installed,
    to obtain a clean DataFrame:

    >>> import pandas as pd  # doctest: +SKIP
    >>> data = pd.DataFrame(props)  # doctest: +SKIP
    >>> data.head()  # doctest: +SKIP
         label  inertia_tensor-0-0  ...  inertia_tensor_eigvals-1
    0        1         4012.909888  ...                267.065503
    1        2            8.514739  ...                  2.834806
    2        3            0.666667  ...                  0.000000
    3        4            0.000000  ...                  0.000000
    4        5            0.222222  ...                  0.111111

    """
    out = {}
    n = len(regions)
    for prop in properties:
        dtype = COL_DTYPES[prop]
        column_buffer = cp.zeros(n, dtype=dtype)
        # Inspect the first region's value to decide the column layout.
        r = regions[0][prop]
        is_0dim_array = isinstance(r, cp.ndarray) and r.ndim == 0
        # scalars and objects are dedicated one column per prop
        # array properties are raveled into multiple columns
        # for more info, refer to notes 1
        if cp.isscalar(r) or is_0dim_array or prop in OBJECT_COLUMNS:
            for i in range(n):
                column_buffer[i] = regions[i][prop]
            out[prop] = cp.copy(column_buffer)
        else:
            if isinstance(r, cp.ndarray):
                shape = r.shape
            else:
                shape = (len(r), )

            # One output column per element index of the fixed-size array.
            for ind in np.ndindex(shape):
                for k in range(n):
                    loc = ind if len(ind) > 1 else ind[0]
                    column_buffer[k] = regions[k][prop][loc]
                modified_prop = separator.join(map(str, (prop, ) + ind))
                out[modified_prop] = cp.copy(column_buffer)
    return out
def _select(input, labels=None, index=None, find_min=False, find_max=False,
            find_min_positions=False, find_max_positions=False,
            find_median=False):
    """Return one or more of: min, max, min position, max position, median.

    If neither `labels` or `index` is provided, these are the global values
    in `input`. If `index` is None, but `labels` is provided, a global
    value across all non-zero labels is given. When both `labels` and
    `index` are provided, lists of values are provided for each labeled
    region specified in `index`. See further details in
    :func:`cupyx.scipy.ndimage.minimum`, etc.

    Used by minimum, maximum, minimum_position, maximum_position, extrema.
    """
    find_positions = find_min_positions or find_max_positions
    positions = None
    if find_positions:
        # Flat indices with the same shape as input; reported as positions.
        positions = cupy.arange(input.size).reshape(input.shape)

    def single_group(vals, positions):
        # Compute the requested statistics over a single group of values.
        result = []
        if find_min:
            result += [vals.min()]
        if find_min_positions:
            result += [positions[vals == vals.min()][0]]
        if find_max:
            result += [vals.max()]
        if find_max_positions:
            result += [positions[vals == vals.max()][0]]
        if find_median:
            result += [cupy.median(vals)]
        return result

    if labels is None:
        return single_group(input, positions)

    # ensure input and labels match sizes
    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        # One global group over every labeled (non-zero) element.
        mask = labels > 0
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    if cupy.isscalar(index):
        # One group: elements carrying the single requested label.
        mask = labels == index
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    index = cupy.asarray(index)

    safe_int = _safely_castable_to_int(labels.dtype)
    min_label = labels.min()
    max_label = labels.max()

    # Remap labels to unique integers if necessary, or if the largest label
    # is larger than the number of values.
    if (not safe_int or min_label < 0 or max_label > labels.size):
        # Remap labels, and indexes
        unique_labels, labels = cupy.unique(labels, return_inverse=True)
        idxs = cupy.searchsorted(unique_labels, index)

        # Make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = unique_labels[idxs] == index
    else:
        # Labels are an integer type, and there aren't too many
        idxs = cupy.asanyarray(index, int).copy()
        found = (idxs >= 0) & (idxs <= max_label)

    # Route labels that were not found to an out-of-range slot.
    idxs[~found] = max_label + 1

    input = input.ravel()
    labels = labels.ravel()
    if find_positions:
        positions = positions.ravel()

    using_cub = _core._accelerator.ACCELERATOR_CUB in \
        cupy._core.get_routine_accelerators()

    if using_cub:
        # Cutoff values below were determined empirically for relatively
        # large input arrays.
        if find_positions or find_median:
            n_label_cutoff = 15
        else:
            n_label_cutoff = 30
    else:
        n_label_cutoff = 0

    if n_label_cutoff and len(idxs) <= n_label_cutoff:
        # Few labels requested: a simple per-label loop is faster.
        return _select_via_looping(
            input, labels, idxs, positions, find_min, find_min_positions,
            find_max, find_max_positions, find_median
        )

    # Sort by (label, value) so each label's min/max are at the ends of
    # its contiguous run.
    order = cupy.lexsort(cupy.stack((input.ravel(), labels.ravel())))
    input = input[order]
    labels = labels[order]
    if find_positions:
        positions = positions[order]

    # Determine indices corresponding to the min or max value for each label
    label_change_index = cupy.searchsorted(labels,
                                           cupy.arange(1, max_label + 2))
    if find_min or find_min_positions or find_median:
        # index corresponding to the minimum value at each label
        min_index = label_change_index[:-1]
    if find_max or find_max_positions or find_median:
        # index corresponding to the maximum value at each label
        max_index = label_change_index[1:] - 1

    result = []
    # the order below matches the order expected by cupy.ndimage.extrema
    if find_min:
        mins = cupy.zeros(int(labels.max()) + 2, input.dtype)
        mins[labels[min_index]] = input[min_index]
        result += [mins[idxs]]
    if find_min_positions:
        minpos = cupy.zeros(labels.max().item() + 2, int)
        minpos[labels[min_index]] = positions[min_index]
        result += [minpos[idxs]]
    if find_max:
        maxs = cupy.zeros(int(labels.max()) + 2, input.dtype)
        maxs[labels[max_index]] = input[max_index]
        result += [maxs[idxs]]
    if find_max_positions:
        maxpos = cupy.zeros(labels.max().item() + 2, int)
        maxpos[labels[max_index]] = positions[max_index]
        result += [maxpos[idxs]]
    if find_median:
        locs = cupy.arange(len(labels))
        lo = cupy.zeros(int(labels.max()) + 2, int)
        lo[labels[min_index]] = locs[min_index]
        hi = cupy.zeros(int(labels.max()) + 2, int)
        hi[labels[max_index]] = locs[max_index]
        lo = lo[idxs]
        hi = hi[idxs]
        # lo is an index to the lowest value in input for each label,
        # hi is an index to the largest value.
        # move them to be either the same ((hi - lo) % 2 == 0) or next
        # to each other ((hi - lo) % 2 == 1), then average.
        step = (hi - lo) // 2
        lo += step
        hi -= step
        if input.dtype.kind in 'iub':
            # fix for https://github.com/scipy/scipy/issues/12836
            result += [(input[lo].astype(float) +
                        input[hi].astype(float)) / 2.0]
        else:
            result += [(input[lo] + input[hi]) / 2.0]
    return result
def mean(input, labels=None, index=None):
    """Calculates the mean of the values of an n-D image array, optionally
    at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        mean (cupy.ndarray): mean of values, for each sub-region if
        `labels` and `index` are specified.

    .. seealso:: :func:`scipy.ndimage.mean`
    """
    if not isinstance(input, cupy.ndarray):
        raise TypeError('input must be cupy.ndarray')

    if input.dtype in (cupy.complex64, cupy.complex128):
        # Fix: removed a stray '%' that was left over from an old
        # %-formatting string; only str.format is used now.
        raise TypeError("cupyx.scipy.ndimage.mean does not support {}".format(
            input.dtype.type))

    use_kern = False
    # There is constraints on types because of atomicAdd() in CUDA.
    if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
                           cupy.float64, cupy.uint32, cupy.uint64,
                           cupy.ulonglong]:
        # Fix: corrected the "implmentation" typos in the warning text.
        warnings.warn(
            'Using the slower implementation as '
            'cupyx.scipy.ndimage.mean supports int32, float16, '
            'float32, float64, uint32, uint64 as data types '
            'for the fast implementation', _util.PerformanceWarning)
        use_kern = True

    def calc_mean_with_intermediate_float(input):
        sum = input.sum()
        count = input.size
        # Does not use `ndarray.mean()` here to return the same results as
        # SciPy does, especially in case `input`'s dtype is float16.
        return sum / cupy.asanyarray(count).astype(float)

    if labels is None:
        return calc_mean_with_intermediate_float(input)

    if not isinstance(labels, cupy.ndarray):
        raise TypeError('label must be cupy.ndarray')

    # Align shapes so element-wise label masking below is valid.
    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        # No index given: average over all labeled (non-zero) elements.
        return calc_mean_with_intermediate_float(input[labels > 0])

    if cupy.isscalar(index):
        return calc_mean_with_intermediate_float(input[labels == index])

    if not isinstance(index, cupy.ndarray):
        if not isinstance(index, int):
            raise TypeError('index must be cupy.ndarray or a scalar int')
        else:
            return (input[labels == index]).mean(dtype=cupy.float64)

    # Vector of indices: delegate to the driver (fast kernel unless the
    # dtype forced the slow path above).
    return _mean_driver(input, labels, index, use_kern=use_kern)
def variance(input, labels=None, index=None):
    """Calculates the variance of the values of an n-D image array, optionally
    at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        cupy.ndarray: Values of variance, for each sub-region if `labels` and
        `index` are specified.

    .. seealso:: :func:`scipy.ndimage.variance`
    """
    if not isinstance(input, cupy.ndarray):
        raise TypeError('input must be cupy.ndarray')

    if input.dtype in (cupy.complex64, cupy.complex128):
        # Fix: removed a stray '%' that was left over from an old
        # %-formatting string; only str.format is used now.
        raise TypeError("cupyx.scipy.ndimage.variance doesn't support {}"
                        "".format(input.dtype.type))

    use_kern = False
    # There are constraints on types because of atomicAdd() in CUDA.
    if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
                           cupy.float64, cupy.uint32, cupy.uint64,
                           cupy.ulonglong]:
        # Fix: the warning previously named cupyx.scipy.ndimage.sum even
        # though this is the variance routine.
        warnings.warn(
            'Using the slower implementation because the provided '
            f'type {input.dtype} is not supported by '
            'cupyx.scipy.ndimage.variance. '
            'Consider using an array of type int32, float16, '
            'float32, float64, uint32, uint64 as data types '
            'for the fast implementation', _util.PerformanceWarning)
        use_kern = True

    def calc_var_with_intermediate_float(input):
        vals_c = input - input.mean()
        count = vals_c.size
        # Does not use `ndarray.mean()` here to return the same results as
        # SciPy does, especially in case `input`'s dtype is float16.
        return cupy.square(vals_c).sum() / cupy.asanyarray(count).astype(float)

    if labels is None:
        return calc_var_with_intermediate_float(input)

    if not isinstance(labels, cupy.ndarray):
        raise TypeError('label must be cupy.ndarray')

    # Align shapes so element-wise label masking below is valid.
    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        return calc_var_with_intermediate_float(input[labels > 0])

    if cupy.isscalar(index):
        return calc_var_with_intermediate_float(input[labels == index])

    if not isinstance(index, cupy.ndarray):
        if not isinstance(index, int):
            raise TypeError('index must be cupy.ndarray or a scalar int')
        else:
            return (input[labels == index]).var().astype(cupy.float64,
                                                         copy=False)

    # Per-label means and element counts from the shared driver.
    mean_val, count = _mean_driver(input, labels, index, True, use_kern)
    if use_kern:
        # Slow path: broadcast index against labels, accumulate squared
        # deviations per index, then normalize by the per-label counts.
        new_axis = (..., *(cupy.newaxis for _ in range(input.ndim)))
        return cupy.where(labels[None, ...] == index[new_axis],
                          cupy.square(input - mean_val[new_axis]),
                          0).sum(tuple(range(1, input.ndim + 1))) / count
    out = cupy.zeros_like(index, dtype=cupy.float64)
    return _ndimage_variance_kernel(input, labels, index, index.size, mean_val,
                                    out) / count
def labeled_comprehension(
    input, labels, index, func, out_dtype, default, pass_positions=False
):
    """Array resulting from applying ``func`` to each labeled region.

    Roughly equivalent to [func(input[labels == i]) for i in index].

    Sequentially applies an arbitrary function (that works on array_like
    input) to subsets of an N-D image array specified by `labels` and
    `index`. The option exists to provide the function with positional
    parameters as the second argument.

    Args:
        input (cupy.ndarray): Data from which to select `labels` to process.
        labels (cupy.ndarray or None): Labels to objects in `input`. If not
            None, array must be same shape as `input`. If None, `func` is
            applied to raveled `input`.
        index (int, sequence of ints or None): Subset of `labels` to which to
            apply `func`. If a scalar, a single value is returned. If None,
            `func` is applied to all non-zero values of `labels`.
        func (callable): Python function to apply to `labels` from `input`.
        out_dtype (dtype): Dtype to use for `result`.
        default (int, float or None): Default return value when a element of
            `index` does not exist in `labels`.
        pass_positions (bool, optional): If True, pass linear indices to
            `func` as a second argument.

    Returns:
        cupy.ndarray: Result of applying `func` to each of `labels` to
        `input` in `index`.

    .. seealso:: :func:`scipy.ndimage.labeled_comprehension`
    """
    # Remember whether the caller passed a scalar index so a scalar result
    # can be returned at the end.
    as_scalar = cupy.isscalar(index)
    input = cupy.asarray(input)

    if pass_positions:
        # Linear indices into the (pre-masking) input, forwarded to `func`
        # as its second argument when requested.
        positions = cupy.arange(input.size).reshape(input.shape)

    if labels is None:
        # With no labels, `func` is applied to the entire raveled input.
        if index is not None:
            raise ValueError('index without defined labels')
        if not pass_positions:
            return func(input.ravel())
        else:
            return func(input.ravel(), positions.ravel())

    try:
        input, labels = cupy.broadcast_arrays(input, labels)
    except ValueError:
        raise ValueError(
            'input and labels must have the same shape '
            '(excepting dimensions with width 1)'
        )

    if index is None:
        # No index: apply `func` once to all labeled (non-zero) elements.
        if not pass_positions:
            return func(input[labels > 0])
        else:
            return func(input[labels > 0], positions[labels > 0])

    index = cupy.atleast_1d(index)
    # Round-trip cast detects index values that cannot be represented in
    # labels' dtype (e.g. floats or out-of-range integers).
    if cupy.any(index.astype(labels.dtype).astype(index.dtype) != index):
        raise ValueError(
            'Cannot convert index values from <%s> to <%s> '
            '(labels.dtype) without loss of precision' %
            (index.dtype, labels.dtype)
        )

    index = index.astype(labels.dtype)

    # optimization: find min/max in index, and select those parts of labels,
    #               input, and positions
    lo = index.min()
    hi = index.max()
    mask = (labels >= lo) & (labels <= hi)

    # this also ravels the arrays
    labels = labels[mask]
    input = input[mask]
    if pass_positions:
        positions = positions[mask]

    # sort everything by labels
    label_order = labels.argsort()
    labels = labels[label_order]
    input = input[label_order]
    if pass_positions:
        positions = positions[label_order]

    # Sort the requested indices too, so each index maps to one contiguous
    # run within the sorted labels.
    index_order = index.argsort()
    sorted_index = index[index_order]

    def do_map(inputs, output):
        """labels must be sorted"""
        nidx = sorted_index.size

        # Find boundaries for each stretch of constant labels
        # This could be faster, but we already paid N log N to sort labels.
        lo = cupy.searchsorted(labels, sorted_index, side='left')
        hi = cupy.searchsorted(labels, sorted_index, side='right')

        for i, low, high in zip(range(nidx), lo, hi):
            if low == high:
                # This index value does not occur in labels; the
                # pre-filled default is kept.
                continue
            output[i] = func(*[inp[low:high] for inp in inputs])

    if out_dtype == object:
        # Object outputs are collected in a dict (object ndarrays are not
        # supported by CuPy).
        temp = {i: default for i in range(index.size)}
    else:
        temp = cupy.empty(index.shape, out_dtype)
        if default is None and temp.dtype.kind in 'fc':
            default = numpy.nan  # match NumPy floating-point None behavior
        temp[:] = default

    if not pass_positions:
        do_map([input], temp)
    else:
        do_map([input, positions], temp)

    if out_dtype == object:
        # use a list of arrays since object arrays are not supported
        index_order = cupy.asnumpy(index_order)
        output = [temp[i] for i in index_order.argsort()]
    else:
        # Scatter results back into the caller's original index order.
        output = cupy.zeros(index.shape, out_dtype)
        output[cupy.asnumpy(index_order)] = temp

    if as_scalar:
        output = output[0]
    return output