Example #1
 def _promote(type1, type2):
     if type2 is None:
         return type1
     if type1.shape is not None:
         if not type1.shape == type2.shape:
             raise Exception("We do not handle changes to dtypes that have shape")
         return np.promote_types(type1.base, type2.base), type1.shape
     return np.promote_types(type1, type2)
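
A quick sketch of the promotion rules this helper builds on: np.promote_types only accepts plain dtypes, which is why _promote splits a shaped sub-dtype into its base type and shape.

import numpy as np

# Plain dtypes can be passed straight to np.promote_types.
assert np.promote_types(np.int32, np.float32) == np.float64

# A shaped sub-dtype (e.g. a record field holding a length-3 vector)
# exposes its element type as .base and its shape as .shape.
shaped = np.dtype((np.int32, (3,)))
print(shaped.base, shaped.shape)                  # int32 (3,)
print(np.promote_types(shaped.base, np.float64))  # float64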
Example #2
    def axpy(self, alpha, x, ind=None, x_ind=None):
        assert self.check_ind_unique(ind)
        assert x.check_ind(x_ind)
        assert self.dim == x.dim
        assert self.len_ind(ind) == x.len_ind(x_ind) or x.len_ind(x_ind) == 1
        assert isinstance(alpha, _INDEXTYPES) \
            or isinstance(alpha, np.ndarray) and alpha.shape == (self.len_ind(ind),)

        if self._refcount[0] > 1:
            self._deep_copy()

        if NUMPY_INDEX_QUIRK:
            if self._len == 0 and hasattr(ind, '__len__'):
                ind = None
            if x._len == 0 and hasattr(x_ind, '__len__'):
                x_ind = None

        if np.all(alpha == 0):
            return

        B = x._array[:x._len] if x_ind is None else x._array[x_ind]

        alpha_type = type(alpha)
        alpha_dtype = alpha.dtype if alpha_type is np.ndarray else alpha_type
        if self._array.dtype != alpha_dtype or self._array.dtype != B.dtype:
            dtype = np.promote_types(self._array.dtype, alpha_dtype)
            dtype = np.promote_types(dtype, B.dtype)
            self._array = self._array.astype(dtype)

        if np.all(alpha == 1):
            if ind is None:
                self._array[:self._len] += B
            elif isinstance(ind, Number) and B.ndim == 2:
                self._array[ind] += B.reshape((B.shape[1],))
            else:
                self._array[ind] += B
        elif np.all(alpha == -1):
            if ind is None:
                self._array[:self._len] -= B
            elif isinstance(ind, Number) and B.ndim == 2:
                self._array[ind] -= B.reshape((B.shape[1],))
            else:
                self._array[ind] -= B
        else:
            if isinstance(alpha, np.ndarray):
                alpha = alpha[:, np.newaxis]
            if ind is None:
                self._array[:self._len] += (B * alpha)
            elif isinstance(ind, Number):
                self._array[ind] += (B * alpha).reshape((-1,))
            else:
                self._array[ind] += (B * alpha)
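
The dtype handling above follows a common in-place update pattern: += cannot widen an array in place, so the buffer is promoted up front. A minimal standalone sketch of the same pattern (inplace_axpy is an illustrative name, not part of the class above):

import numpy as np

def inplace_axpy(y, alpha, x):
    # Promote y's buffer before the in-place update; += on an int32
    # buffer would reject (or truncate) float operands.
    dtype = np.promote_types(y.dtype, x.dtype)
    dtype = np.promote_types(dtype, np.min_scalar_type(alpha))
    if y.dtype != dtype:
        y = y.astype(dtype)
    y += alpha * x
    return y

print(inplace_axpy(np.zeros(3, dtype=np.int32), 0.5, np.ones(3)))
# [0.5 0.5 0.5]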
Example #3
def multiply_dm_dm(matrix1, matrix2):
    """Multiply a block diagonal matrix by another diagonal matrix.

    Parameters
    ----------
    matrix1 : (nblocks, n, m) np.ndarray
        An array containing `nblocks` diagonal blocks of size (`n`, `m`).
    matrix2 : (nblocks, m, k) np.ndarray
        An array containing `nblocks` diagonal blocks of size (`m`, `k`).

    Returns
    -------
    nmatrix : (nblocks, n, k) np.ndarray
         An array containing `nblocks` diagonal blocks of size (`n`, `k`).
    """


    nblocks, n, m = matrix1.shape
    k = matrix2.shape[2]

    if matrix2.shape[:2] != (nblocks, m):
        raise Exception("Shapes not compatible.")

    # Check dtype
    dt = np.promote_types(matrix1.dtype, matrix2.dtype)

    nmatrix = np.empty((nblocks, n, k), dtype=dt)

    for i in range(nblocks):
        nmatrix[i] = np.dot(matrix1[i], matrix2[i])

    return nmatrix
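
A usage sketch, assuming the multiply_dm_dm above is in scope; the output dtype follows np.promote_types:

import numpy as np

m1 = np.arange(12, dtype=np.int32).reshape(2, 2, 3)  # 2 blocks of (2, 3)
m2 = np.ones((2, 3, 4), dtype=np.float32)            # 2 blocks of (3, 4)
out = multiply_dm_dm(m1, m2)
print(out.shape, out.dtype)  # (2, 2, 4) float64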
Example #4
File: npydecl.py Project: esc/numba
    def generic(self, args, kws):
        assert not kws

        if len(args) == 1:
            # 0-dim arrays return one result array
            ary = args[0]
            ndim = max(ary.ndim, 1)
            retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)
            return signature(retty, ary)

        elif len(args) == 3:
            cond, x, y = args
            retdty = from_dtype(np.promote_types(
                        as_dtype(getattr(args[1], 'dtype', args[1])),
                        as_dtype(getattr(args[2], 'dtype', args[2]))))
            if isinstance(cond, types.Array):
                # array where()
                if isinstance(x, types.Array) and isinstance(y, types.Array):
                    if (cond.ndim == x.ndim == y.ndim):
                        if x.layout == y.layout == cond.layout:
                            retty = types.Array(retdty, x.ndim, x.layout)
                        else:
                            retty = types.Array(retdty, x.ndim, 'C')
                        return signature(retty, *args)
                else:
                    # x and y both scalar
                    retty = types.Array(retdty, cond.ndim, cond.layout)
                    return signature(retty, *args)
            else:
                # scalar where()
                if not isinstance(x, types.Array):
                    retty = types.Array(retdty, 0, 'C')
                    return signature(retty, *args)
Example #5
 def test_basic(self):
     a, b = self.a, self.b
     x = eval("a @ b")
     assert_equal(x.dtype,
                  np.promote_types(a.dtype, b.dtype))
     assert_allclose(x.todense(),
                     np.dot(a.todense(), b.todense()), atol=1e-15)
Example #6
def tensordot(lhs, rhs, axes=2):
    if isinstance(axes, Iterable):
        left_axes, right_axes = axes
    else:
        left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
        right_axes = tuple(range(0, axes))

    if isinstance(left_axes, int):
        left_axes = (left_axes,)
    if isinstance(right_axes, int):
        right_axes = (right_axes,)
    if isinstance(left_axes, list):
        left_axes = tuple(left_axes)
    if isinstance(right_axes, list):
        right_axes = tuple(right_axes)

    dt = np.promote_types(lhs.dtype, rhs.dtype)

    left_index = list(alphabet[:lhs.ndim])
    right_index = list(ALPHABET[:rhs.ndim])
    out_index = left_index + right_index

    for l, r in zip(left_axes, right_axes):
        out_index.remove(right_index[r])
        right_index[r] = left_index[l]

    intermediate = atop(_tensordot, out_index,
                        lhs, left_index,
                        rhs, right_index, dtype=dt,
                        axes=(left_axes, right_axes))

    result = intermediate.sum(axis=left_axes)
    return result
Example #7
def get_volume_pixeldata(sorted_slices):
    """
    the slope and intercept calculation can cause the slices to have different dtypes,
    so we should find the dtype that can cover all of them

    :type sorted_slices: list of slices
    :param sorted_slices: slices sorted in the correct order to create the volume
    """
    slices = []
    combined_dtype = None
    for slice_ in sorted_slices:
        slice_data = _get_slice_pixeldata(slice_)
        slice_data = slice_data[numpy.newaxis, :, :]
        slices.append(slice_data)
        if combined_dtype is None:
            combined_dtype = slice_data.dtype
        else:
            combined_dtype = numpy.promote_types(combined_dtype, slice_data.dtype)

    # create the new volume with the correct data
    vol = numpy.concatenate(slices, axis=0)

    # Done
    vol = numpy.transpose(vol, (2, 1, 0))
    return vol
Example #8
 def unify_pairs(self, first, second):
     """
     Choose PyObject type as the abstract if we fail to determine a concrete
     type.
     """
     # TODO: should add an option to reject unsafe type conversion
     d = self.type_compatibility(fromty=first, toty=second)
     if d is None:
         return types.pyobject
     elif d == 'exact':
         # Same type
         return first
     elif d == 'promote':
         return second
     elif d in ('safe', 'unsafe'):
         assert first in types.number_domain
         assert second in types.number_domain
         a = numpy.dtype(str(first))
         b = numpy.dtype(str(second))
         # Just use NumPy coercion rules
         sel = numpy.promote_types(a, b)
         # Convert NumPy dtype back to Numba types
         return getattr(types, str(sel))
     else:
         raise Exception("type_compatibility returned %s" % d)
Example #9
File: numpy.py Project: pymor/pymor
    def append(self, other, remove_from_other=False):
        assert self.dim == other.dim
        assert not remove_from_other or (other is not self and getattr(other, 'base', None) is not self)

        if self._refcount[0] > 1:
            self._deep_copy()

        other_array = other.to_numpy()
        len_other = len(other_array)
        if len_other == 0:
            return

        if len_other <= self._array.shape[0] - self._len:
            if self._array.dtype != other_array.dtype:
                self._array = self._array.astype(np.promote_types(self._array.dtype, other_array.dtype))
            self._array[self._len:self._len + len_other] = other_array
        else:
            self._array = np.append(self._array[:self._len], other_array, axis=0)
        self._len += len_other

        if remove_from_other:
            if other.is_view:
                del other.base[other.ind]
            else:
                del other[:]
Example #10
 def dtype(self):
     """Return dtype of image data in file."""
     # subblock data can be of different pixel type
     dtype = self.filtered_subblock_directory[0].dtype[-2:]
     for directory_entry in self.filtered_subblock_directory:
         dtype = numpy.promote_types(dtype, directory_entry.dtype[-2:])
     return dtype
Example #11
    def unify_pairs(self, first, second):
        """
        Choose PyObject type as the abstract if we fail to determine a concrete
        type.
        """
        # TODO: should add an option to reject unsafe type conversion
        d = self.type_compatibility(fromty=first, toty=second)
        if d is None:
            # Complex is not allowed to downcast implicitly.
            # Need to try the other direction of implicit cast to find the
            # most general type of the two.
            first, second = second, first   # swap operand order
            d = self.type_compatibility(fromty=first, toty=second)

        if d is None:
            return types.pyobject
        elif d == 'exact':
            # Same type
            return first
        elif d == 'promote':
            return second
        elif d in ('safe', 'unsafe'):
            assert first in types.number_domain
            assert second in types.number_domain
            a = numpy.dtype(str(first))
            b = numpy.dtype(str(second))
            # Just use NumPy coercion rules
            sel = numpy.promote_types(a, b)
            # Convert NumPy dtype back to Numba types
            return getattr(types, str(sel))
        elif d == 'int-tuple-coerce':
            return types.UniTuple(dtype=types.intp, count=len(first))
        else:
            raise Exception("type_compatibility returned %s" % d)
def full_cumsum(data, axis=None, dtype=None):
    """
    A version of `numpy.cumsum` that includes the sum of the empty slice (zero). This
    makes it satisfy the invariant::
    
        cumsum(a)[i] == sum(a[:i])
    
    which is a useful property to simplify the formula for the moving average. The result
    will be one entry longer than *data* along *axis*.
    """
    
    # All we need to do is construct a result array with the appropriate type and
    # dimensions, and then feed a slice of it to cumsum, setting the rest to zero.
    
    shape = list(data.shape)
    if axis is None:
        shape[0] += 1
    else:
        shape[axis] += 1
    # Mimic cumsum's behavior with the dtype argument: use the original data type or
    # the system's native word, whichever has the greater width. (This prevents us from
    # attempting a cumulative sum using an 8-bit integer, for instance.)
    if dtype is None:
        dtype = np.promote_types(data.dtype, np.min_scalar_type(-sys.maxsize))
    
    out = np.zeros(shape, dtype)
    
    s = axis_slice(axis)
    np.cumsum(data, axis, dtype, out[s[1:]])
    
    return out
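
The dtype line mirrors np.cumsum's default accumulator rule: never sum in a type narrower than the native signed word. A quick check of what it produces:

import sys
import numpy as np

# min_scalar_type(-sys.maxsize) is the native signed word (int64 on
# 64-bit platforms), so narrow integer dtypes are widened before summing.
print(np.promote_types(np.int8, np.min_scalar_type(-sys.maxsize)))     # int64
print(np.promote_types(np.float32, np.min_scalar_type(-sys.maxsize)))  # float64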
Example #13
def _resample_coord(coord, src_coord, direction, target_points, interpolate):
    if coord.ndim != 1:
        raise iris.exceptions.NotYetImplementedError(
            'Linear interpolation of multi-dimensional coordinates.')
    coord_points = coord.points
    if coord is src_coord:
        dtype = coord_points.dtype
        if dtype.kind == 'i':
            dtype = np.promote_types(dtype, np.float16)
        new_points = np.array(target_points, dtype=dtype)
    else:
        if getattr(src_coord, 'circular', False):
            coord_points = np.append(coord_points, coord_points[0])

        # If the source coordinate was monotonic decreasing, we need to
        # flip this coordinate's values.
        if direction == -1:
            coord_points = iris.util.reverse(coord_points, axes=0)

        new_points = interpolate(coord_points, target_points)

    # Watch out for DimCoord instances that are no longer monotonic
    # after the resampling.
    try:
        new_coord = coord.copy(new_points)
    except ValueError:
        new_coord = iris.coords.AuxCoord.from_coord(coord).copy(new_points)
    return new_coord
Example #14
 def _set_or_promote_dtype(self, column_dtypes, c, dtype):
     existing_dtype = column_dtypes.get(c)
     if existing_dtype is None or existing_dtype != dtype:
         # Promote ints to floats - as we can't easily represent NaNs
         if np.issubdtype(dtype, int):
             dtype = np.dtype('f8')
         column_dtypes[c] = np.promote_types(column_dtypes.get(c, dtype), dtype)
Example #15
 def normalize_compare_value(self, other):
     other_dtype = np.min_scalar_type(other)
     if other_dtype.kind in 'biuf':
         other_dtype = np.promote_types(self.dtype, other_dtype)
         ary = utils.scalar_broadcast_to(other, shape=len(self),
                                         dtype=other_dtype)
         return self.replace(data=Buffer(ary), dtype=ary.dtype)
     else:
         raise TypeError('cannot broadcast {}'.format(type(other)))
Example #16
 def _update_edges(self, weights, copy=False):
   weights = np.asarray(weights)
   res_dtype = np.promote_types(weights.dtype, self._adj.dtype)
   adj = self._adj.astype(res_dtype, copy=copy)
   adj[adj != 0] = weights
   if copy:
     return DenseAdjacencyMatrixGraph(adj)
   self._adj = adj
   return self
Example #17
 def _weightable_adj(self, weight, copy):
   weight = np.atleast_1d(weight)
   adj = self._adj
   res_dtype = np.promote_types(weight.dtype, adj.dtype)
   if copy:
     adj = adj.copy()
   if res_dtype != adj.dtype:
     adj.data = adj.data.astype(res_dtype)
   return adj
Example #18
 def test_dense_sparse(self):
     # dense @ sparse -> dense
     a, b = self.a.todense(), self.b
     x = eval("a @ b")
     assert_(isinstance(x, np.ndarray))
     assert_equal(x.dtype,
                  np.promote_types(a.dtype, b.dtype))
     assert_allclose(x,
                     np.dot(a, b.todense()), atol=1e-15)
Example #19
 def _promote_types(self, dtype, dtype_str):
     if dtype_str == str(dtype):
         return dtype
     prev_dtype = self._dtype(dtype_str)
     if dtype.names is None:
         rtn = np.promote_types(dtype, prev_dtype)
     else:
         rtn = _promote_struct_dtypes(dtype, prev_dtype)
     rtn = np.dtype(rtn, metadata=dict(dtype.metadata or {}))
     return rtn
Example #20
File: numpy.py Project: pymor/pymor
 def __isub__(self, other):
     assert self.dim == other.dim
     if self._refcount[0] > 1:
         self._deep_copy()
     other_dtype = other.base._array.dtype if other.is_view else other._array.dtype
     common_dtype = np.promote_types(self._array.dtype, other_dtype)
     if self._array.dtype != common_dtype:
         self._array = self._array.astype(common_dtype)
     self._array[:self._len] -= other.base._array[other.ind] if other.is_view else other._array[:other._len]
     return self
Example #21
 def func(d1, d2):
     a1 = None if d1 is None else np.array(1, dtype=d1)
     a2 = None if d2 is None else np.array(1, dtype=d2)
     if d1 is None and d2 is None:
         assert_raises(ValueError, cast, [a1, a2])
         return
     expected = d1 if d2 is None else d2 if d1 is None else np.promote_types(d1, d2)
     a1_, a2_ = cast([a1, a2])
     assert_dtype(a1_, expected)
     assert_dtype(a2_, expected)
Example #22
File: numpy.py Project: pymor/pymor
 def __imul__(self, other):
     assert isinstance(other, Number) \
         or isinstance(other, np.ndarray) and other.shape == (len(self),)
     if self._refcount[0] > 1:
         self._deep_copy()
     other_dtype = other.dtype if isinstance(other, np.ndarray) else type(other)
     common_dtype = np.promote_types(self._array.dtype, other_dtype)
     if self._array.dtype != common_dtype:
         self._array = self._array.astype(common_dtype)
     self._array[:self._len] *= other
     return self
Example #23
 def combine(self, other):
     if hasattr(other, "dtype"):
         combined_dtype = np.promote_types(self.dtype, other.dtype)
         if combined_dtype == self.dtype:
             return self
         elif combined_dtype == other.dtype:
             return other
         else:
             return from_dtype(combined_dtype)
     else:
         raise IncompatibleTypes(self, other)
Example #24
def vander(x, N):
  x = np.asarray(x)
  if x.ndim != 1:
    raise ValueError("x must be a 1-D array or sequence")
  v = np.empty((len(x), N), dtype=np.promote_types(x.dtype, int))
  if N > 0:
    v[:,0] = 1
  if N > 1:
    v[:, 1:] = x[:, None]
    np.multiply.accumulate(v[:, 1:], out=v[:, 1:], axis=1)
  return v
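
A quick check, with the vander above in scope, that it builds increasing powers, i.e. matches np.vander with increasing=True:

import numpy as np

x = np.array([1, 2, 3])
print(vander(x, 4))
# [[ 1  1  1  1]
#  [ 1  2  4  8]
#  [ 1  3  9 27]]
print(np.array_equal(vander(x, 4), np.vander(x, 4, increasing=True)))  # True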
Example #25
File: numpy.py Project: pymor/pymor
 def __iadd__(self, other):
     assert self.dim == other.dim
     assert self.base.check_ind_unique(self.ind)
     if self.base._refcount[0] > 1:
         self._deep_copy()
     other_dtype = other.base._array.dtype if other.is_view else other._array.dtype
     common_dtype = np.promote_types(self.base._array.dtype, other_dtype)
     if self.base._array.dtype != common_dtype:
         self.base._array = self.base._array.astype(common_dtype)
     self.base._array[self.ind] += other.base._array[other.ind] if other.is_view else other._array[:other._len]
     return self
Example #26
 def add_edges(self, from_idx, to_idx, weight=1, symmetric=False, copy=False):
   weight = np.atleast_1d(1 if weight is None else weight)
   res_dtype = np.promote_types(weight.dtype, self._adj.dtype)
   adj = self._adj.astype(res_dtype, copy=copy)
   adj[from_idx, to_idx] = weight
   if symmetric:
     adj[to_idx, from_idx] = weight
   if copy:
     return DenseAdjacencyMatrixGraph(adj)
   self._adj = adj
   return self
Example #27
 def combine(self, other):
   if isinstance(other, ScalarT):
     combined_dtype = np.promote_types(self.dtype, other.dtype)
     if combined_dtype == self.dtype:
       return self
     elif combined_dtype == other.dtype:
       return other
     else:
       return from_dtype(combined_dtype)
   else:
     raise IncompatibleTypes(self, other)
Example #28
    def assemble_lincomb(self, operators, coefficients, solver_options=None, name=None):
        if not all(isinstance(op, (NumpyMatrixOperator, ZeroOperator, IdentityOperator)) for op in operators):
            return None

        common_mat_dtype = reduce(np.promote_types,
                                  (op._matrix.dtype for op in operators if hasattr(op, '_matrix')))
        common_coef_dtype = reduce(np.promote_types, (type(c.real if c.imag == 0 else c) for c in coefficients))
        common_dtype = np.promote_types(common_mat_dtype, common_coef_dtype)

        if coefficients[0] == 1:
            matrix = operators[0]._matrix.astype(common_dtype)
        else:
            if coefficients[0].imag == 0:
                matrix = operators[0]._matrix * coefficients[0].real
            else:
                matrix = operators[0]._matrix * coefficients[0]
            if matrix.dtype != common_dtype:
                matrix = matrix.astype(common_dtype)

        for op, c in zip(operators[1:], coefficients[1:]):
            if type(op) is ZeroOperator:
                continue
            elif type(op) is IdentityOperator:
                if operators[0].sparse:
                    try:
                        matrix += (scipy.sparse.eye(matrix.shape[0]) * c)
                    except NotImplementedError:
                        matrix = matrix + (scipy.sparse.eye(matrix.shape[0]) * c)
                else:
                    matrix += (np.eye(matrix.shape[0]) * c)
            elif c == 1:
                try:
                    matrix += op._matrix
                except NotImplementedError:
                    matrix = matrix + op._matrix
            elif c == -1:
                try:
                    matrix -= op._matrix
                except NotImplementedError:
                    matrix = matrix - op._matrix
            elif c.imag == 0:
                try:
                    matrix += (op._matrix * c.real)
                except NotImplementedError:
                    matrix = matrix + (op._matrix * c.real)
            else:
                try:
                    matrix += (op._matrix * c)
                except NotImplementedError:
                    matrix = matrix + (op._matrix * c)
        return NumpyMatrixOperator(matrix,
                                   source_id=self.source.id,
                                   range_id=self.range.id,
                                   solver_options=solver_options)
Example #29
 def unify(self, typingctx, other):
     """
     Unify the two number types using Numpy's rules.
     """
     from .. import numpy_support
     if isinstance(other, Number):
         # XXX: this can produce unsafe conversions,
         # e.g. would unify {int64, uint64} to float64
         a = numpy_support.as_dtype(self)
         b = numpy_support.as_dtype(other)
         sel = numpy.promote_types(a, b)
         return numpy_support.from_dtype(sel)
Example #30
def approx_fprime(x, f, epsilon=None, args=(), kwargs={}, centered=False):
    '''
    Gradient of function, or Jacobian if function f returns 1d array

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize, if None, optimal stepsize is used. This is EPS**(1/2)*x for
        `centered` == False and EPS**(1/3)*x for `centered` == True.
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.
    centered : bool
        Whether central difference should be returned. If not, does forward
        differencing.

    Returns
    -------
    grad : array
        gradient or Jacobian

    Notes
    -----
    If f returns a 1d array, it returns a Jacobian. If a 2d array is returned
    by f (e.g., with a value for each observation), it returns a 3d array
    with the Jacobian of each observation with shape xk x nobs x xk. I.e.,
    the Jacobian of the first observation would be [:, 0, :]
    '''
    n = len(x)
    # TODO:  add scaled stepsize
    f0 = f(*((x,)+args), **kwargs)
    dim = np.atleast_1d(f0).shape  # it could be a scalar
    grad = np.zeros((n,) + dim, np.promote_types(float, x.dtype))
    ei = np.zeros((n,), float)
    if not centered:
        epsilon = _get_epsilon(x, 2, epsilon, n)
        for k in range(n):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*((x+ei,) + args), **kwargs) - f0)/epsilon[k]
            ei[k] = 0.0
    else:
        epsilon = _get_epsilon(x, 3, epsilon, n) / 2.
        for k in range(len(x)):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*((x+ei,)+args), **kwargs) -
                          f(*((x-ei,)+args), **kwargs))/(2 * epsilon[k])
            ei[k] = 0.0
    return grad.squeeze().T
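
The gradient buffer's dtype comes from promoting float with the parameter dtype, so integer parameter vectors still get a floating-point gradient:

import numpy as np

x = np.arange(3)                         # int64 parameters
print(np.promote_types(float, x.dtype))  # float64 -- dtype of grad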
Example #31
def coefficients_from_grid_functions_list(grid_funs):
    """
    Return a vector of coefficients of a list of grid functions.

    Given a list [f0, f1, f2, ...] this function returns a
    single Numpy vector containing [f0.coefficients, f1.coefficients, ...].

    Parameters
    ----------
    grid_funs : list of GridFunction objects
        A list containing the grid functions
    """
    vec_len = 0
    input_type = _np.dtype("float32")
    for item in grid_funs:
        input_type = _np.promote_types(input_type, item.coefficients.dtype)
        vec_len += item.space.global_dof_count
    res = _np.zeros(vec_len, dtype=input_type)
    pos = 0
    for item in grid_funs:
        dof_count = item.space.global_dof_count
        res[pos:pos + dof_count] = item.coefficients
        pos += dof_count
    return res
Example #32
    def reindexer(X, fill_value=fill_value):
        if not np.can_cast(fill_value, X.dtype):
            out_dtype = np.promote_types(np.array(fill_value).dtype, X.dtype)
        else:
            out_dtype = X.dtype

        idxmtx = sparse.coo_matrix(
            (np.ones(len(new_pts), dtype=int), (cur_pts, new_pts)),
            shape=(old_size, new_size),
            dtype=out_dtype,
        )
        out = X @ idxmtx

        if fill_value != 0:
            to_fill = new_var.get_indexer(new_var.difference(cur_var))
            if len(to_fill) > 0:
                # More efficient to set columns on csc
                if sparse.issparse(out):
                    out = sparse.csc_matrix(out)
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning)
                    out[:, to_fill] = fill_value

        return out
Example #33
    def scal(self, alpha, ind=None):
        assert self.check_ind_unique(ind)
        assert isinstance(alpha, _INDEXTYPES) \
            or isinstance(alpha, np.ndarray) and alpha.shape == (self.len_ind(ind),)

        if self._refcount[0] > 1:
            self._deep_copy()

        if NUMPY_INDEX_QUIRK and self._len == 0:
            return

        if isinstance(alpha, np.ndarray) and not isinstance(ind, Number):
            alpha = alpha[:, np.newaxis]

        alpha_type = type(alpha)
        alpha_dtype = alpha.dtype if alpha_type is np.ndarray else alpha_type
        if self._array.dtype != alpha_dtype:
            self._array = self._array.astype(
                np.promote_types(self._array.dtype, alpha_dtype))

        if ind is None:
            self._array[:self._len] *= alpha
        else:
            self._array[ind] *= alpha
Example #34
 def _init_attrs_from_file(self, filename):
     with rasterio.drivers():
         with rasterio.open(filename, 'r') as src:
             #: dict: rasterio metadata of first file
             self.md = src.meta.copy()
             self.crs = src.crs
             self.affine = src.affine
             self.res = src.res
             self.ul = src.ul(0, 0)
             self.height = src.height
             self.width = src.width
             self.count = src.count
             self.length = len(self.df)
             self.block_windows = list(src.block_windows())
             self.nodatavals = src.nodatavals
             # We only use one datatype for reading -- promote to largest
             # if heterogeneous
             self.dtype = src.dtypes[0]
             if not all([dt == self.dtype for dt in src.dtypes[1:]]):
                 logger.warning('GDAL reader cannot read multiple data '
                                'types. Promoting memory allocation to '
                                'largest datatype of source bands')
                 for dtype in src.dtypes[1:]:
                     self.dtype = np.promote_types(self.dtype, dtype)
Example #35
def test_butterworth_2D_realfft(high_pass, dtype, squared_butterworth):
    """Filtering a real-valued array is equivalent to filtering a
       complex-valued array where the imaginary part is zero.
    """
    im = np.random.randn(32, 64).astype(dtype)
    kwargs = dict(
        cutoff_frequency_ratio=0.20,
        high_pass=high_pass,
        squared_butterworth=squared_butterworth,
    )

    expected_dtype = _supported_float_type(im.dtype)
    filtered_real = butterworth(im, **kwargs)
    assert filtered_real.dtype == expected_dtype

    cplx_dtype = np.promote_types(im.dtype, np.complex64)
    filtered_cplx = butterworth(im.astype(cplx_dtype), **kwargs)
    assert filtered_cplx.real.dtype == expected_dtype

    if expected_dtype == np.float64:
        rtol = atol = 1e-13
    else:
        rtol = atol = 1e-5
    assert_allclose(filtered_real, filtered_cplx.real, rtol=rtol, atol=atol)
Example #36
def _get_output(output, input, shape=None, complex_output=False):
    if shape is None:
        shape = input.shape
    if output is None:
        if not complex_output:
            output = numpy.zeros(shape, dtype=input.dtype.name)
        else:
            complex_type = numpy.promote_types(input.dtype, numpy.complex64)
            output = numpy.zeros(shape, dtype=complex_type)
    elif isinstance(output, (type, numpy.dtype)):
        # Classes (like `np.float32`) and dtypes are interpreted as dtype
        if complex_output and numpy.dtype(output).kind != 'c':
            raise RuntimeError("output must have complex dtype")
        output = numpy.zeros(shape, dtype=output)
    elif isinstance(output, str):
        output = numpy.dtype(output)  # numpy.typeDict was removed in NumPy 1.24
        if complex_output and numpy.dtype(output).kind != 'c':
            raise RuntimeError("output must have complex dtype")
        output = numpy.zeros(shape, dtype=output)
    elif output.shape != shape:
        raise RuntimeError("output shape not correct")
    elif complex_output and output.dtype.kind != 'c':
        raise RuntimeError("output must have complex dtype")
    return output
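
The complex branch relies on promotion with complex64 preserving the input's precision; a quick check:

import numpy as np

print(np.promote_types(np.float32, np.complex64))  # complex64
print(np.promote_types(np.float64, np.complex64))  # complex128
print(np.promote_types(np.int64, np.complex64))    # complex128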
Example #37
def get_minimal_dtype(arrays, increase_itemsize_factor=1):
    assert isinstance(arrays, list), (
        "Expected a list of arrays or dtypes, got type %s." % (type(arrays),))
    assert len(arrays) > 0, (
        "Cannot estimate minimal dtype of an empty iterable.")

    input_dts = normalize_dtypes(arrays)

    # This loop construct handles (1) list of a single dtype, (2) list of two
    # dtypes and (3) list of 3+ dtypes. Note that promote_types() always
    # expects exactly two dtypes.
    promoted_dt = input_dts[0]
    input_dts = input_dts[1:]
    while len(input_dts) >= 1:
        promoted_dt = np.promote_types(promoted_dt, input_dts[0])
        input_dts = input_dts[1:]

    if increase_itemsize_factor > 1:
        assert isinstance(promoted_dt, np.dtype), (
            "Expected numpy.dtype output from numpy.promote_dtypes, got type "
            "%s." % (type(promoted_dt),))
        return increase_itemsize_of_dtype(promoted_dt,
                                          increase_itemsize_factor)
    return promoted_dt
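
The while loop above is a left fold over dtype pairs; an equivalent sketch with functools.reduce:

import functools
import numpy as np

dts = [np.dtype('uint8'), np.dtype('int16'), np.dtype('float32')]
print(functools.reduce(np.promote_types, dts))  # float32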
Example #38
File: csr.py Project: toslunar/cupy
def multiply_by_dense(sp, dn):
    check_shape_for_pointwise_op(sp.shape, dn.shape)
    sp_m, sp_n = sp.shape
    dn_m, dn_n = dn.shape
    m, n = max(sp_m, dn_m), max(sp_n, dn_n)
    nnz = sp.nnz * (m // sp_m) * (n // sp_n)
    dtype = numpy.promote_types(sp.dtype, dn.dtype)
    data = cupy.empty(nnz, dtype=dtype)
    indices = cupy.empty(nnz, dtype=sp.indices.dtype)
    if m > sp_m:
        if n > sp_n:
            indptr = cupy.arange(0, nnz + 1, n, dtype=sp.indptr.dtype)
        else:
            indptr = cupy.arange(0, nnz + 1, sp.nnz, dtype=sp.indptr.dtype)
    else:
        indptr = sp.indptr.copy()
        if n > sp_n:
            indptr *= n

    # out = sp * dn
    cupy_multiply_by_dense()(sp.data, sp.indptr, sp.indices, sp_m, sp_n, dn,
                             dn_m, dn_n, indptr, m, n, data, indices)

    return csr_matrix((data, indices, indptr), shape=(m, n))
Example #39
def min_column_type(x, expected_type):
    """
    Return the smallest dtype which can represent all
    elements of the `NumericalColumn` `x`.
    If the column is not a subtype of `np.signedinteger` or `np.floating`,
    returns the same dtype as the dtype of `x` without modification.
    """

    if not isinstance(x, cudf.core.column.NumericalColumn):
        raise TypeError("Argument x must be of type column.NumericalColumn")
    if x.valid_count == 0:
        return x.dtype

    if np.issubdtype(x.dtype, np.floating):
        return get_min_float_dtype(x)

    elif np.issubdtype(expected_type, np.integer):
        max_bound_dtype = np.min_scalar_type(x.max())
        min_bound_dtype = np.min_scalar_type(x.min())
        result_type = np.promote_types(max_bound_dtype, min_bound_dtype)
    else:
        result_type = x.dtype

    return cudf.dtype(result_type)
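
The integer branch promotes the min-scalar types of the column's extremes; mixing an unsigned max with a signed min can widen past both inputs:

import numpy as np

max_bound = np.min_scalar_type(300)  # uint16
min_bound = np.min_scalar_type(-5)   # int8
print(np.promote_types(max_bound, min_bound))  # int32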
Example #40
def testLargestSV(test):
    query = {TEST.TYPE_EXPECTED: np.float64}
    instance = test[TEST.INSTANCE]

    # account for "extra computation stage" (gram) in largestSV
    query[TEST.TOL_POWER] = test.get(TEST.TOL_POWER, 1.) * 2
    query[TEST.TOL_MINEPS] = _getTypeEps(safeTypeExpansion(instance.dtype))

    # determine reference result
    largestSV = np.linalg.svd(test[TEST.REFERENCE], compute_uv=False)[0]
    query[TEST.RESULT_REF] = np.array(largestSV,
                                      dtype=np.promote_types(
                                          largestSV.dtype, np.float64))

    # largestSV may not converge fast enough for a bad random starting point
    # so retry a few times before giving up
    for tries in range(9):
        maxSteps = 100. * 10.**(tries / 2.)
        query[TEST.RESULT_OUTPUT] = np.array(
            instance.getLargestSV(maxSteps=maxSteps, alwaysReturn=True))
        result = compareResults(test, query)
        if result[TEST.RESULT]:
            break
    return result
Example #41
 def normalize_binop_value(
         self, other: ScalarLike) -> Union[ColumnBase, ScalarLike]:
     if other is None:
         return other
     if isinstance(other, cudf.Scalar):
         if self.dtype == other.dtype:
             return other
         # expensive device-host transfer just to
         # adjust the dtype
         other = other.value
     elif isinstance(other, np.ndarray) and other.ndim == 0:
         other = other.item()
     other_dtype = np.min_scalar_type(other)
     if other_dtype.kind in {"b", "i", "u", "f"}:
         if isinstance(other, cudf.Scalar):
             return other
         other_dtype = np.promote_types(self.dtype, other_dtype)
         if other_dtype == np.dtype("float16"):
             other_dtype = np.dtype("float32")
             other = other_dtype.type(other)
         if self.dtype.kind == "b":
             other_dtype = min_signed_type(other)
         if np.isscalar(other):
             other = np.dtype(other_dtype).type(other)
             return other
         else:
             ary = utils.scalar_broadcast_to(other,
                                             size=len(self),
                                             dtype=other_dtype)
             return column.build_column(
                 data=Buffer(ary),
                 dtype=ary.dtype,
                 mask=self.mask,
             )
     else:
         raise TypeError(f"cannot broadcast {type(other)}")
Example #42
def np_promote_all_types(*dtypes):
    """ Return the largest NumPy datatype required to hold all types

    Parameters
    ----------
    dtypes : iterable
        NumPy datatypes to promote

    Returns
    -------
    np.dtype
        Smallest NumPy datatype required to store all input datatypes

    See Also
    --------
    np.promote_types
    """
    dtype = dtypes[0]
    if not all([dt == dtype for dt in dtypes[1:]]):
        logger.debug('Promoting memory allocation to largest datatype of '
                     'source bands')
        for _dtype in dtypes[1:]:
            dtype = np.promote_types(dtype, _dtype)
    return dtype
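
A usage sketch, with np_promote_all_types above in scope; promoting mixed-signedness integers can require a type wider than either input:

import numpy as np

print(np_promote_all_types(np.dtype('uint8'), np.dtype('int8')))  # int16
print(np_promote_all_types(np.dtype('f4'), np.dtype('f4')))       # float32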
Example #43
def _qr_batched(a, mode):
    batch_shape = a.shape[:-2]
    batch_size = internal.prod(batch_shape)
    m, n = a.shape[-2:]

    # first handle any 0-size inputs
    if batch_size == 0 or m == 0 or n == 0:
        # support float32, float64, complex64, and complex128
        dtype, out_dtype = _util.linalg_common_type(a)
        if mode == 'raw':
            # compatibility with numpy.linalg.qr
            out_dtype = numpy.promote_types(out_dtype, 'd')

        if mode == 'reduced':
            return (cupy.empty(batch_shape + (m, 0), out_dtype),
                    cupy.empty(batch_shape + (0, n), out_dtype))
        elif mode == 'complete':
            q = _util.stacked_identity(batch_shape, m, out_dtype)
            return (q, cupy.empty(batch_shape + (m, n), out_dtype))
        elif mode == 'r':
            return cupy.empty(batch_shape + (0, n), out_dtype)
        elif mode == 'raw':
            return (cupy.empty(batch_shape + (n, m), out_dtype),
                    cupy.empty(batch_shape + (0, ), out_dtype))

    # ...then delegate real computation to cuSOLVER/rocSOLVER
    a = a.reshape(-1, *(a.shape[-2:]))
    out = _geqrf_orgqr_batched(a, mode)

    if mode == 'r':
        return out.reshape(batch_shape + out.shape[-2:])
    q, r = out
    q = q.reshape(batch_shape + q.shape[-2:])
    idx = -1 if mode == 'raw' else -2
    r = r.reshape(batch_shape + r.shape[idx:])
    return (q, r)
Example #44
def FISTA(fmatA, arrB, numLambda=0.1, numMaxSteps=100):
    '''
    Wrapper around the FISTA algorithm to allow processing of arrays of signals
        fmatA         - input system matrix
        arrB          - input data vector (measurements)
        numLambda     - balancing parameter in optimization problem
                        between data fidelity and sparsity
        numMaxSteps   - maximum number of steps to run
        numL          - step size during the conjugate gradient step
    '''

    if len(arrB.shape) > 2:
        raise ValueError("Only n x m arrays are supported for FISTA")

    # calculate the largest singular value to get the right step size
    numL = 1.0 / (fmatA.largestSV**2)
    t = 1
    arrX = np.zeros((fmatA.numM, arrB.shape[1]),
                    dtype=np.promote_types(np.float32, arrB.dtype))
    # initial arrY
    arrY = np.copy(arrX)
    # start iterating
    for numStep in range(numMaxSteps):
        arrXold = np.copy(arrX)
        # do the gradient step and threshold
        arrStep = arrY - numL * fmatA.backward(fmatA.forward(arrY) - arrB)

        arrX = _softThreshold(arrStep, numL * numLambda * 0.5)

        # update t
        tOld = t
        t = (1 + np.sqrt(1 + 4 * t**2)) / 2
        # update arrY
        arrY = arrX + ((tOld - 1) / t) * (arrX - arrXold)
    # return the unthresholded values for all non-zero support elements
    return np.where(arrX != 0, arrStep, arrX)
Example #45
def spsolve(A, b, permc_spec=None, use_umfpack=True):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.

    Parameters
    ----------
    A : ndarray or sparse matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse matrix
        The matrix or vector representing the right hand side of the equation.
        If a vector, b.shape must be (n,) or (n, 1).
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering
    use_umfpack : bool, optional
        if True (default) then use umfpack for the solution.  This is
        only referenced if b is a vector and ``scikit-umfpack`` is installed.

    Returns
    -------
    x : ndarray or sparse matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[1]
        If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])

    Notes
    -----
    For solving the matrix expression AX = B, this solver assumes the resulting
    matrix X is sparse, as is often the case for very sparse inputs.  If the
    resulting X is dense, the construction of this sparse result will be
    relatively expensive.  In that case, consider converting A to a dense
    matrix and using scipy.linalg.solve or its variants.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import spsolve
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve(A, B)
    >>> np.allclose(A.dot(x).todense(), B.todense())
    True
    """
    if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
        A = csc_matrix(A)
        warn('spsolve requires A be CSC or CSR matrix format',
             SparseEfficiencyWarning)

    # b is a vector only if b has shape (n,) or (n, 1)
    b_is_sparse = isspmatrix(b)
    if not b_is_sparse:
        b = asarray(b)
    b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))

    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format
    result_dtype = np.promote_types(A.dtype, b.dtype)
    if A.dtype != result_dtype:
        A = A.astype(result_dtype)
    if b.dtype != result_dtype:
        b = b.astype(result_dtype)

    # validate input shapes
    M, N = A.shape
    if (M != N):
        raise ValueError("matrix must be square (has shape %s)" % ((M, N), ))

    if M != b.shape[0]:
        raise ValueError("matrix - rhs dimension mismatch (%s - %s)" %
                         (A.shape, b.shape[0]))

    use_umfpack = use_umfpack and useUmfpack

    if b_is_vector and use_umfpack:
        if b_is_sparse:
            b_vec = b.toarray()
        else:
            b_vec = b
        b_vec = asarray(b_vec, dtype=A.dtype).ravel()

        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')

        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        umf = umfpack.UmfpackContext(_get_umf_family(A))
        x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec, autoTranspose=True)
    else:
        if b_is_vector and b_is_sparse:
            b = b.toarray()
            b_is_sparse = False

        if not b_is_sparse:
            if isspmatrix_csc(A):
                flag = 1  # CSC format
            else:
                flag = 0  # CSR format

            options = dict(ColPerm=permc_spec)
            x, info = _superlu.gssv(N,
                                    A.nnz,
                                    A.data,
                                    A.indices,
                                    A.indptr,
                                    b,
                                    flag,
                                    options=options)
            if info != 0:
                warn("Matrix is exactly singular", MatrixRankWarning)
                x.fill(np.nan)
            if b_is_vector:
                x = x.ravel()
        else:
            # b is sparse
            Afactsolve = factorized(A)

            if not isspmatrix_csc(b):
                warn(
                    'spsolve is more efficient when sparse b '
                    'is in the CSC matrix format', SparseEfficiencyWarning)
                b = csc_matrix(b)

            # Create a sparse output matrix by repeatedly applying
            # the sparse factorization to solve columns of b.
            data_segs = []
            row_segs = []
            col_segs = []
            for j in range(b.shape[1]):
                bj = b[:, j].A.ravel()
                xj = Afactsolve(bj)
                w = np.flatnonzero(xj)
                segment_length = w.shape[0]
                row_segs.append(w)
                col_segs.append(np.ones(segment_length, dtype=int) * j)
                data_segs.append(np.asarray(xj[w], dtype=A.dtype))
            sparse_data = np.concatenate(data_segs)
            sparse_row = np.concatenate(row_segs)
            sparse_col = np.concatenate(col_segs)
            x = A.__class__((sparse_data, (sparse_row, sparse_col)),
                            shape=b.shape,
                            dtype=A.dtype)

    return x
Example #46
def resample(data_in, x_in, x_out, y, data_out=None, kind='linear'):
    """Resample the data of one spectrum using interpolation.

    Dependent variables y1, y2, ... in the input data are resampled in the
    independent variable x using interpolation models y1(x), y2(x), ...
    evaluated on a new grid of x values. The independent variable will
    typically be a wavelength or frequency and the independent variables can
    be fluxes, inverse variances, etc.

    Interpolation is intended for cases where the input and output grids have
    comparable densities. When neighboring samples are correlated, the
    resampling process should be essentially lossless.  When the output
    grid is sparser than the input grid, it may be more appropriate to
    "downsample", i.e., average dependent variables over consecutive ranges
    of input samples.

    The basic usage of this function is:

    >>> data = np.ones((5,),
    ... [('wlen', float), ('flux', float), ('ivar', float)])
    >>> data['wlen'] = np.arange(4000, 5000, 200)
    >>> wlen_out = np.arange(4100, 4700, 200)
    >>> out = resample(data, 'wlen', wlen_out, ('flux', 'ivar'))
    >>> np.all(out ==
    ... np.array([(4100, 1.0, 1.0), (4300, 1.0, 1.0), (4500, 1.0, 1.0)],
    ... dtype=[('wlen', '<i8'), ('flux', '<f8'), ('ivar', '<f8')]))
    True

    The input grid can also be external to the structured array of spectral
    data, for example:

    >>> data = np.ones((5,), [('flux', float), ('ivar', float)])
    >>> wlen_in = np.arange(4000, 5000, 200)
    >>> wlen_out = np.arange(4100, 4900, 200)
    >>> out = resample(data, wlen_in, wlen_out, ('flux', 'ivar'))
    >>> np.all(out ==
    ... np.array([(1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0)],
    ... dtype=[('flux', '<f8'), ('ivar', '<f8')]))
    True

    If the output grid extends beyond the input grid, a `masked array
    <http://docs.scipy.org/doc/numpy/reference/maskedarray.html>`__ will be
    returned with any values requiring extrapolation masked:

    >>> wlen_out = np.arange(3500, 5500, 500)
    >>> out = resample(data, wlen_in, wlen_out, 'flux')
    >>> np.all(out.mask ==
    ... np.array([(True,), (False,), (False,), (True,)],
    ... dtype=[('flux', 'bool')]))
    True

    If the input data is masked, any output interpolated values that depend on
    an input masked value will be masked in the output:

    >>> data = ma.ones((5,), [('flux', float), ('ivar', float)])
    >>> data['flux'][2] = ma.masked
    >>> wlen_out = np.arange(4100, 4900, 200)
    >>> out = resample(data, wlen_in, wlen_out, 'flux')
    >>> np.all(out.mask ==
    ... np.array([(False,), (True,), (True,), (False,)],
    ... dtype=[('flux', 'bool')]))
    True

    Interpolation is performed using :class:`scipy.interpolate.interp1d`.

    Parameters
    ----------
    data_in : numpy.ndarray or numpy.ma.MaskedArray
        Structured numpy array of input spectral data to resample. The input
        array must be one-dimensional.
    x_in : string or numpy.ndarray
        A field name in data_in containing the independent variable to use
        for interpolation, or else an array of values with the same shape
        as the input data.
    x_out : numpy.ndarray
        An array of values for the independent variable where interpolation
        models should be evaluated to calculate the output values.
    y : string or iterable of strings.
        A field name or a list of field names present in the input data that
        should be resampled by interpolation and included in the output.
    data_out : numpy.ndarray or None
        Structured numpy array where the output result should be written. If
        None is specified, then an appropriately sized array will be allocated
        and returned. Use this method to take control of the memory allocation
        and, for example, re-use the same array when resampling many spectra.
    kind : string or integer
        Specify the kind of interpolation models to build using any of the
        forms allowed by :class:`scipy.interpolate.interp1d`.  If any input
        dependent values are masked, only the ``nearest`` and ``linear``
        values are allowed.

    Returns
    -------
    numpy.ndarray or numpy.ma.MaskedArray
        Structured numpy array of the resampled result containing all ``y``
        fields and (if ``x_in`` is specified as a string) the output ``x``
        field.  The output will be a :class:`numpy.ma.MaskedArray` if ``x_out``
        extends beyond ``x_in`` or if ``data_in`` is masked.
    """
    if not isinstance(data_in, np.ndarray):
        raise ValueError('Invalid data_in type: {0}.'.format(type(data_in)))
    if data_in.dtype.fields is None:
        raise ValueError('Input data_in is not a structured array.')
    if len(data_in.shape) > 1:
        raise ValueError('Input data_in is multidimensional.')

    if isinstance(x_in, str):
        if x_in not in data_in.dtype.names:
            raise ValueError('No such x_in field: {0}.'.format(x_in))
        x_out_name = x_in
        x_in = data_in[x_in]
    else:
        if not isinstance(x_in, np.ndarray):
            raise ValueError('Invalid x_in type: {0}.'.format(type(x_in)))
        if x_in.shape != data_in.shape:
            raise ValueError('Incompatible shapes for x_in and data_in.')
        x_out_name = None

    if not isinstance(x_out, np.ndarray):
        raise ValueError('Invalid x_out type: {0}.'.format(type(x_out)))

    if ma.isMA(x_in) and np.any(x_in.mask):
        raise ValueError('Cannot resample masked x_in.')

    x_type = np.promote_types(x_in.dtype, x_out.dtype)

    dtype_out = []
    if x_out_name is not None:
        dtype_out.append((x_out_name, x_out.dtype))

    if isinstance(y, str):
        # Use a list instead of a tuple here so y_names can be used
        # to index data_in below.
        y_names = [y,]
    else:
        try:
            y_names = [name for name in y]
        except TypeError:
            raise ValueError('Invalid y type: {0}.'.format(type(y)))
    for not_first, y in enumerate(y_names):
        if y not in data_in.dtype.names:
            raise ValueError('No such y field: {0}.'.format(y))
        if not_first:
            if data_in[y].dtype != y_type:
                raise ValueError('All y fields must have the same type.')
        else:
            y_type = data_in[y].dtype
        dtype_out.append((y, y_type))

    y_shape = (len(y_names),)
    if ma.isMA(data_in):
        # Copy the structured 1D array into a 2D unstructured array
        # and set masked values to NaN.
        y_in = np.zeros(data_in.shape + y_shape, y_type)
        for i,y in enumerate(y_names):
            y_in[:,i] = data_in[y].filled(np.nan)
    else:
        if pkgr.parse_version(np.__version__)  >= pkgr.parse_version('1.16'):
            # The slicing does not work in numpy 1.16 and above
            # we use structured_to_unstructured to get the slice that we care about
            y_in = rfn.structured_to_unstructured(
                       data_in[y_names]).reshape(data_in.shape + y_shape)
        else:
            y_in = data_in[y_names]
            # View the structured 1D array as a 2D unstructured array (without
            # copying any memory).
            y_in = y_in.view(y_type).reshape(data_in.shape + y_shape)
       
    # interp1d will only propagate NaNs correctly for certain values of `kind`.
    # With numpy = 1.6 or 1.7, only 'nearest' and 'linear' work.
    # With numpy = 1.8 or 1.9, 'slinear' and kind = 0 or 1 also work.
    if np.any(np.isnan(y_in)):
        if kind not in ('nearest', 'linear'):
            raise ValueError(
                'Interpolation kind not supported for masked data: {0}.'
                .format(kind))
    try:
        interpolator = scipy.interpolate.interp1d(
            x_in, y_in, kind=kind, axis=0, copy=False,
            bounds_error=False, fill_value=np.nan)
    except NotImplementedError:
        raise ValueError('Interpolation kind not supported: {0}.'.format(kind))

    shape_out = (len(x_out),)
    if data_out is None:
        data_out = np.empty(shape_out, dtype_out)
    else:
        if data_out.shape != shape_out:
            raise ValueError(
                'data_out has wrong shape: {0}. Expected: {1}.'
                .format(data_out.shape, shape_out))
        if data_out.dtype != dtype_out:
            raise ValueError(
                'data_out has wrong dtype: {0}. Expected: {1}.'
                .format(data_out.dtype, dtype_out))

    if x_out_name is not None:
        data_out[x_out_name][:] = x_out
    y_out = interpolator(x_out)
    for i,y in enumerate(y_names):
        data_out[y][:] = y_out[:,i]

    if ma.isMA(data_in) or np.any(np.isnan(y_out)):
        data_out = ma.MaskedArray(data_out)
        data_out.mask = False
        for y in y_names:
            data_out[y].mask = np.isnan(data_out[y].data)

    return data_out
Example #47
 def _concrete_matmul_(self, other: "LocalOperator") -> "LocalOperator":
     if not isinstance(other, LocalOperator):
         return NotImplemented
     op = self.copy(dtype=np.promote_types(self.dtype, _dtype(other)))
     op @= other
     return op
Example #48
def spsolve(A, b, permc_spec=None, use_umfpack=True):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.

    Parameters
    ----------
    A : ndarray or sparse matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse matrix
        The matrix or vector representing the right hand side of the equation.
        If a vector, b.shape must be (n,) or (n, 1).
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.

    use_umfpack : bool, optional
        if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
        [6]_ . This is only referenced if b is a vector and
        ``scikits.umfpack`` is installed.

    Returns
    -------
    x : ndarray or sparse matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[1]
        If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])

    Notes
    -----
    For solving the matrix expression AX = B, this solver assumes the resulting
    matrix X is sparse, as is often the case for very sparse inputs.  If the
    resulting X is dense, the construction of this sparse result will be
    relatively expensive.  In that case, consider converting A to a dense
    matrix and using scipy.linalg.solve or its variants.

    References
    ----------
    .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
           COLAMD, an approximate column minimum degree ordering algorithm,
           ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
           :doi:`10.1145/1024074.1024080`

    .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
           minimum degree ordering algorithm, ACM Trans. on Mathematical
           Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`

    .. [3] T. A. Davis, Algorithm 832:  UMFPACK - an unsymmetric-pattern
           multifrontal method with a column pre-ordering strategy, ACM
           Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
           https://dl.acm.org/doi/abs/10.1145/992200.992206

    .. [4] T. A. Davis, A column pre-ordering strategy for the
           unsymmetric-pattern multifrontal method, ACM Trans.
           on Mathematical Software, 30(2), 2004, pp. 165--195.
           https://dl.acm.org/doi/abs/10.1145/992200.992205

    .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
           method for unsymmetric sparse matrices, ACM Trans. on
           Mathematical Software, 25(1), 1999, pp. 1--19.
           https://doi.org/10.1145/305658.287640

    .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
           method for sparse LU factorization, SIAM J. Matrix Analysis and
           Applications, 18(1), 1997, pp. 140--158.
           https://doi.org/10.1137/S0895479894246905


    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import spsolve
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve(A, B)
    >>> np.allclose(A.dot(x).toarray(), B.toarray())
    True
    """

    if is_pydata_spmatrix(A):
        A = A.to_scipy_sparse().tocsc()

    if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
        A = csc_matrix(A)
        warn('spsolve requires A be CSC or CSR matrix format',
             SparseEfficiencyWarning)

    # b is a vector only if b has shape (n,) or (n, 1)
    b_is_sparse = isspmatrix(b) or is_pydata_spmatrix(b)
    if not b_is_sparse:
        b = asarray(b)
    b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))

    # sum duplicates for non-canonical format
    A.sum_duplicates()
    A = A.asfptype()  # upcast to a floating point format
    result_dtype = np.promote_types(A.dtype, b.dtype)
    if A.dtype != result_dtype:
        A = A.astype(result_dtype)
    if b.dtype != result_dtype:
        b = b.astype(result_dtype)

    # validate input shapes
    M, N = A.shape
    if (M != N):
        raise ValueError("matrix must be square (has shape %s)" % ((M, N), ))

    if M != b.shape[0]:
        raise ValueError("matrix - rhs dimension mismatch (%s - %s)" %
                         (A.shape, b.shape[0]))

    use_umfpack = use_umfpack and useUmfpack

    if b_is_vector and use_umfpack:
        if b_is_sparse:
            b_vec = b.toarray()
        else:
            b_vec = b
        b_vec = asarray(b_vec, dtype=A.dtype).ravel()

        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')

        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        umf_family, A = _get_umf_family(A)
        umf = umfpack.UmfpackContext(umf_family)
        x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec, autoTranspose=True)
    else:
        if b_is_vector and b_is_sparse:
            b = b.toarray()
            b_is_sparse = False

        if not b_is_sparse:
            if isspmatrix_csc(A):
                flag = 1  # CSC format
            else:
                flag = 0  # CSR format

            options = dict(ColPerm=permc_spec)
            x, info = _superlu.gssv(N,
                                    A.nnz,
                                    A.data,
                                    A.indices,
                                    A.indptr,
                                    b,
                                    flag,
                                    options=options)
            if info != 0:
                warn("Matrix is exactly singular", MatrixRankWarning)
                x.fill(np.nan)
            if b_is_vector:
                x = x.ravel()
        else:
            # b is sparse
            Afactsolve = factorized(A)

            if not (isspmatrix_csc(b) or is_pydata_spmatrix(b)):
                warn(
                    'spsolve is more efficient when sparse b '
                    'is in the CSC matrix format', SparseEfficiencyWarning)
                b = csc_matrix(b)

            # Create a sparse output matrix by repeatedly applying
            # the sparse factorization to solve columns of b.
            data_segs = []
            row_segs = []
            col_segs = []
            for j in range(b.shape[1]):
                # TODO: replace this with
                # bj = b[:, j].toarray().ravel()
                # once 1D sparse arrays are supported.
                # That is a slightly faster code path.
                bj = b[:, [j]].toarray().ravel()
                xj = Afactsolve(bj)
                w = np.flatnonzero(xj)
                segment_length = w.shape[0]
                row_segs.append(w)
                col_segs.append(np.full(segment_length, j, dtype=int))
                data_segs.append(np.asarray(xj[w], dtype=A.dtype))
            sparse_data = np.concatenate(data_segs)
            sparse_row = np.concatenate(row_segs)
            sparse_col = np.concatenate(col_segs)
            x = A.__class__((sparse_data, (sparse_row, sparse_col)),
                            shape=b.shape,
                            dtype=A.dtype)

            if is_pydata_spmatrix(b):
                x = b.__class__(x)

    return x
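A short usage sketch for the dense right-hand-side path of spsolve; the matrix values are arbitrary and only the public scipy API documented above is used:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve

A = csc_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
b = np.array([2., 4., -1.])     # dense (n,) vector -> dense (n,) solution
x = spsolve(A, b)
assert np.allclose(A @ x, b)    # A @ x yields a dense ndarray here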
Example #50
0
def cholesky(a):
    """Cholesky decomposition.

    Decompose a given two-dimensional square matrix into ``L * L.T``,
    where ``L`` is a lower-triangular matrix and ``.T`` is a conjugate
    transpose operator.

    Args:
        a (cupy.ndarray): The input matrix with dimension ``(N, N)``

    Returns:
        cupy.ndarray: The lower-triangular matrix.

    .. warning::
        This function calls one or more cuSOLVER routine(s) which may yield
        invalid results if input conditions are not met.
        To detect these invalid results, you can set the `linalg`
        configuration to a value that is not `ignore` in
        :func:`cupyx.errstate` or :func:`cupyx.seterr`.

    .. seealso:: :func:`numpy.linalg.cholesky`
    """
    _util._assert_cupy_array(a)
    _util._assert_nd_squareness(a)

    if a.ndim > 2:
        return _potrf_batched(a)

    if a.dtype.char == 'f' or a.dtype.char == 'd':
        dtype = a.dtype.char
    else:
        dtype = numpy.promote_types(a.dtype.char, 'f').char

    x = a.astype(dtype, order='C', copy=True)
    n = len(a)
    handle = device.get_cusolver_handle()
    dev_info = cupy.empty(1, dtype=numpy.int32)

    if dtype == 'f':
        potrf = cusolver.spotrf
        potrf_bufferSize = cusolver.spotrf_bufferSize
    elif dtype == 'd':
        potrf = cusolver.dpotrf
        potrf_bufferSize = cusolver.dpotrf_bufferSize
    elif dtype == 'F':
        potrf = cusolver.cpotrf
        potrf_bufferSize = cusolver.cpotrf_bufferSize
    else:  # dtype == 'D':
        potrf = cusolver.zpotrf
        potrf_bufferSize = cusolver.zpotrf_bufferSize

    buffersize = potrf_bufferSize(handle, cublas.CUBLAS_FILL_MODE_UPPER, n,
                                  x.data.ptr, n)
    workspace = cupy.empty(buffersize, dtype=dtype)
    potrf(handle, cublas.CUBLAS_FILL_MODE_UPPER, n, x.data.ptr, n,
          workspace.data.ptr, buffersize, dev_info.data.ptr)
    cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        potrf, dev_info)

    _util._tril(x, k=0)
    return x
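The promotion branch above maps every input that is not already float32/float64 onto a floating dtype by promoting with 'f'. A few illustrative cases in plain NumPy (no GPU needed):

import numpy as np

np.promote_types(np.int8, 'f')       # float32: int8 fits in single precision
np.promote_types(np.int64, 'f')      # float64: wide integers need double
np.promote_types(np.complex64, 'f')  # complex64: already floating, unchanged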
Example #51
0
def cov(a, y=None, rowvar=True, bias=False, ddof=None):
    """Returns the covariance matrix of an array.

    This function currently does not support ``fweights`` and ``aweights``
    options.

    Args:
        a (cupy.ndarray): Array to compute covariance matrix.
        y (cupy.ndarray): An additional set of variables and observations.
        rowvar (bool): If ``True``, then each row represents a variable, with
            observations in the columns. Otherwise, the relationship is
            transposed.
        bias (bool): If ``False``, normalization is by ``(N - 1)``, where N is
            the number of observations given (unbiased estimate). If ``True``,
            then normalization is by ``N``.
        ddof (int): If not ``None``, the default value implied by bias is
            overridden. Note that ``ddof=1`` will return the unbiased estimate
            and ``ddof=0`` will return the simple average.

    Returns:
        cupy.ndarray: The covariance matrix of the input array.

    .. seealso:: :func:`numpy.cov`

    """
    if ddof is not None and ddof != int(ddof):
        raise ValueError('ddof must be integer')

    if a.ndim > 2:
        raise ValueError('Input must be <= 2-d')

    if y is None:
        dtype = numpy.promote_types(a.dtype, numpy.float64)
    else:
        if y.ndim > 2:
            raise ValueError('y must be <= 2-d')
        dtype = functools.reduce(numpy.promote_types,
                                 (a.dtype, y.dtype, numpy.float64))

    X = cupy.array(a, ndmin=2, dtype=dtype)
    if not rowvar and X.shape[0] != 1:
        X = X.T
    if X.shape[0] == 0:
        return cupy.array([]).reshape(0, 0)
    if y is not None:
        y = cupy.array(y, copy=False, ndmin=2, dtype=dtype)
        if not rowvar and y.shape[0] != 1:
            y = y.T
        X = core.concatenate_method((X, y), axis=0)

    if ddof is None:
        ddof = 0 if bias else 1

    fact = X.shape[1] - ddof
    if fact <= 0:
        warnings.warn('Degrees of freedom <= 0 for slice',
                      RuntimeWarning,
                      stacklevel=2)
        fact = 0.0

    X -= X.mean(axis=1)[:, None]
    out = X.dot(X.T.conj()) * (1 / cupy.float64(fact))

    return out.squeeze()
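The dtype reduction at the top of cov guarantees at least float64 and preserves complex inputs. For instance, with assumed input dtypes:

import functools
import numpy as np

# int32 with complex64 promotes to complex128, and float64 keeps it there:
functools.reduce(np.promote_types,
                 (np.dtype(np.int32), np.dtype(np.complex64), np.float64))
# -> dtype('complex128')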
Example #52
0
    def predict_conditional(self, params):
        """
        In-sample prediction, conditional on the current and previous regime

        Parameters
        ----------
        params : array_like
            Array of parameters at which to create predictions.

        Returns
        -------
        predict : array_like
            Array of predictions conditional on current, and possibly past,
            regimes
        """
        params = np.array(params, ndmin=1)

        # Prediction is based on:
        # y_t = x_t beta^{(S_t)} +
        #       \phi_1^{(S_t)} (y_{t-1} - x_{t-1} beta^{(S_t-1)}) + ...
        #       \phi_p^{(S_t)} (y_{t-p} - x_{t-p} beta^{(S_t-p)}) + eps_t
        if self._k_exog > 0:
            xb = []
            for i in range(self.k_regimes):
                coeffs = params[self.parameters[i, 'exog']]
                xb.append(np.dot(self.orig_exog, coeffs))

        predict = np.zeros(
            (self.k_regimes,) * (self.order + 1) + (self.nobs,),
            dtype=np.promote_types(np.float64, params.dtype))
        # Iterate over S_{t} = i
        for i in range(self.k_regimes):
            ar_coeffs = params[self.parameters[i, 'autoregressive']]

            # y_t - x_t beta^{(S_t)}
            ix = self._predict_slices[:]
            ix[0] = i
            ix = tuple(ix)
            if self._k_exog > 0:
                predict[ix] += xb[i][self.order:]

            # Iterate over lags j = 1, ..., p
            for j in range(1, self.order + 1):
                for k in range(self.k_regimes):
                    # This gets a specific time-period / regime slice:
                    # S_{t} = i, S_{t-j} = k, across all other time-period /
                    # regime slices.
                    ix = self._predict_slices[:]
                    ix[0] = i
                    ix[j] = k
                    ix = tuple(ix)

                    start = self.order - j
                    end = -j
                    if self._k_exog > 0:
                        predict[ix] += ar_coeffs[j-1] * (
                            self.orig_endog[start:end] - xb[k][start:end])
                    else:
                        predict[ix] += ar_coeffs[j-1] * (
                            self.orig_endog[start:end])

        return predict
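The allocation dtype above, np.promote_types(np.float64, params.dtype), keeps real parameter vectors in float64 while letting complex parameters widen the output; a quick check of the two cases:

import numpy as np

np.promote_types(np.float64, np.dtype(np.float32))    # float64
np.promote_types(np.float64, np.dtype(np.complex64))  # complex128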
Example #53
0
def qr(a, mode='reduced'):
    """QR decomposition.

    Decompose a given two-dimensional matrix into ``Q * R``, where ``Q``
    is an orthonormal matrix and ``R`` is an upper-triangular matrix.

    Args:
        a (cupy.ndarray): The input matrix.
        mode (str): The mode of decomposition. Currently 'reduced',
            'complete', 'r', and 'raw' modes are supported. The default mode
            is 'reduced', in which matrix ``A = (M, N)`` is decomposed into
            ``Q``, ``R`` with dimensions ``(M, K)``, ``(K, N)``, where
            ``K = min(M, N)``.

    Returns:
        cupy.ndarray, or tuple of ndarray:
            Although the type of returned object depends on the mode,
            it returns a tuple of ``(Q, R)`` by default.
            For details, please see the documentation of :func:`numpy.linalg.qr`.

    .. warning::
        This function calls one or more cuSOLVER routine(s) which may yield
        invalid results if input conditions are not met.
        To detect these invalid results, you can set the `linalg`
        configuration to a value that is not `ignore` in
        :func:`cupyx.errstate` or :func:`cupyx.seterr`.

    .. seealso:: :func:`numpy.linalg.qr`
    """
    # TODO(Saito): Current implementation only accepts two-dimensional arrays
    _util._assert_cupy_array(a)
    _util._assert_rank2(a)

    if mode not in ('reduced', 'complete', 'r', 'raw'):
        if mode in ('f', 'full', 'e', 'economic'):
            msg = 'The deprecated mode \'{}\' is not supported'.format(mode)
            raise ValueError(msg)
        else:
            raise ValueError('Unrecognized mode \'{}\''.format(mode))

    # support float32, float64, complex64, and complex128
    if a.dtype.char in 'fdFD':
        dtype = a.dtype.char
    else:
        dtype = numpy.promote_types(a.dtype.char, 'f').char

    m, n = a.shape
    mn = min(m, n)
    if mn == 0:
        if mode == 'reduced':
            return cupy.empty((m, 0), dtype), cupy.empty((0, n), dtype)
        elif mode == 'complete':
            return cupy.identity(m, dtype), cupy.empty((m, n), dtype)
        elif mode == 'r':
            return cupy.empty((0, n), dtype)
        else:  # mode == 'raw'
            # compatibility with numpy.linalg.qr
            dtype = numpy.promote_types(dtype, 'd')
            return cupy.empty((n, m), dtype), cupy.empty((0, ), dtype)

    x = a.transpose().astype(dtype, order='C', copy=True)
    handle = device.get_cusolver_handle()
    dev_info = cupy.empty(1, dtype=numpy.int32)

    if dtype == 'f':
        geqrf_bufferSize = cusolver.sgeqrf_bufferSize
        geqrf = cusolver.sgeqrf
    elif dtype == 'd':
        geqrf_bufferSize = cusolver.dgeqrf_bufferSize
        geqrf = cusolver.dgeqrf
    elif dtype == 'F':
        geqrf_bufferSize = cusolver.cgeqrf_bufferSize
        geqrf = cusolver.cgeqrf
    elif dtype == 'D':
        geqrf_bufferSize = cusolver.zgeqrf_bufferSize
        geqrf = cusolver.zgeqrf
    else:
        msg = ('dtype must be float32, float64, complex64 or complex128'
               ' (actual: {})'.format(a.dtype))
        raise ValueError(msg)

    # compute working space of geqrf and solve R
    buffersize = geqrf_bufferSize(handle, m, n, x.data.ptr, n)
    workspace = cupy.empty(buffersize, dtype=dtype)
    tau = cupy.empty(mn, dtype=dtype)
    geqrf(handle, m, n, x.data.ptr, m, tau.data.ptr, workspace.data.ptr,
          buffersize, dev_info.data.ptr)
    cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        geqrf, dev_info)

    if mode == 'r':
        r = x[:, :mn].transpose()
        return _util._triu(r)

    if mode == 'raw':
        if a.dtype.char == 'f':
            # numpy.linalg.qr returns float64 in raw mode, whereas cusolver
            # returns float32. The extra conversion is somewhat wasteful, but
            # for now we explicitly cast to float64 for compatibility.
            return x.astype(numpy.float64), tau.astype(numpy.float64)
        elif a.dtype.char == 'F':
            # The same applies to complex64
            return x.astype(numpy.complex128), tau.astype(numpy.complex128)
        return x, tau

    if mode == 'complete' and m > n:
        mc = m
        q = cupy.empty((m, m), dtype)
    else:
        mc = mn
        q = cupy.empty((n, m), dtype)
    q[:n] = x

    # compute working space of orgqr and solve Q
    if dtype == 'f':
        orgqr_bufferSize = cusolver.sorgqr_bufferSize
        orgqr = cusolver.sorgqr
    elif dtype == 'd':
        orgqr_bufferSize = cusolver.dorgqr_bufferSize
        orgqr = cusolver.dorgqr
    elif dtype == 'F':
        orgqr_bufferSize = cusolver.cungqr_bufferSize
        orgqr = cusolver.cungqr
    elif dtype == 'D':
        orgqr_bufferSize = cusolver.zungqr_bufferSize
        orgqr = cusolver.zungqr

    buffersize = orgqr_bufferSize(handle, m, mc, mn, q.data.ptr, m,
                                  tau.data.ptr)
    workspace = cupy.empty(buffersize, dtype=dtype)
    orgqr(handle, m, mc, mn, q.data.ptr, m, tau.data.ptr, workspace.data.ptr,
          buffersize, dev_info.data.ptr)
    cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        orgqr, dev_info)

    q = q[:mc].transpose()
    r = x[:, :mc].transpose()
    return q, _util._triu(r)
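A usage sketch of the documented modes; cupy.linalg.qr mirrors numpy.linalg.qr, so plain NumPy illustrates the contract (the random input is assumed):

import numpy as np

a = np.random.rand(5, 3)
q, r = np.linalg.qr(a, mode='reduced')   # q: (5, 3), r: (3, 3)
assert np.allclose(q @ r, a)
r_only = np.linalg.qr(a, mode='r')       # only the (3, 3) triangular factor
assert np.allclose(np.triu(r_only), r_only)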
Example #54
0
def promote_types(type1, type2):  # pylint: disable=missing-function-docstring
    type1 = _to_numpy_type(type1)
    type2 = _to_numpy_type(type2)
    return np_dtypes.canonicalize_dtype(np.promote_types(type1, type2))
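np.promote_types, which this wrapper canonicalizes, returns the smallest dtype to which both arguments can be safely cast. Two cases that often surprise:

import numpy as np

np.promote_types(np.uint8, np.int8)     # int16: neither side contains the other
np.promote_types(np.int32, np.float32)  # float64: float32 cannot hold all int32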
Example #55
0
    def _declare_partials(self, of, wrt, dependent=True, rows=None, cols=None, val=None):
        """
        Store subjacobian metadata for later use.

        Parameters
        ----------
        of : str or list of str
            The name of the residual(s) that derivatives are being computed for.
            May also contain a glob pattern.
        wrt : str or list of str
            The name of the variables that derivatives are taken with respect to.
            This can contain the name of any input or output variable.
            May also contain a glob pattern.
        dependent : bool(True)
            If False, specifies no dependence between the output(s) and the
            input(s). This is only necessary in the case of a sparse global
            jacobian, because if 'dependent=False' is not specified and
            declare_partials is not called for a given pair, then a dense
            matrix of zeros will be allocated in the sparse global jacobian
            for that pair.  In the case of a dense global jacobian it doesn't
            matter because the space for a dense subjac will always be
            allocated for every pair.
        rows : ndarray of int or None
            Row indices for each nonzero entry.  For sparse subjacobians only.
        cols : ndarray of int or None
            Column indices for each nonzero entry.  For sparse subjacobians only.
        val : float or ndarray of float or scipy.sparse
            Value of subjacobian.  If rows and cols are not None, this will
            contain the values found at each (row, col) location in the subjac.
        """
        if dependent and val is not None and not issparse(val):
            val = np.atleast_1d(val)
            # np.promote_types will choose the smallest dtype that can contain both arguments
            safe_dtype = np.promote_types(val.dtype, float)
            val = val.astype(safe_dtype, copy=False)

        if dependent and rows is not None:
            rows = np.array(rows, dtype=int, copy=False)
            cols = np.array(cols, dtype=int, copy=False)

            if rows.shape != cols.shape:
                raise ValueError('rows and cols must have the same shape,'
                                 ' rows: {}, cols: {}'.format(rows.shape, cols.shape))

            if val is not None and val.shape != (1,) and rows.shape != val.shape:
                raise ValueError('If rows and cols are specified, val must be a scalar or have the '
                                 'same shape, val: {}, rows/cols: {}'.format(val.shape, rows.shape))

            if val is None:
                val = np.zeros_like(rows, dtype=float)

        pattern_matches = self._find_partial_matches(of, wrt)

        multiple_items = False

        for of_bundle, wrt_bundle in product(*pattern_matches):
            of_pattern, of_matches = of_bundle
            wrt_pattern, wrt_matches = wrt_bundle
            if not of_matches:
                raise ValueError('No matches were found for of="{}"'.format(of_pattern))
            if not wrt_matches:
                raise ValueError('No matches were found for wrt="{}"'.format(wrt_pattern))

            make_copies = (multiple_items
                           or len(of_matches) > 1
                           or len(wrt_matches) > 1)
            # Setting this to true means that future loop iterations (i.e. if there are multiple
            # items in either of or wrt) will make copies.
            multiple_items = True

            for rel_key in product(of_matches, wrt_matches):
                abs_key = rel_key2abs_key(self, rel_key)
                if not dependent:
                    if abs_key in self._subjacs_info:
                        del self._subjacs_info[abs_key]
                    continue

                if abs_key in self._subjacs_info:
                    meta = self._subjacs_info[abs_key]
                else:
                    meta = SUBJAC_META_DEFAULTS.copy()
                meta['rows'] = rows
                meta['cols'] = cols
                meta['value'] = deepcopy(val) if make_copies else val
                meta['dependent'] = dependent
                self._check_partials_meta(abs_key, meta)
                self._subjacs_info[abs_key] = meta
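A minimal sketch of the val normalization at the top of _declare_partials; the integer input is an assumed example:

import numpy as np

val = np.atleast_1d(3)                            # int scalar -> shape (1,) array
safe_dtype = np.promote_types(val.dtype, float)   # float64 holds both operands
val = val.astype(safe_dtype, copy=False)          # no copy if already float64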
Example #56
0
def inv(a):
    """Computes the inverse of a matrix.

    This function computes matrix ``a_inv`` from n-dimensional regular matrix
    ``a`` such that ``dot(a, a_inv) == eye(n)``.

    Args:
        a (cupy.ndarray): The regular matrix

    Returns:
        cupy.ndarray: The inverse of a matrix.

    .. warning::
        This function calls one or more cuSOLVER routine(s) which may yield
        invalid results if input conditions are not met.
        To detect these invalid results, you can set the `linalg`
        configuration to a value that is not `ignore` in
        :func:`cupyx.errstate` or :func:`cupyx.seterr`.

    .. seealso:: :func:`numpy.linalg.inv`
    """
    if a.ndim >= 3:
        return _batched_inv(a)

    # copy `a` to prevent it from being overwritten
    a = a.copy()

    util._assert_cupy_array(a)
    util._assert_rank2(a)
    util._assert_nd_squareness(a)

    # support float32, float64, complex64, and complex128
    if a.dtype.char in 'fdFD':
        dtype = a.dtype.char
    else:
        dtype = numpy.promote_types(a.dtype.char, 'f').char

    cusolver_handle = device.get_cusolver_handle()
    dev_info = cupy.empty(1, dtype=numpy.int32)

    ipiv = cupy.empty((a.shape[0], 1), dtype=numpy.intc)

    if dtype == 'f':
        getrf = cusolver.sgetrf
        getrf_bufferSize = cusolver.sgetrf_bufferSize
        getrs = cusolver.sgetrs
    elif dtype == 'd':
        getrf = cusolver.dgetrf
        getrf_bufferSize = cusolver.dgetrf_bufferSize
        getrs = cusolver.dgetrs
    elif dtype == 'F':
        getrf = cusolver.cgetrf
        getrf_bufferSize = cusolver.cgetrf_bufferSize
        getrs = cusolver.cgetrs
    elif dtype == 'D':
        getrf = cusolver.zgetrf
        getrf_bufferSize = cusolver.zgetrf_bufferSize
        getrs = cusolver.zgetrs
    else:
        msg = ('dtype must be float32, float64, complex64 or complex128'
               ' (actual: {})'.format(a.dtype))
        raise ValueError(msg)

    m = a.shape[0]

    buffersize = getrf_bufferSize(cusolver_handle, m, m, a.data.ptr, m)
    workspace = cupy.empty(buffersize, dtype=dtype)

    # LU factorization
    getrf(cusolver_handle, m, m, a.data.ptr, m, workspace.data.ptr,
          ipiv.data.ptr, dev_info.data.ptr)
    cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
        getrf, dev_info)

    b = cupy.eye(m, dtype=dtype)

    # solve for the inverse
    getrs(cusolver_handle, 0, m, m, a.data.ptr, m, ipiv.data.ptr, b.data.ptr,
          m, dev_info.data.ptr)
    cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
        getrs, dev_info)

    return b
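A quick check of the documented contract, written with NumPy since cupy.linalg.inv follows numpy.linalg.inv (the input values are assumed):

import numpy as np

a = np.array([[2., 1.], [1., 3.]])
a_inv = np.linalg.inv(a)
assert np.allclose(a @ a_inv, np.eye(2))  # dot(a, a_inv) == eye(n), up to rounding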
Example #57
0
    def __init__(
        self,
        mean: Union[float, np.floating, np.ndarray, linops.LinearOperator],
        cov: Union[float, np.floating, np.ndarray, linops.LinearOperator],
        cov_cholesky: Optional[Union[float, np.floating, np.ndarray,
                                     linops.LinearOperator]] = None,
        random_state: RandomStateArgType = None,
    ):
        # Type normalization
        if np.isscalar(mean):
            mean = _utils.as_numpy_scalar(mean)

        if np.isscalar(cov):
            cov = _utils.as_numpy_scalar(cov)

        if np.isscalar(cov_cholesky):
            cov_cholesky = _utils.as_numpy_scalar(cov_cholesky)

        # Data type normalization
        dtype = np.promote_types(mean.dtype, cov.dtype)

        if not np.issubdtype(dtype, np.floating):
            dtype = np.dtype(np.double)

        mean = mean.astype(dtype, order="C", casting="safe", copy=False)
        cov = cov.astype(dtype, order="C", casting="safe", copy=False)

        # Shape checking
        if not 0 <= mean.ndim <= 2:
            raise ValueError(
                f"Gaussian random variables must either be scalars, vectors, or "
                f"matrices (or linear operators), but the given mean is a {mean.ndim}-"
                f"dimensional tensor.")

        expected_cov_shape = (np.prod(mean.shape), ) * 2 if len(
            mean.shape) > 0 else ()

        if cov.shape != expected_cov_shape:
            raise ValueError(
                f"The covariance matrix must be of shape {expected_cov_shape}, but "
                f"shape {cov.shape} was given.")

        self._mean = mean
        self._cov = cov

        self._compute_cov_cholesky: Callable[[], _ValueType] = None
        self._cov_cholesky = cov_cholesky  # recall: None if not provided

        # Method selection
        univariate = mean.ndim == 0
        dense = isinstance(mean, np.ndarray) and isinstance(cov, np.ndarray)
        cov_operator = isinstance(cov, linops.LinearOperator)

        if univariate:
            # Univariate Gaussian
            sample = self._univariate_sample
            in_support = Normal._univariate_in_support
            pdf = self._univariate_pdf
            logpdf = self._univariate_logpdf
            cdf = self._univariate_cdf
            logcdf = self._univariate_logcdf
            quantile = self._univariate_quantile

            median = lambda: self._mean
            var = lambda: self._cov
            entropy = self._univariate_entropy

            self._compute_cov_cholesky = self._univariate_cov_cholesky

        elif dense or cov_operator:
            # Multi- and matrixvariate Gaussians
            sample = self._dense_sample
            in_support = Normal._dense_in_support
            pdf = self._dense_pdf
            logpdf = self._dense_logpdf
            cdf = self._dense_cdf
            logcdf = self._dense_logcdf
            quantile = None

            median = None
            var = self._dense_var
            entropy = self._dense_entropy

            self._compute_cov_cholesky = self.dense_cov_cholesky

            # Ensure that the Cholesky factor has the same type as the covariance,
            # and, if necessary, promote data types. Check for (in this order): type, shape, dtype.
            if self._cov_cholesky is not None:

                if not isinstance(self._cov_cholesky, type(self._cov)):
                    raise TypeError(
                        f"The covariance matrix is of type `{type(self._cov)}`, so its "
                        f"Cholesky decomposition must be of the same type, but an "
                        f"object of type `{type(self._cov_cholesky)}` was given."
                    )

                if self._cov_cholesky.shape != self._cov.shape:
                    raise ValueError(
                        f"The cholesky decomposition of the covariance matrix must "
                        f"have the same shape as the covariance matrix, i.e. "
                        f"{self._cov.shape}, but shape {self._cov_cholesky.shape} was given"
                    )

                if self._cov_cholesky.dtype != self._cov.dtype:
                    self._cov_cholesky = self._cov_cholesky.astype(
                        self._cov.dtype, casting="safe", copy=False)

            if isinstance(cov, linops.SymmetricKronecker):
                m, n = mean.shape

                if m != n or n != cov.A.shape[0] or n != cov.B.shape[1]:
                    raise ValueError(
                        "Normal distributions with symmetric Kronecker structured "
                        "kernels must have square mean and square kernels factors with "
                        "matching dimensions.")

                if cov.identical_factors:
                    sample = self._symmetric_kronecker_identical_factors_sample

                    # pylint: disable=redefined-variable-type
                    self._compute_cov_cholesky = (
                        self.
                        _symmetric_kronecker_identical_factors_cov_cholesky)
            elif isinstance(cov, linops.Kronecker):
                m, n = mean.shape

                if (m != cov.A.shape[0] or m != cov.A.shape[1]
                        or n != cov.B.shape[0] or n != cov.B.shape[1]):
                    raise ValueError(
                        "Kronecker structured kernels must have factors with the same "
                        "shape as the mean.")

                self._compute_cov_cholesky = self._kronecker_cov_cholesky

        else:
            raise ValueError(
                f"Cannot instantiate normal distribution with mean of type "
                f"{mean.__class__.__name__} and kernels of type "
                f"{cov.__class__.__name__}.")

        super().__init__(
            shape=mean.shape,
            dtype=mean.dtype,
            random_state=random_state,
            parameters={
                "mean": self._mean,
                "cov": self._cov
            },
            sample=sample,
            in_support=in_support,
            pdf=pdf,
            logpdf=logpdf,
            cdf=cdf,
            logcdf=logcdf,
            quantile=quantile,
            mode=lambda: self._mean,
            median=median,
            mean=lambda: self._mean,
            cov=lambda: self._cov,
            var=var,
            entropy=entropy,
        )
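The dtype normalization step above can be paraphrased in a few lines of plain NumPy (the example inputs are assumed):

import numpy as np

mean = np.zeros(2, dtype=np.int32)
cov = np.eye(2, dtype=np.int32)
dtype = np.promote_types(mean.dtype, cov.dtype)  # int32: still not floating
if not np.issubdtype(dtype, np.floating):
    dtype = np.dtype(np.double)                  # fall back to float64
mean = mean.astype(dtype, casting="safe", copy=False)
cov = cov.astype(dtype, casting="safe", copy=False)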
Example #58
0
def _gibbs_removal_1d(x, axis=0, n_points=3):
    """Suppresses Gibbs ringing along a given axis using fourier sub-shifts.

    Parameters
    ----------
    x : 2D ndarray
        Matrix x.
    axis : int (0 or 1)
        Axis in which Gibbs oscillations will be suppressed.
        Default is set to 0.
    n_points : int, optional
        Number of neighbouring points used to assess local TV (see note).
        Default is set to 3.

    Returns
    -------
    xc : 2D ndarray
        Matrix with suppressed Gibbs oscillations along the given axis.

    Notes
    -----
    This function suppresses the effects of Gibbs oscillations based on the
    analysis of local total variation (TV). Although artefact correction is
    done based on two adjacent points for each voxel, total variation should be
    assessed over a larger range of neighbours. The number of neighbours to be
    considered in TV calculation can be adjusted using the parameter n_points.

    """
    dtype_float = np.promote_types(x.real.dtype, np.float32)

    ssamp = np.linspace(0.02, 0.9, num=45, dtype=dtype_float)

    xs = x.copy() if axis else x.T.copy()

    # TV for shift zero (baseline)
    tvr, tvl = _image_tv(xs, axis=1, n_points=n_points)
    tvp = np.minimum(tvr, tvl)
    tvn = tvp.copy()

    # Find optimal shift for gibbs removal
    isp = xs.copy()
    isn = xs.copy()
    sp = np.zeros(xs.shape, dtype=dtype_float)
    sn = np.zeros(xs.shape, dtype=dtype_float)
    N = xs.shape[1]
    c = _fft.fft(xs, axis=1)
    k = _fft.fftfreq(N, 1 / (2.0j * np.pi))
    k = k.astype(c.dtype, copy=False)
    for s in ssamp:
        ks = k * s
        # Assess the positive shift for this s
        img_p = abs(_fft.ifft(c * np.exp(ks), axis=1))

        tvsr, tvsl = _image_tv(img_p, axis=1, n_points=n_points)
        tvs_p = np.minimum(tvsr, tvsl)

        # Assess the negative shift for this s
        img_n = abs(_fft.ifft(c * np.exp(-ks), axis=1))
        tvsr, tvsl = _image_tv(img_n, axis=1, n_points=n_points)
        tvs_n = np.minimum(tvsr, tvsl)

        # Update positive shift params
        isp[tvp > tvs_p] = img_p[tvp > tvs_p]
        sp[tvp > tvs_p] = s
        tvp[tvp > tvs_p] = tvs_p[tvp > tvs_p]

        # Update negative shift params
        isn[tvn > tvs_n] = img_n[tvn > tvs_n]
        sn[tvn > tvs_n] = s
        tvn[tvn > tvs_n] = tvs_n[tvn > tvs_n]

    # check non-zero sub-voxel shifts
    idx = np.nonzero(sp + sn)

    # use positive and negative optimal sub-voxel shifts to interpolate to
    # original grid points
    xs[idx] = (isp[idx] - isn[idx])/(sp[idx] + sn[idx])*sn[idx] + isn[idx]

    return xs if axis else xs.T
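The fftfreq call above pre-scales the frequencies by 2j*pi, so multiplying the spectrum by exp(k*s) translates the signal by s samples. A standalone sketch of that sub-voxel shift, with an assumed test signal:

import numpy as np

x = np.sin(np.linspace(0, 2 * np.pi, 64, endpoint=False))
N = x.size
c = np.fft.fft(x)
k = np.fft.fftfreq(N, 1 / (2.0j * np.pi))        # frequencies pre-scaled by 2j*pi
shifted = np.fft.ifft(c * np.exp(k * 0.5)).real  # x sampled half a grid point away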
Example #59
0
def histogram(image, nbins=256):
    """Return histogram of image.

    Unlike `numpy.histogram`, this function returns the centers of bins and
    does not rebin integer arrays. For integer arrays, each integer value has
    its own bin, which improves speed and intensity-resolution.

    The histogram is computed on the flattened image: for color images, the
    function should be used separately on each channel to obtain a histogram
    for each color channel.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.

    Returns
    -------
    hist : array
        The values of the histogram.
    bin_centers : array
        The values at the center of the bins.

    Examples
    --------
    >>> from skimage import data, exposure, util
    >>> image = util.img_as_float(data.camera())
    >>> np.histogram(image, bins=2)
    (array([107432, 154712]), array([ 0. ,  0.5,  1. ]))
    >>> exposure.histogram(image, nbins=2)
    (array([107432, 154712]), array([ 0.25,  0.75]))
    """
    sh = image.shape
    if len(sh) == 3 and sh[-1] < 4:
        warnings.warn("This might be a color image. The histogram will be "
                      "computed on the flattened image. You can instead "
                      "apply this function to each color channel.")

    # For integer types, histogramming with bincount is more efficient.
    if np.issubdtype(image.dtype, np.integer):
        offset = 0
        image_min = np.min(image)
        if image_min < 0:
            offset = image_min
            image_range = np.max(image).astype(np.int64) - image_min
            # get smallest dtype that can hold both minimum and offset maximum
            offset_dtype = np.promote_types(np.min_scalar_type(image_range),
                                            np.min_scalar_type(image_min))
            if image.dtype != offset_dtype:
                # prevent overflow errors when offsetting
                image = image.astype(offset_dtype)
            image = image - offset
        hist = np.bincount(image.ravel())
        bin_centers = np.arange(len(hist)) + offset

        # clip histogram to start with a non-zero bin
        idx = np.nonzero(hist)[0][0]
        return hist[idx:], bin_centers[idx:]
    else:
        hist, bin_edges = np.histogram(image.flat, nbins)
        bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
        return hist, bin_centers
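The offset-dtype computation above picks the smallest integer type able to hold both the image range and the negative minimum. With assumed values min = -5 and range = 260:

import numpy as np

np.min_scalar_type(260)   # uint16
np.min_scalar_type(-5)    # int8
np.promote_types(np.min_scalar_type(260), np.min_scalar_type(-5))  # int32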
Example #60
0
def svd(a, full_matrices=True, compute_uv=True):
    """Singular Value Decomposition.

    Factorizes the matrix ``a`` as ``u * np.diag(s) * v``, where ``u`` and
    ``v`` are unitary and ``s`` is a one-dimensional array of ``a``'s
    singular values.

    Args:
        a (cupy.ndarray): The input matrix with dimension ``(..., M, N)``.
        full_matrices (bool): If True, it returns u and v with dimensions
            ``(..., M, M)`` and ``(..., N, N)``. Otherwise, the dimensions
            of u and v are ``(..., M, K)`` and ``(..., K, N)``, respectively,
            where ``K = min(M, N)``.
        compute_uv (bool): If ``False``, it only returns singular values.

    Returns:
        tuple of :class:`cupy.ndarray`:
            A tuple of ``(u, s, v)`` such that ``a = u * np.diag(s) * v``.

    .. warning::
        This function calls one or more cuSOLVER routine(s) which may yield
        invalid results if input conditions are not met.
        To detect these invalid results, you can set the `linalg`
        configuration to a value that is not `ignore` in
        :func:`cupyx.errstate` or :func:`cupyx.seterr`.

    .. note::
        On CUDA, when ``a.ndim > 2`` and the matrix dimensions are <= 32, a
        fast code path based on the Jacobi method (``gesvdj``) is taken.
        Otherwise, a QR method (``gesvd``) is used.

        On ROCm, there is no such fast code path that switches the underlying
        algorithm.

    .. seealso:: :func:`numpy.linalg.svd`
    """
    _util._assert_cupy_array(a)

    # Cast to float32 or float64
    a_dtype = numpy.promote_types(a.dtype.char, 'f').char
    if a_dtype == 'f':
        s_dtype = 'f'
    elif a_dtype == 'd':
        s_dtype = 'd'
    elif a_dtype == 'F':
        s_dtype = 'f'
    else:  # a_dtype == 'D':
        a_dtype = 'D'
        s_dtype = 'd'

    if a.ndim > 2:
        return _svd_batched(a, a_dtype, full_matrices, compute_uv)

    # Remark 1: gesvd only supports m >= n
    # Remark 2: gesvd returns matrix U and V^H
    n, m = a.shape

    if m == 0 or n == 0:
        s = cupy.empty((0, ), s_dtype)
        if compute_uv:
            if full_matrices:
                u = cupy.eye(n, dtype=a_dtype)
                vt = cupy.eye(m, dtype=a_dtype)
            else:
                u = cupy.empty((n, 0), dtype=a_dtype)
                vt = cupy.empty((0, m), dtype=a_dtype)
            return u, s, vt
        else:
            return s

    # `a` must be copied because xgesvd destroys the matrix
    if m >= n:
        x = a.astype(a_dtype, order='C', copy=True)
        trans_flag = False
    else:
        m, n = a.shape
        x = a.transpose().astype(a_dtype, order='C', copy=True)
        trans_flag = True

    k = n  # = min(m, n) where m >= n is ensured above
    if compute_uv:
        if full_matrices:
            u = cupy.empty((m, m), dtype=a_dtype)
            vt = x[:, :n]
            job_u = ord('A')
            job_vt = ord('O')
        else:
            u = x
            vt = cupy.empty((k, n), dtype=a_dtype)
            job_u = ord('O')
            job_vt = ord('S')
        u_ptr, vt_ptr = u.data.ptr, vt.data.ptr
    else:
        u_ptr, vt_ptr = 0, 0  # Use nullptr
        job_u = ord('N')
        job_vt = ord('N')
    s = cupy.empty(k, dtype=s_dtype)
    handle = device.get_cusolver_handle()
    dev_info = cupy.empty(1, dtype=numpy.int32)

    if a_dtype == 'f':
        gesvd = cusolver.sgesvd
        gesvd_bufferSize = cusolver.sgesvd_bufferSize
    elif a_dtype == 'd':
        gesvd = cusolver.dgesvd
        gesvd_bufferSize = cusolver.dgesvd_bufferSize
    elif a_dtype == 'F':
        gesvd = cusolver.cgesvd
        gesvd_bufferSize = cusolver.cgesvd_bufferSize
    else:  # a_dtype == 'D':
        gesvd = cusolver.zgesvd
        gesvd_bufferSize = cusolver.zgesvd_bufferSize

    buffersize = gesvd_bufferSize(handle, m, n)
    workspace = cupy.empty(buffersize, dtype=a_dtype)
    if not runtime.is_hip:
        # rwork can be NULL if the information from the superdiagonal isn't needed
        # https://docs.nvidia.com/cuda/cusolver/index.html#cuSolverDN-lt-t-gt-gesvd  # noqa
        rwork_ptr = 0
    else:
        rwork = cupy.empty(min(m, n) - 1, dtype=s_dtype)
        rwork_ptr = rwork.data.ptr
    gesvd(handle, job_u, job_vt, m, n, x.data.ptr, m, s.data.ptr, u_ptr, m,
          vt_ptr, n, workspace.data.ptr, buffersize, rwork_ptr,
          dev_info.data.ptr)
    cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        gesvd, dev_info)

    # Note that the returned array may need to be transposed
    # depending on the structure of an input
    if compute_uv:
        if trans_flag:
            return u.transpose(), s, vt.transpose()
        else:
            return vt, s, u
    else:
        return s
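A closing sketch of the documented contract; cupy.linalg.svd matches numpy.linalg.svd, so NumPy suffices for illustration (the random input is assumed):

import numpy as np

a = np.random.rand(4, 6)
u, s, vh = np.linalg.svd(a, full_matrices=False)  # u: (4, 4), s: (4,), vh: (4, 6)
assert np.allclose((u * s) @ vh, a)               # reconstruct a from the factors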