Example #1
# The aliases below are not defined in this snippet; a typical definition maps
# them to the SciPy (host) and CuPy (device) sparse constructors, as assumed here.
import cupy as cp
from scipy.sparse import csr_matrix as cpu_csr_matrix
from scipy.sparse import csc_matrix as cpu_csc_matrix
from scipy.sparse import coo_matrix as cpu_coo_matrix
from cupyx.scipy.sparse import csr_matrix as gpu_csr_matrix
from cupyx.scipy.sparse import csc_matrix as gpu_csc_matrix
from cupyx.scipy.sparse import coo_matrix as gpu_coo_matrix


def sparsify_and_convert(dataset, conversion_format, sparsify_ratio=0.3):
    """Randomly set values to 0 and produce a sparse array.

    Parameters
    ----------
    dataset : array
        Input array to convert
    conversion_format : string
        Type of sparse array:
        - scipy-csr: SciPy CSR sparse array
        - scipy-csc: SciPy CSC sparse array
        - scipy-coo: SciPy COO sparse array
        - cupy-csr: CuPy CSR sparse array
        - cupy-csc: CuPy CSC sparse array
        - cupy-coo: CuPy COO sparse array
    sparsify_ratio : float in [0, 1]
        Fraction of values randomly set to zero

    Returns
    -------
    tuple
        The sparsified data as a SciPy CSR matrix, and the same data
        converted to the requested sparse format.
    """
    # Pick random flat indices and zero them out to reach the requested sparsity.
    random_loc = cp.random.choice(dataset.size,
                                  int(dataset.size * sparsify_ratio),
                                  replace=False)
    dataset.ravel()[random_loc] = 0

    if conversion_format.startswith("scipy"):
        dataset = cp.asnumpy(dataset)

    if conversion_format == "scipy-csr":
        converted_dataset = cpu_csr_matrix(dataset)
    elif conversion_format == "scipy-csc":
        converted_dataset = cpu_csc_matrix(dataset)
    elif conversion_format == "scipy-coo":
        converted_dataset = cpu_coo_matrix(dataset)
    elif conversion_format == "cupy-csr":
        converted_dataset = gpu_csr_matrix(dataset)
    elif conversion_format == "cupy-csc":
        converted_dataset = gpu_csc_matrix(dataset)
    elif conversion_format == "cupy-coo":
        # Build the COO matrix on the host first, then copy it to the GPU.
        np_array = cp.asnumpy(dataset)
        np_coo_array = cpu_coo_matrix(np_array)
        converted_dataset = gpu_coo_matrix(np_coo_array)

    if conversion_format.startswith("cupy"):
        dataset = cp.asnumpy(dataset)

    return cpu_csr_matrix(dataset), converted_dataset
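
A minimal usage sketch (not part of the original example): assuming CuPy and SciPy are installed and the imports above are in scope, the helper can be exercised on a random CuPy array as follows; the variable names are illustrative only.

# Hypothetical usage: `dense`, `ref_csr` and `converted` are illustrative names.
dense = cp.random.rand(100, 20)  # dense data on the GPU
ref_csr, converted = sparsify_and_convert(dense, "cupy-csr", sparsify_ratio=0.5)
# ref_csr is a SciPy CSR matrix of the sparsified data (host copy);
# converted is a CuPy CSR matrix of the same data (device copy).
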
Example #2
# This validator relies on helpers defined elsewhere in its module
# (numeric_types, check_dtype, check_sparse, check_finite, input_to_cupy_array,
# cpu_sparse/gpu_sparse and the gpu_*_matrix aliases); they are not shown here.
def check_array(array,
                accept_sparse=False,
                accept_large_sparse=True,
                dtype='numeric',
                order=None,
                copy=False,
                force_all_finite=True,
                ensure_2d=True,
                allow_nd=False,
                ensure_min_samples=1,
                ensure_min_features=1,
                warn_on_dtype=None,
                estimator=None):
    """Input validation on an array, list, sparse matrix or similar.
    By default, the input is checked to be a non-empty 2D array containing
    only finite values. If the dtype of the array is object, attempt
    converting to float, raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
    accept_large_sparse : bool (default=True)
        If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse=False will cause it to be accepted
        only if its indices are stored with a 32-bit dtype.
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first type in the list
        is performed only if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be Fortran or C-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean or 'allow-nan' (default=True)
        Whether to raise an error on np.inf, np.nan or pd.NA in array. The
        possibilities are:
        - True: force all values of array to be finite.
        - False: accept np.inf, np.nan and pd.NA in array.
        - 'allow-nan': accept only np.nan and pd.NA values in array; values
          cannot be infinite.
    ensure_2d : boolean (default=True)
        Whether to raise a value error if array is not 2D.
    allow_nd : boolean (default=False)
        Whether to allow array.ndim > 2.
    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.
    warn_on_dtype : unused parameter
    estimator : unused parameter

    Returns
    -------
    array_converted : object
        The converted and validated array.
    """

    if dtype == 'numeric':
        dtype = numeric_types

    correct_dtype = check_dtype(array, dtype)

    # When copying without an explicit order, keep the input's memory layout;
    # otherwise fall back to Fortran order below.
    if copy and not order and hasattr(array, 'flags'):
        if array.flags['F_CONTIGUOUS']:
            order = 'F'
        elif array.flags['C_CONTIGUOUS']:
            order = 'C'

    if not order:
        order = 'F'

    hasshape = hasattr(array, 'shape')
    if ensure_2d and hasshape:
        if len(array.shape) != 2:
            raise ValueError("Not 2D")

    if not allow_nd and hasshape:
        if len(array.shape) > 2:
            raise ValueError("More than 2 dimensions detected")

    if ensure_min_samples > 0 and hasshape:
        if array.shape[0] < ensure_min_samples:
            raise ValueError("Not enough samples")

    if ensure_min_features > 0 and hasshape and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required." %
                             (n_features, array.shape, ensure_min_features))

    # Sparse input: validate the format and return a CuPy (device) sparse matrix.
    is_sparse = cpu_sparse.issparse(array) or gpu_sparse.issparse(array)
    if is_sparse:
        check_sparse(array, accept_sparse, accept_large_sparse)
        if array.format == 'csr':
            new_array = gpu_csr_matrix(array, copy=copy)
        elif array.format == 'csc':
            new_array = gpu_csc_matrix(array, copy=copy)
        elif array.format == 'coo':
            new_array = gpu_coo_matrix(array, copy=copy)
        else:
            raise ValueError('Sparse matrix format not supported')
        check_finite(new_array.data, force_all_finite)
        if correct_dtype != new_array.dtype:
            new_array = new_array.astype(correct_dtype)
        return new_array
    else:
        # Dense input: convert to a CuPy array with the requested order and copy flag.
        X, n_rows, n_cols, dtype = input_to_cupy_array(array,
                                                       order=order,
                                                       deepcopy=copy,
                                                       fail_on_null=False)
        if correct_dtype != dtype:
            X = X.astype(correct_dtype)
        check_finite(X, force_all_finite)
        return X
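
A hedged usage sketch (illustrative, not from the source): it assumes the module-level helpers referenced above are available and a CUDA device is present, so dense and sparse inputs come back as CuPy objects.

# Hypothetical usage: input data and variable names are made up for illustration.
import numpy as np
import scipy.sparse

X = np.array([[1.0, 2.0], [3.0, np.nan]])
X_checked = check_array(X, force_all_finite='allow-nan')  # NaN tolerated, inf rejected
# Dense input is returned as a CuPy array (via input_to_cupy_array).

X_sparse = scipy.sparse.random(10, 5, density=0.2, format='csr')
X_sparse_checked = check_array(X_sparse, accept_sparse='csr')
# Sparse input is validated and returned as a CuPy CSR matrix on the device.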