Example #1
def make_blobs(n_samples=100, n_features=2, centers=None, cluster_std=1.0,
               n_parts=None, center_box=(-10, 10), shuffle=True,
               random_state=None, return_centers=False,
               verbosity=logger.LEVEL_INFO, order='F', dtype='float32',
               client=None):
    """
    Makes labeled Dask-CuPy arrays containing blobs
    for a randomly generated set of centroids.

    This function calls `make_blobs` from `cuml.datasets` on each Dask worker
    and aggregates the results into a single Dask array.

    See Scikit-learn's `make_blobs
    <https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html>`_
    for more information.

    Parameters
    ----------

    n_samples : int
        Number of rows.
    n_features : int
        Number of features.
    centers : int or array of shape [n_centers, n_features], optional
        (default=None)
        The number of centers to generate, or the fixed center locations.
        If n_samples is an int and centers is None, 3 centers are generated.
        If n_samples is array-like, centers must be either None
        or an array of length equal to the length of n_samples.
    cluster_std : float (default = 1.0)
        Standard deviation of points around the centroid.
    n_parts : int (default = None)
        Number of partitions to generate (this can be greater
        than the number of workers).
    center_box : tuple (int, int) (default = (-10, 10))
        The bounding box which constrains all the centroids.
    random_state : int (default = None)
        Sets the random seed (or use None to reinitialize each time).
    return_centers : bool, optional (default=False)
        If True, also return the centers of each cluster.
    verbosity : int (default = cuml.logger.LEVEL_INFO)
        Logging level.
    shuffle : bool (default=True)
        Shuffle the samples on each worker.
    order : str, optional (default='F')
        The order of the generated samples.
    dtype : str, optional (default='float32')
        Dtype of the generated samples.
    client : dask.distributed.Client (optional)
        Dask client to use.

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The input samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The labels for cluster membership of each sample.
    centers : dask.array backed by CuPy array of shape
        [n_centers, n_features], optional
        The centers of the underlying blobs. It is returned only if
        return_centers is True.
    """

    client = get_client(client=client)

    generator = _create_rs_generator(random_state=random_state)

    workers = list(client.scheduler_info()['workers'].keys())

    n_parts = n_parts if n_parts is not None else len(workers)
    parts_workers = (workers * n_parts)[:n_parts]

    centers, n_centers = _get_centers(generator, centers, center_box,
                                      n_samples, n_features,
                                      dtype)

    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    if rows_per_part == 1:
        worker_rows[-1] += n_samples % n_parts
    else:
        worker_rows[-1] += n_samples % rows_per_part

    worker_rows = tuple(worker_rows)

    logger.debug("Generating %d samples across %d partitions on "
                 "%d workers (total=%d samples)" %
                 (math.ceil(n_samples / len(workers)),
                  n_parts, len(workers), n_samples))

    seeds = generator.randint(n_samples, size=len(parts_workers))
    parts = [client.submit(_create_local_data,
                           part_rows,
                           n_features,
                           centers,
                           cluster_std,
                           shuffle,
                           int(seeds[idx]),
                           order,
                           dtype,
                           pure=False,
                           workers=[parts_workers[idx]])
             for idx, part_rows in enumerate(worker_rows)]

    X = [client.submit(_get_X, f, pure=False)
         for idx, f in enumerate(parts)]
    y = [client.submit(_get_labels, f, pure=False)
         for idx, f in enumerate(parts)]

    X_del = _create_delayed(X, dtype, worker_rows, n_features)
    y_del = _create_delayed(y, dtype, worker_rows)

    X_final = da.concatenate(X_del, axis=0)
    y_final = da.concatenate(y_del, axis=0)

    if return_centers:
        return X_final, y_final, centers
    else:
        return X_final, y_final
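
The distributed generator above is meant to be driven by a Dask client. A minimal usage sketch follows, assuming `dask_cuda` is installed and that the function is importable as `cuml.dask.datasets.make_blobs` (the exact import path may vary between cuML versions); `X` and `y` come back as lazy dask arrays backed by CuPy.

# Minimal usage sketch, assuming dask_cuda and a CUDA-capable machine.
# The import path below is an assumption and may differ across cuML versions.
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from cuml.dask.datasets import make_blobs

cluster = LocalCUDACluster()
client = Client(cluster)

# X and y are lazy dask.array objects backed by CuPy, one chunk per partition.
X, y, centers = make_blobs(n_samples=1000, n_features=2, centers=5,
                           cluster_std=0.5, random_state=42,
                           return_centers=True, client=client)

print(X.shape, y.shape)    # (1000, 2) (1000,)
print(X.compute()[:5])     # materialize a few rows on the client

client.close()
cluster.close()
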
Example #2
def make_blobs(n_samples=100,
               n_features=2,
               centers=None,
               cluster_std=1.0,
               center_box=(-10.0, 10.0),
               shuffle=True,
               random_state=None,
               return_centers=False,
               order='F',
               dtype='float32'):
    """Generate isotropic Gaussian blobs for clustering.

    Parameters
    ----------
    n_samples : int or array-like, optional (default=100)
        If int, it is the total number of points equally divided among
        clusters.
        If array-like, each element of the sequence indicates
        the number of samples per cluster.
    n_features : int, optional (default=2)
        The number of features for each sample.
    centers : int or array of shape [n_centers, n_features], optional
        (default=None)
        The number of centers to generate, or the fixed center locations.
        If n_samples is an int and centers is None, 3 centers are generated.
        If n_samples is array-like, centers must be
        either None or an array of length equal to the length of n_samples.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
    return_centers : bool, optional (default=False)
        If True, then return the centers of each cluster
    order : str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples

    Returns
    -------
    X : device array of shape [n_samples, n_features]
        The generated samples.
    y : device array of shape [n_samples]
        The integer labels for cluster membership of each sample.
    centers : device array, shape [n_centers, n_features]
        The centers of each cluster. Only returned if
        ``return_centers=True``.

    Examples
    --------
    >>> from sklearn.datasets import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])

    See also
    --------
    make_classification: a more intricate variant
    """
    generator = _create_rs_generator(random_state=random_state)

    centers, n_centers = _get_centers(generator, centers, center_box,
                                      n_samples, n_features, dtype)

    # stds: if cluster_std is given as list, it must be consistent
    # with the n_centers
    if (hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers):
        raise ValueError("Length of `clusters_std` not consistent with "
                         "number of centers. Got centers = {} "
                         "and cluster_std = {}".format(centers, cluster_std))

    if isinstance(cluster_std, numbers.Real):
        cluster_std = cp.full(len(centers), cluster_std)

    if isinstance(n_samples, Iterable):
        n_samples_per_center = n_samples
    else:
        n_samples_per_center = [int(n_samples // n_centers)] * n_centers

        for i in range(n_samples % n_centers):
            n_samples_per_center[i] += 1

    X = cp.zeros(n_samples * n_features, dtype=dtype)
    X = X.reshape((n_samples, n_features), order=order)
    y = cp.zeros(n_samples, dtype=dtype)

    if shuffle:
        proba_samples_per_center = np.array(n_samples_per_center) / np.sum(
            n_samples_per_center)
        np_seed = int(generator.randint(n_samples, size=1))
        np.random.seed(np_seed)
        shuffled_sample_indices = cp.array(
            np.random.choice(n_centers,
                             n_samples,
                             replace=True,
                             p=proba_samples_per_center))
        for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
            center_indices = cp.where(shuffled_sample_indices == i)

            y[center_indices[0]] = i

            X_k = generator.normal(scale=std,
                                   size=(len(center_indices[0]), n_features),
                                   dtype=dtype)

            # NOTE: Adding the loc explicitly as cupy has a bug
            # when calling generator.normal with an array for loc.
            # cupy.random.normal, however, works with the same
            # arguments
            cp.add(X_k, centers[i], out=X_k)
            X[center_indices[0], :] = X_k
    else:
        stop = 0
        for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
            start, stop = stop, stop + n_samples_per_center[i]

            y[start:stop] = i

            X_k = generator.normal(scale=std,
                                   size=(n, n_features),
                                   dtype=dtype)

            cp.add(X_k, centers[i], out=X_k)
            X[start:stop, :] = X_k

    if return_centers:
        return X, y, centers
    else:
        return X, y
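
For comparison, a short sketch of calling the single-GPU version directly; the `cuml.datasets.make_blobs` import path is an assumption, and the returned arrays live on the GPU as CuPy arrays.

# Usage sketch for the single-GPU generator; the import path is an assumption.
import cupy as cp
from cuml.datasets import make_blobs

X, y, centers = make_blobs(n_samples=100, n_features=3, centers=4,
                           cluster_std=0.7, random_state=0,
                           return_centers=True)

print(X.shape, y.shape, centers.shape)   # (100, 3) (100,) (4, 3)
X_host = cp.asnumpy(X)                   # copy to host for CPU-only libraries
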
Example #3
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None, order='F',
                        dtype='float32', n_parts=None, client=None):
    """
    Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length ``2 * class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary `n_informative` features, followed by `n_redundant`
    linear combinations of the informative features, followed by `n_repeated`
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.

    Examples
    --------

    .. code-block:: python

        from dask.distributed import Client
        from dask_cuda import LocalCUDACluster
        from cuml.dask.datasets.classification import make_classification
        cluster = LocalCUDACluster()
        client = Client(cluster)
        X, y = make_classification(n_samples=10, n_features=4,
                                   n_informative=2, n_classes=2)

        print("X:")
        print(X.compute())

        print("y:")
        print(y.compute())

    Output:

    .. code-block:: python

        X:
        [[-1.6990056  -0.8241044  -0.06997631  0.45107925]
        [-1.8105277   1.7829906   0.492909    0.05390119]
        [-0.18290454 -0.6155432   0.6667889  -1.0053712 ]
        [-2.7530136  -0.888528   -0.5023055   1.3983376 ]
        [-0.9788184  -0.89851004  0.10802134 -0.10021686]
        [-0.76883423 -1.0689086   0.01249526 -0.1404741 ]
        [-1.5676656  -0.83082974 -0.03072987  0.34499463]
        [-0.9381793  -1.0971068  -0.07465998  0.02618019]
        [-1.3021476  -0.87076336  0.02249984  0.15187258]
        [ 1.1820307   1.7524253   1.5087451  -2.4626074 ]]

        y:
        [0 1 0 0 0 0 0 0 0 1]

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features,
        `n_repeated` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape ``(n_classes,)`` or ``(n_classes - 1,)``, \
        (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of
        `weights` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order : str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The generated samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    How we extended the dask MNMG version from the single GPU version:

    1. We generate centroids of shape ``(n_centroids, n_informative)``
    2. We generate an informative covariance of shape \
        ``(n_centroids, n_informative, n_informative)``
    3. We generate a redundant covariance of shape \
        ``(n_informative, n_redundant)``
    4. We generate the indices for the repeated features

    We pass along the references to the futures of the above arrays
    with each part to the single GPU
    `cuml.datasets.classification.make_classification` so that each
    part (and worker) has access to the correct values to generate
    data from the same covariances.

    """

    client = get_client(client=client)

    rs = _create_rs_generator(random_state)

    workers = list(client.scheduler_info()['workers'].keys())

    n_parts = n_parts if n_parts is not None else len(workers)
    parts_workers = (workers * n_parts)[:n_parts]

    n_clusters = n_classes * n_clusters_per_class

    # create centroids
    centroids = cp.array(_generate_hypercube(n_clusters, n_informative,
                                             rs)).astype(dtype, copy=False)

    covariance_seeds = rs.randint(n_features, size=2)
    informative_covariance = client.submit(_create_covariance,
                                           (n_clusters, n_informative,
                                            n_informative),
                                           int(covariance_seeds[0]),
                                           pure=False)

    redundant_covariance = client.submit(_create_covariance,
                                         (n_informative,
                                          n_redundant),
                                         int(covariance_seeds[1]),
                                         pure=False)

    # repeated indices
    n = n_informative + n_redundant
    repeated_indices = ((n - 1) * rs.rand(n_repeated, dtype=dtype)
                        + 0.5).astype(np.intp)

    # scale and shift
    if shift is None:
        shift = (2 * rs.rand(n_features, dtype=dtype) - 1) * class_sep

    if scale is None:
        scale = 1 + 100 * rs.rand(n_features, dtype=dtype)

    # Create arrays on each worker (gpu)
    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    worker_rows[-1] += (n_samples % n_parts)

    worker_rows = tuple(worker_rows)

    part_seeds = rs.permutation(n_parts)
    parts = [client.submit(sg_make_classification, worker_rows[i], n_features,
                           n_informative, n_redundant, n_repeated, n_classes,
                           n_clusters_per_class, weights, flip_y, class_sep,
                           hypercube, shift, scale, shuffle,
                           int(part_seeds[i]), order, dtype, centroids,
                           informative_covariance, redundant_covariance,
                           repeated_indices, pure=False,
                           workers=[parts_workers[i]])
             for i in range(len(parts_workers))]

    X_parts = [client.submit(_get_X, f, pure=False)
               for idx, f in enumerate(parts)]
    y_parts = [client.submit(_get_labels, f, pure=False)
               for idx, f in enumerate(parts)]

    X_dela = _create_delayed(X_parts, dtype, worker_rows, n_features)
    y_dela = _create_delayed(y_parts, dtype, worker_rows)

    X = da.concatenate(X_dela)
    y = da.concatenate(y_dela)

    return X, y
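
To make the partitioning logic above concrete, here is a small host-side sketch that reproduces the same arithmetic used to split `n_samples` rows across `n_parts` partitions: every partition gets the integer quotient and the last partition absorbs the remainder.

# Host-side sketch of the row-partitioning arithmetic used above.
def split_rows(n_samples, n_parts):
    rows_per_part = max(1, int(n_samples / n_parts))
    worker_rows = [rows_per_part] * n_parts
    worker_rows[-1] += n_samples % n_parts
    return tuple(worker_rows)

print(split_rows(100, 3))   # (33, 33, 34)
print(split_rows(10, 4))    # (2, 2, 2, 4)
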
Example #4
def make_classification(n_samples=100,
                        n_features=20,
                        n_informative=2,
                        n_redundant=2,
                        n_repeated=0,
                        n_classes=2,
                        n_clusters_per_class=2,
                        weights=None,
                        flip_y=0.01,
                        class_sep=1.0,
                        hypercube=True,
                        shift=0.0,
                        scale=1.0,
                        shuffle=True,
                        random_state=None,
                        order='F',
                        dtype='float32',
                        _centroids=None,
                        _informative_covariance=None,
                        _redundant_covariance=None,
                        _repeated_indices=None):
    """Generate a random n-class classification problem.
    This initially creates clusters of points normally distributed (std=1)
    about vertices of an ``n_informative``-dimensional hypercube with sides of
    length ``2*class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.
    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary ``n_informative`` features, followed by ``n_redundant``
    linear combinations of the informative features, followed by ``n_repeated``
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.

    Examples
    --------

    .. code-block:: python

        from cuml.datasets.classification import make_classification

        X, y = make_classification(n_samples=10, n_features=4,
                                   n_informative=2, n_classes=2)

        print("X:")
        print(X)

        print("y:")
        print(y)

    Output:

    .. code-block:: python

        X:
        [[-2.3249989  -0.8679415  -1.1511791   1.3525577 ]
        [ 2.2933831   1.3743551   0.63128835 -0.84648645]
        [ 1.6361488  -1.3233329   0.807027   -0.894092  ]
        [-1.0093077  -0.9990691  -0.00808992  0.00950443]
        [ 0.99803793  2.068382    0.49570698 -0.8462848 ]
        [-1.2750955  -0.9725835  -0.2390058   0.28081596]
        [-1.3635055  -0.9637669  -0.31582272  0.37106958]
        [ 1.1893625   2.227583    0.48750278 -0.8737561 ]
        [-0.05753583 -1.0939395   0.8188342  -0.9620734 ]
        [ 0.47910076  0.7648213  -0.17165393  0.26144698]]

        y:
        [0 1 0 0 1 0 0 1 0 1]

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise ``n_informative``
        informative features, ``n_redundant`` redundant features,
        ``n_repeated`` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension ``n_informative``. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape (n_classes,) or (n_classes - 1,),\
              (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than ``n_samples`` samples may be returned if the sum of
        ``weights`` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order : str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    _centroids: array of centroids of shape (n_clusters, n_informative)
    _informative_covariance: array for covariance between informative features
        of shape (n_clusters, n_informative, n_informative)
    _redundant_covariance: array for covariance between redundant features
        of shape (n_informative, n_redundant)
    _repeated_indices: array of indices for the repeated features
        of shape (n_repeated, )

    Returns
    -------
    X : device array of shape [n_samples, n_features]
        The generated samples.
    y : device array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset. How we optimized for GPUs:

        1. Firstly, we generate X from a standard univariate normal instead
           of zeros. This saves memory as we don't need to generate
           univariates each time for each feature class (informative,
           repeated, etc.) while also providing the added speedup of
           generating a big matrix on GPU.
        2. We generate X in `order=F` by construction. We exploit the
           fact that X is generated from a univariate normal, and
           covariance is introduced with matrix multiplications. This means
           we can generate X as a 1D array and just reshape it to the
           desired order, which only updates the metadata and eliminates
           copies.
        3. Lastly, we also shuffle by construction. Centroid indices are
           permuted for each sample, and then we construct the data for
           each centroid. This shuffle works for both `order=C` and
           `order=F` and eliminates any need for secondary copies.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    """
    generator = _create_rs_generator(random_state)
    np_seed = int(generator.randint(n_samples, size=1))
    np.random.seed(np_seed)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    # Use log2 to avoid overflow errors
    if n_informative < np.log2(n_classes * n_clusters_per_class):
        msg = "n_classes({}) * n_clusters_per_class({}) must be"
        msg += " smaller or equal 2**n_informative({})={}"
        raise ValueError(
            msg.format(n_classes, n_clusters_per_class, n_informative,
                       2**n_informative))

    if weights is not None:
        if len(weights) not in [n_classes, n_classes - 1]:
            raise ValueError("Weights specified but incompatible with number "
                             "of classes.")
        if len(weights) == n_classes - 1:
            if isinstance(weights, list):
                weights = weights + [1.0 - sum(weights)]
            else:
                weights = np.resize(weights, n_classes)
                weights[-1] = 1.0 - sum(weights[:-1])
    else:
        weights = [1.0 / n_classes] * n_classes

    n_clusters = n_classes * n_clusters_per_class

    # Distribute samples among clusters by weight
    n_samples_per_cluster = [
        int(n_samples * weights[k % n_classes] / n_clusters_per_class)
        for k in range(n_clusters)
    ]

    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = generator.randn(n_samples * n_features, dtype=dtype)
    X = X.reshape((n_samples, n_features), order=order)
    y = cp.zeros(n_samples, dtype=np.int64)

    # Build the polytope whose vertices become cluster centroids
    if _centroids is None:
        centroids = cp.array(
            _generate_hypercube(n_clusters, n_informative,
                                generator)).astype(dtype, copy=False)
    else:
        centroids = _centroids
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1, dtype=dtype)
        centroids *= generator.rand(1, n_informative, dtype=dtype)

    # Create redundant features
    if n_redundant > 0:
        if _redundant_covariance is None:
            B = 2 * generator.rand(n_informative, n_redundant, dtype=dtype) - 1
        else:
            B = _redundant_covariance

    # Create each cluster; a variant of make_blobs
    if shuffle:
        proba_samples_per_cluster = np.array(n_samples_per_cluster) / np.sum(
            n_samples_per_cluster)
        shuffled_sample_indices = cp.array(
            np.random.choice(n_clusters,
                             n_samples,
                             replace=True,
                             p=proba_samples_per_cluster))
        for k, centroid in enumerate(centroids):
            centroid_indices = cp.where(shuffled_sample_indices == k)
            y[centroid_indices[0]] = k % n_classes

            X_k = X[centroid_indices[0], :n_informative]

            if _informative_covariance is None:
                A = 2 * generator.rand(
                    n_informative, n_informative, dtype=dtype) - 1
            else:
                A = _informative_covariance[k]
            X_k = cp.dot(X_k, A)

            # NOTE: This could be done outside the loop, but a current
            # cupy bug does not allow that
            # https://github.com/cupy/cupy/issues/3284
            if n_redundant > 0:
                X[centroid_indices[0],
                  n_informative:n_informative + n_redundant] = cp.dot(X_k, B)

            X_k += centroid  # shift the cluster to a vertex
            X[centroid_indices[0], :n_informative] = X_k
    else:
        stop = 0
        for k, centroid in enumerate(centroids):
            start, stop = stop, stop + n_samples_per_cluster[k]
            y[start:stop] = k % n_classes  # assign labels
            X_k = X[start:stop, :n_informative]  # slice a view of the cluster

            if _informative_covariance is None:
                A = 2 * generator.rand(
                    n_informative, n_informative, dtype=dtype) - 1
            else:
                A = _informative_covariance[k]
            X_k = cp.dot(X_k, A)  # introduce random covariance

            if n_redundant > 0:
                X[start:stop, n_informative:n_informative + n_redundant] = \
                    cp.dot(X_k, B)

            X_k += centroid  # shift the cluster to a vertex
            X[start:stop, :n_informative] = X_k

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        if _repeated_indices is None:
            indices = ((n - 1) * generator.rand(n_repeated, dtype=dtype) +
                       0.5).astype(np.intp)
        else:
            indices = _repeated_indices
        X[:, n:n + n_repeated] = X[:, indices]

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples, dtype=dtype) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=int(flip_mask.sum()))

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features, dtype=dtype) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features, dtype=dtype)
    X *= scale

    return X, y
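
The column-ordering guarantee described in the docstring (all useful features first when `shuffle=False`) makes it possible to slice the informative part of the data directly. A short sketch, assuming the function is importable as `cuml.datasets.make_classification`:

# Sketch: with shuffle=False the useful columns come first and can be sliced
# directly. The import path is an assumption.
from cuml.datasets import make_classification

X, y = make_classification(n_samples=200, n_features=10,
                           n_informative=3, n_redundant=2, n_repeated=1,
                           n_classes=2, shuffle=False, random_state=7)

n_useful = 3 + 2 + 1            # n_informative + n_redundant + n_repeated
X_useful = X[:, :n_useful]      # informative, redundant and repeated columns
X_noise = X[:, n_useful:]       # remaining columns are random noise
print(X_useful.shape, X_noise.shape)    # (200, 6) (200, 4)
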
Example #5
def make_classification(n_samples=100,
                        n_features=20,
                        n_informative=2,
                        n_redundant=2,
                        n_repeated=0,
                        n_classes=2,
                        n_clusters_per_class=2,
                        weights=None,
                        flip_y=0.01,
                        class_sep=1.0,
                        hypercube=True,
                        shift=0.0,
                        scale=1.0,
                        shuffle=True,
                        random_state=None,
                        order='F',
                        dtype='float32',
                        n_parts=None,
                        client=None):
    """
    Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length :py:`2 * class_sep` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, `X` horizontally stacks features in the following
    order: the primary `n_informative` features, followed by `n_redundant`
    linear combinations of the informative features, followed by `n_repeated`
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    :py:`X[:, :n_informative + n_redundant + n_repeated]`.

    Examples
    --------
    .. code-block:: python

        >>> from dask.distributed import Client
        >>> from dask_cuda import LocalCUDACluster
        >>> from cuml.dask.datasets.classification import make_classification
        >>> cluster = LocalCUDACluster()
        >>> client = Client(cluster)
        >>> X, y = make_classification(n_samples=10, n_features=4,
        ...                            random_state=1, n_informative=2,
        ...                            n_classes=2)
        >>> print(X.compute()) # doctest: +SKIP
        [[-1.1273878   1.2844919  -0.32349187  0.1595734 ]
        [ 0.80521786 -0.65946865 -0.40753683  0.15538901]
        [ 1.0404129  -1.481386    1.4241115   1.2664981 ]
        [-0.92821544 -0.6805706  -0.26001272  0.36004275]
        [-1.0392245  -1.1977317   0.16345565 -0.21848428]
        [ 1.2273135  -0.529214    2.4799604   0.44108105]
        [-1.9163864  -0.39505136 -1.9588828  -1.8881643 ]
        [-0.9788184  -0.89851004 -0.08339313  0.1130247 ]
        [-1.0549078  -0.8993015  -0.11921967  0.04821599]
        [-1.8388828  -1.4063598  -0.02838472 -1.0874642 ]]
        >>> print(y.compute()) # doctest: +SKIP
        [1 0 0 0 0 1 0 0 0 0]
        >>> client.close()
        >>> cluster.close()

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features,
        `n_repeated` duplicated features and
        :py:`n_features-n_informative-n_redundant-n_repeated` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape :py:`(n_classes,)` or :py:`(n_classes - 1,)`\
        , (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if :py:`len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of
        `weights` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order : str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The generated samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    How we extended the dask MNMG version from the single GPU version:

    1. We generate centroids of shape ``(n_centroids, n_informative)``
    2. We generate an informative covariance of shape \
        ``(n_centroids, n_informative, n_informative)``
    3. We generate a redundant covariance of shape \
        ``(n_informative, n_redundant)``
    4. We generate the indices for the repeated features

    We pass along the references to the futures of the above arrays
    with each part to the single GPU
    `cuml.datasets.classification.make_classification` so that each
    part (and worker) has access to the correct values to generate
    data from the same covariances.

    """

    client = get_client(client=client)

    rs = _create_rs_generator(random_state)

    workers = list(client.scheduler_info()['workers'].keys())

    n_parts = n_parts if n_parts is not None else len(workers)
    parts_workers = (workers * n_parts)[:n_parts]

    n_clusters = n_classes * n_clusters_per_class

    # create centroids
    centroids = cp.array(_generate_hypercube(n_clusters, n_informative,
                                             rs)).astype(dtype, copy=False)

    covariance_seeds = rs.randint(n_features, size=2)
    informative_covariance = client.submit(
        _create_covariance, (n_clusters, n_informative, n_informative),
        int(covariance_seeds[0]),
        pure=False)

    redundant_covariance = client.submit(_create_covariance,
                                         (n_informative, n_redundant),
                                         int(covariance_seeds[1]),
                                         pure=False)

    # repeated indices
    n = n_informative + n_redundant
    repeated_indices = ((n - 1) * rs.rand(n_repeated, dtype=dtype) +
                        0.5).astype(np.intp)

    # scale and shift
    if shift is None:
        shift = (2 * rs.rand(n_features, dtype=dtype) - 1) * class_sep

    if scale is None:
        scale = 1 + 100 * rs.rand(n_features, dtype=dtype)

    # Create arrays on each worker (gpu)
    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    worker_rows[-1] += (n_samples % n_parts)

    worker_rows = tuple(worker_rows)

    part_seeds = rs.permutation(n_parts)
    parts = [
        client.submit(sg_make_classification,
                      worker_rows[i],
                      n_features,
                      n_informative,
                      n_redundant,
                      n_repeated,
                      n_classes,
                      n_clusters_per_class,
                      weights,
                      flip_y,
                      class_sep,
                      hypercube,
                      shift,
                      scale,
                      shuffle,
                      int(part_seeds[i]),
                      order,
                      dtype,
                      centroids,
                      informative_covariance,
                      redundant_covariance,
                      repeated_indices,
                      pure=False,
                      workers=[parts_workers[i]])
        for i in range(len(parts_workers))
    ]

    X_parts = [
        client.submit(_get_X, f, pure=False) for idx, f in enumerate(parts)
    ]
    y_parts = [
        client.submit(_get_labels, f, pure=False)
        for idx, f in enumerate(parts)
    ]

    X_dela = _create_delayed(X_parts, dtype, worker_rows, n_features)
    y_dela = _create_delayed(y_parts, np.int64, worker_rows)

    X = da.concatenate(X_dela)
    y = da.concatenate(y_dela)

    return X, y
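
Finally, a brief sketch of how `n_parts` shapes the returned dask arrays in the distributed version above; the chunk layout shown in the comments follows from the `worker_rows` computation, while the import path and a running `dask_cuda` cluster are assumptions.

# Sketch: n_parts controls how many chunks the returned dask arrays have.
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from cuml.dask.datasets.classification import make_classification

cluster = LocalCUDACluster()
client = Client(cluster)

X, y = make_classification(n_samples=1000, n_features=20, n_parts=4,
                           n_informative=5, n_classes=3,
                           n_clusters_per_class=1, random_state=0)

# One chunk per partition; the last chunk absorbs any remainder rows.
print(X.chunks)          # expected: ((250, 250, 250, 250), (20,))
print(y.compute()[:10])

client.close()
cluster.close()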