Example #1
    def _create_model(self, model_func, client, workers, n_estimators,
                      base_seed, **kwargs):

        self.client = get_client(client)
        self.workers = self.client.scheduler_info()['workers'].keys()
        self.local_model = None

        self.n_estimators_per_worker = \
            self._estimators_per_worker(n_estimators)
        if base_seed is None:
            base_seed = 0
        seeds = [base_seed]
        for i in range(1, len(self.n_estimators_per_worker)):
            sd = self.n_estimators_per_worker[i - 1] + seeds[i - 1]
            seeds.append(sd)

        self.rfs = {
            worker: self.client.submit(
                model_func,
                n_estimators=self.n_estimators_per_worker[n],
                seed=seeds[n],
                **kwargs,
                pure=False,
                workers=[worker],
            )
            for n, worker in enumerate(self.workers)
        }

        wait_and_raise_from_futures(list(self.rfs.values()))
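
The seeds above are simple cumulative offsets: each worker's seed is shifted by the number of estimators assigned to all preceding workers, so the per-worker random streams do not overlap. A standalone sketch of that arithmetic (the three-way split of 100 estimators is a hypothetical input):

n_estimators_per_worker = [34, 33, 33]  # hypothetical split of 100 estimators
base_seed = 0
seeds = [base_seed]
for i in range(1, len(n_estimators_per_worker)):
    # offset by the estimator count of every preceding worker
    seeds.append(n_estimators_per_worker[i - 1] + seeds[i - 1])
print(seeds)  # [0, 34, 67]
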
Example #2
    def _create_model(self, model_func, client, workers, n_estimators,
                      base_seed, ignore_empty_partitions, **kwargs):

        self.client = get_client(client)
        if workers is None:
            # Default to all workers
            workers = self.client.scheduler_info()['workers'].keys()
        self.workers = workers
        self._set_internal_model(None)
        self.active_workers = list()
        self.ignore_empty_partitions = ignore_empty_partitions
        self.n_estimators = n_estimators

        self.n_estimators_per_worker = \
            self._estimators_per_worker(n_estimators)
        if base_seed is None:
            base_seed = 0
        seeds = [base_seed]
        for i in range(1, len(self.n_estimators_per_worker)):
            sd = self.n_estimators_per_worker[i - 1] + seeds[i - 1]
            seeds.append(sd)

        self.rfs = {
            worker: self.client.submit(
                model_func,
                n_estimators=self.n_estimators_per_worker[n],
                random_state=seeds[n],
                **kwargs,
                pure=False,
                workers=[worker],
            )
            for n, worker in enumerate(self.workers)
        }

        wait_and_raise_from_futures(list(self.rfs.values()))
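
The `_estimators_per_worker` helper is not shown on this page. A plausible sketch of what it might do, as an assumption rather than the actual cuML implementation, is an even split of `n_estimators` with the remainder spread over the first workers:

def estimators_per_worker(n_estimators, n_workers):
    # even split; the first `rem` workers receive one extra estimator
    base, rem = divmod(n_estimators, n_workers)
    return [base + 1 if i < rem else base for i in range(n_workers)]

print(estimators_per_worker(100, 3))  # [34, 33, 33]
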
Example #3
    def __init__(self, *, client=None, verbose=False, **kwargs):
        """
        Constructor for distributed estimators.
        """
        self.client = get_client(client)
        self.verbose = verbose
        self.kwargs = kwargs

        self.internal_model = None
Example #4
def reduce(futures, func, client=None):
    """
    Performs a cluster-wide reduction by applying ``func`` per worker,
    then per host, and finally across the cluster. This takes locality
    into account by first reducing partitions local to each worker,
    then reducing partitions on each host and, finally, reducing the
    partitions across the cluster into a single reduced partition.

    Parameters
    ----------

    futures : array-like of dask.Future
        Futures to reduce
    func : callable
        Python reduction function accepting a list of objects to
        reduce and returning a single reduced object
    client : dask.distributed.Client, optional
        Dask client to use for scheduling

    Returns
    -------

    output : dask.Future
        A future containing the final reduced object
    """

    client = get_client(client)

    # Make sure input futures have been assigned to worker(s)
    wait(futures)

    for local_reduction_func in [workers_to_parts, hosts_to_parts]:

        who_has = client.who_has(futures)

        workers = [(first(who_has[m.key]), m) for m in futures]
        worker_parts = local_reduction_func(workers)

        # Short circuit when all parts already have preferred
        # locality
        if len(worker_parts) > 1:
            # Local tree reduction for scalability
            futures = client.compute(
                [tree_reduce(p, func) for w, p in worker_parts.items()])

            wait(futures)

    # Merge across workers
    ret = client.compute(tree_reduce(futures, func))
    wait(ret)

    return ret
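
A minimal usage sketch for `reduce`, assuming it is importable (for example from `cuml.dask.common`; the import path and the toy integer futures are assumptions):

from dask.distributed import Client, LocalCluster

def sum_parts(parts):
    # reduction function: a list of partial results -> one result
    return sum(parts)

cluster = LocalCluster()
client = Client(cluster)

# toy futures; in practice each future would hold a data partition
futures = [client.submit(int, i, pure=False) for i in range(16)]

total = reduce(futures, sum_parts, client=client)
print(total.result())  # 120

client.close()
cluster.close()
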
Example #5
    def __init__(self, gpu_futures=None, workers=None,
                 datatype=None, multiple=False, client=None):
        self.client = get_client(client)
        self.gpu_futures = gpu_futures
        self.worker_to_parts = _workers_to_parts(gpu_futures)
        self.workers = workers
        self.datatype = datatype
        self.multiple = multiple
        self.worker_info = None
        self.total_rows = None
        self.ranks = None
        self.parts_to_sizes = None
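
The `_workers_to_parts` helper is not shown here; conceptually it groups the (worker, future) pairs by the worker that holds each part. A hypothetical sketch of that grouping:

from collections import defaultdict

def group_parts_by_worker(worker_future_pairs):
    # map each worker address to the list of futures it holds
    grouped = defaultdict(list)
    for worker, future in worker_future_pairs:
        grouped[worker].append(future)
    return dict(grouped)
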
Example #6
def make_blobs(n_samples=100, n_features=2, centers=None, cluster_std=1.0,
               n_parts=None, center_box=(-10, 10), shuffle=True,
               random_state=None, return_centers=False,
               verbosity=logger.LEVEL_INFO, order='F', dtype='float32',
               client=None):
    """
    Makes labeled Dask-Cupy arrays containing blobs
    for a randomly generated set of centroids.

    This function calls `make_blobs` from `cuml.datasets` on each Dask worker
    and aggregates the results into Dask arrays backed by CuPy.

    For more information, see Scikit-learn's `make_blobs
    <https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html>`_.

    Parameters
    ----------

    n_samples : int
        number of rows
    n_features : int
        number of features
    centers : int or array of shape [n_centers, n_features],
        optional (default=None) The number of centers to generate, or the fixed
        center locations. If n_samples is an int and centers is None, 3 centers
        are generated. If n_samples is array-like, centers must be either None
        or an array of length equal to the length of n_samples.
    cluster_std : float (default = 1.0)
         standard deviation of points around centroid
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)
    center_box : tuple (int, int) (default = (-10, 10))
         the bounding box which constrains all the centroids
    random_state : int (default = None)
         sets random seed (or use None to reinitialize each time)
    return_centers : bool, optional (default=False)
        If True, then return the centers of each cluster
    verbosity : int (default = cuml.logger.LEVEL_INFO)
         Logging level.
    shuffle : bool (default = True)
        Shuffles the samples on each worker.
    order : str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    client : dask.distributed.Client (optional)
             Dask client to use

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The input samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The output values.
    centers : dask.array backed by CuPy array of shape
        [n_centers, n_features], optional
        The centers of the underlying blobs. It is returned only if
        return_centers is True.
    """

    client = get_client(client=client)

    generator = _create_rs_generator(random_state=random_state)

    workers = list(client.scheduler_info()['workers'].keys())

    n_parts = n_parts if n_parts is not None else len(workers)
    parts_workers = (workers * n_parts)[:n_parts]

    centers, n_centers = _get_centers(generator, centers, center_box,
                                      n_samples, n_features,
                                      dtype)

    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    if rows_per_part == 1:
        worker_rows[-1] += n_samples % n_parts
    else:
        # remainder of the integer division goes to the last partition
        worker_rows[-1] += n_samples - rows_per_part * n_parts

    worker_rows = tuple(worker_rows)

    logger.debug("Generating %d samples across %d partitions on "
                 "%d workers (total=%d samples)" %
                 (math.ceil(n_samples / len(workers)),
                  n_parts, len(workers), n_samples))

    seeds = generator.randint(n_samples, size=len(parts_workers))
    parts = [client.submit(_create_local_data,
                           part_rows,
                           n_features,
                           centers,
                           cluster_std,
                           shuffle,
                           int(seeds[idx]),
                           order,
                           dtype,
                           pure=False,
                           workers=[parts_workers[idx]])
             for idx, part_rows in enumerate(worker_rows)]

    X = [client.submit(_get_X, f, pure=False)
         for idx, f in enumerate(parts)]
    y = [client.submit(_get_labels, f, pure=False)
         for idx, f in enumerate(parts)]

    X_del = _create_delayed(X, dtype, worker_rows, n_features)
    y_del = _create_delayed(y, dtype, worker_rows)

    X_final = da.concatenate(X_del, axis=0)
    y_final = da.concatenate(y_del, axis=0)

    if return_centers:
        return X_final, y_final, centers
    else:
        return X_final, y_final
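
A minimal usage sketch for `make_blobs`, assuming a dask_cuda GPU cluster is available and that the function is importable from `cuml.dask.datasets` (the import path is an assumption):

from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from cuml.dask.datasets import make_blobs

cluster = LocalCUDACluster()
client = Client(cluster)

X, y = make_blobs(n_samples=1000, n_features=10, centers=5,
                  cluster_std=0.5, random_state=0, client=client)
print(X.shape, y.shape)  # lazy Dask arrays backed by CuPy
print(X.compute()[:3])   # materialize a few rows

client.close()
cluster.close()
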
Example #7
def confusion_matrix(y_true,
                     y_pred,
                     labels=None,
                     normalize=None,
                     sample_weight=None,
                     client=None):
    """Compute confusion matrix to evaluate the accuracy of a classification.

    Parameters
    ----------
    y_true : dask.Array (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : dask.Array (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Estimated target values.
    labels : array-like (device or host) shape = (n_classes,), optional
        List of labels to index the matrix. This may be used to reorder or
        select a subset of labels. If None is given, those that appear at least
        once in y_true or y_pred are used in sorted order.
    sample_weight : dask.Array (device or host) shape = (n_samples,), optional
        Sample weights.
    normalize : string in ['true', 'pred', 'all']
        Normalizes confusion matrix over the true (rows), predicted (columns)
        conditions or all the population. If None, confusion matrix will not be
        normalized.
    client : dask.distributed.Client, optional
        Dask client to use. Will use the default client if None.

    Returns
    -------
    C : array-like (device or host) shape = (n_classes, n_classes)
        Confusion matrix.
    """
    client = get_client(client)

    if labels is None:
        labels = sorted_unique_labels(y_true, y_pred)

    if normalize not in ['true', 'pred', 'all', None]:
        msg = "normalize must be one of " \
            f"{{'true', 'pred', 'all', None}}, got {normalize}."
        raise ValueError(msg)

    use_sample_weight = bool(sample_weight is not None)
    dask_arrays = [y_true, y_pred, sample_weight] if use_sample_weight else \
        [y_true, y_pred]

    # run cm computation on each partition.
    data = DistributedDataHandler.create(dask_arrays, client=client)
    cms = [
        client.submit(_local_cm, p, labels, use_sample_weight,
                      workers=[w]).result() for w, p in data.gpu_futures
    ]

    # reduce each partition's result into one cupy matrix
    cm = sum(cms)

    with np.errstate(all='ignore'):
        if normalize == 'true':
            cm = cm / cm.sum(axis=1, keepdims=True)
        elif normalize == 'pred':
            cm = cm / cm.sum(axis=0, keepdims=True)
        elif normalize == 'all':
            cm = cm / cm.sum()
        cm = np.nan_to_num(cm)

    return cm
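
A minimal usage sketch for `confusion_matrix`, assuming a GPU cluster and CuPy-backed Dask arrays, and assuming the function is importable from `cuml.dask.metrics` (the import path is an assumption):

import cupy as cp
import dask.array as da
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from cuml.dask.metrics import confusion_matrix

cluster = LocalCUDACluster()
client = Client(cluster)

y_true = da.from_array(cp.array([0, 1, 2, 2, 1, 0]), chunks=3)
y_pred = da.from_array(cp.array([0, 2, 2, 2, 0, 0]), chunks=3)

# 3x3 matrix: rows are true labels, columns are predicted labels
cm = confusion_matrix(y_true, y_pred, client=client)
print(cm)

client.close()
cluster.close()
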
Example #8
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None, order='F',
                        dtype='float32', n_parts=None, client=None):
    """
    Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length ``2 * class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary `n_informative` features, followed by `n_redundant`
    linear combinations of the informative features, followed by `n_repeated`
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.

    Examples
    --------

    .. code-block:: python

        from dask.distributed import Client
        from dask_cuda import LocalCUDACluster
        from cuml.dask.datasets.classification import make_classification
        cluster = LocalCUDACluster()
        client = Client(cluster)
        X, y = make_classification(n_samples=10, n_features=4,
                                   n_informative=2, n_classes=2)

        print("X:")
        print(X.compute())

        print("y:")
        print(y.compute())

    Output:

    .. code-block:: python

        X:
        [[-1.6990056  -0.8241044  -0.06997631  0.45107925]
        [-1.8105277   1.7829906   0.492909    0.05390119]
        [-0.18290454 -0.6155432   0.6667889  -1.0053712 ]
        [-2.7530136  -0.888528   -0.5023055   1.3983376 ]
        [-0.9788184  -0.89851004  0.10802134 -0.10021686]
        [-0.76883423 -1.0689086   0.01249526 -0.1404741 ]
        [-1.5676656  -0.83082974 -0.03072987  0.34499463]
        [-0.9381793  -1.0971068  -0.07465998  0.02618019]
        [-1.3021476  -0.87076336  0.02249984  0.15187258]
        [ 1.1820307   1.7524253   1.5087451  -2.4626074 ]]

        y:
        [0 1 0 0 0 0 0 0 0 1]

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features,
        `n_repeated` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape ``(n_classes,)`` or ``(n_classes - 1,)``, \
        (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of
        `weights` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order : str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The generated samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    How we extended the dask MNMG version from the single GPU version:

    1. We generate centroids of shape ``(n_centroids, n_informative)``
    2. We generate an informative covariance of shape \
        ``(n_centroids, n_informative, n_informative)``
    3. We generate a redundant covariance of shape \
        ``(n_informative, n_redundant)``
    4. We generate the indices for the repeated features \
    We pass along the references to the futures of the above arrays \
    with each part to the single GPU \
    `cuml.datasets.classification.make_classification` so that each \
    part (and worker) has access to the correct values to generate \
    data from the same covariances

    """

    client = get_client(client=client)

    rs = _create_rs_generator(random_state)

    workers = list(client.scheduler_info()['workers'].keys())

    n_parts = n_parts if n_parts is not None else len(workers)
    parts_workers = (workers * n_parts)[:n_parts]

    n_clusters = n_classes * n_clusters_per_class

    # create centroids
    centroids = cp.array(_generate_hypercube(n_clusters, n_informative,
                                             rs)).astype(dtype, copy=False)

    covariance_seeds = rs.randint(n_features, size=2)
    informative_covariance = client.submit(_create_covariance,
                                           (n_clusters, n_informative,
                                            n_informative),
                                           int(covariance_seeds[0]),
                                           pure=False)

    redundant_covariance = client.submit(_create_covariance,
                                         (n_informative,
                                          n_redundant),
                                         int(covariance_seeds[1]),
                                         pure=False)

    # repeated indices
    n = n_informative + n_redundant
    repeated_indices = ((n - 1) * rs.rand(n_repeated, dtype=dtype)
                        + 0.5).astype(np.intp)

    # scale and shift
    if shift is None:
        shift = (2 * rs.rand(n_features, dtype=dtype) - 1) * class_sep

    if scale is None:
        scale = 1 + 100 * rs.rand(n_features, dtype=dtype)

    # Create arrays on each worker (gpu)
    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    worker_rows[-1] += (n_samples % n_parts)

    worker_rows = tuple(worker_rows)

    part_seeds = rs.permutation(n_parts)
    parts = [client.submit(sg_make_classification, worker_rows[i], n_features,
                           n_informative, n_redundant, n_repeated, n_classes,
                           n_clusters_per_class, weights, flip_y, class_sep,
                           hypercube, shift, scale, shuffle,
                           int(part_seeds[i]), order, dtype, centroids,
                           informative_covariance, redundant_covariance,
                           repeated_indices, pure=False,
                           workers=[parts_workers[i]])
             for i in range(len(parts_workers))]

    X_parts = [client.submit(_get_X, f, pure=False)
               for idx, f in enumerate(parts)]
    y_parts = [client.submit(_get_labels, f, pure=False)
               for idx, f in enumerate(parts)]

    X_dela = _create_delayed(X_parts, dtype, worker_rows, n_features)
    y_dela = _create_delayed(y_parts, dtype, worker_rows)

    X = da.concatenate(X_dela)
    y = da.concatenate(y_dela)

    return X, y
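
A standalone illustration of the row chunking used above, with hypothetical numbers: rows are split evenly across the parts and the remainder of the integer division goes to the last part.

n_samples, n_parts = 103, 4
rows_per_part = max(1, n_samples // n_parts)  # 25
worker_rows = [rows_per_part] * n_parts
worker_rows[-1] += n_samples % n_parts        # last part absorbs the 3 leftover rows
print(worker_rows)  # [25, 25, 25, 28]
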
Example #9
def make_classification(n_samples=100,
                        n_features=20,
                        n_informative=2,
                        n_redundant=2,
                        n_repeated=0,
                        n_classes=2,
                        n_clusters_per_class=2,
                        weights=None,
                        flip_y=0.01,
                        class_sep=1.0,
                        hypercube=True,
                        shift=0.0,
                        scale=1.0,
                        shuffle=True,
                        random_state=None,
                        order='F',
                        dtype='float32',
                        n_parts=None,
                        client=None):
    """
    Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length :py:`2 * class_sep` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, `X` horizontally stacks features in the following
    order: the primary `n_informative` features, followed by `n_redundant`
    linear combinations of the informative features, followed by `n_repeated`
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    :py:`X[:, :n_informative + n_redundant + n_repeated]`.

    Examples
    --------
    .. code-block:: python

        >>> from dask.distributed import Client
        >>> from dask_cuda import LocalCUDACluster
        >>> from cuml.dask.datasets.classification import make_classification
        >>> cluster = LocalCUDACluster()
        >>> client = Client(cluster)
        >>> X, y = make_classification(n_samples=10, n_features=4,
        ...                            random_state=1, n_informative=2,
        ...                            n_classes=2)
        >>> print(X.compute()) # doctest: +SKIP
        [[-1.1273878   1.2844919  -0.32349187  0.1595734 ]
        [ 0.80521786 -0.65946865 -0.40753683  0.15538901]
        [ 1.0404129  -1.481386    1.4241115   1.2664981 ]
        [-0.92821544 -0.6805706  -0.26001272  0.36004275]
        [-1.0392245  -1.1977317   0.16345565 -0.21848428]
        [ 1.2273135  -0.529214    2.4799604   0.44108105]
        [-1.9163864  -0.39505136 -1.9588828  -1.8881643 ]
        [-0.9788184  -0.89851004 -0.08339313  0.1130247 ]
        [-1.0549078  -0.8993015  -0.11921967  0.04821599]
        [-1.8388828  -1.4063598  -0.02838472 -1.0874642 ]]
        >>> print(y.compute()) # doctest: +SKIP
        [1 0 0 0 0 1 0 0 0 0]
        >>> client.close()
        >>> cluster.close()

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features,
        `n_repeated` duplicated features and
        :py:`n_features-n_informative-n_redundant-n_repeated` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape :py:`(n_classes,)` or :py:`(n_classes - 1,)`\
        , (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if :py:`len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of
        `weights` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order : str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The generated samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    How we extended the dask MNMG version from the single GPU version:

    1. We generate centroids of shape ``(n_centroids, n_informative)``
    2. We generate an informative covariance of shape \
        ``(n_centroids, n_informative, n_informative)``
    3. We generate a redundant covariance of shape \
        ``(n_informative, n_redundant)``
    4. We generate the indices for the repeated features \
    We pass along the references to the futures of the above arrays \
    with each part to the single GPU \
    `cuml.datasets.classification.make_classification` so that each \
    part (and worker) has access to the correct values to generate \
    data from the same covariances

    """

    client = get_client(client=client)

    rs = _create_rs_generator(random_state)

    workers = list(client.scheduler_info()['workers'].keys())

    n_parts = n_parts if n_parts is not None else len(workers)
    parts_workers = (workers * n_parts)[:n_parts]

    n_clusters = n_classes * n_clusters_per_class

    # create centroids
    centroids = cp.array(_generate_hypercube(n_clusters, n_informative,
                                             rs)).astype(dtype, copy=False)

    covariance_seeds = rs.randint(n_features, size=2)
    informative_covariance = client.submit(
        _create_covariance, (n_clusters, n_informative, n_informative),
        int(covariance_seeds[0]),
        pure=False)

    redundant_covariance = client.submit(_create_covariance,
                                         (n_informative, n_redundant),
                                         int(covariance_seeds[1]),
                                         pure=False)

    # repeated indices
    n = n_informative + n_redundant
    repeated_indices = ((n - 1) * rs.rand(n_repeated, dtype=dtype) +
                        0.5).astype(np.intp)

    # scale and shift
    if shift is None:
        shift = (2 * rs.rand(n_features, dtype=dtype) - 1) * class_sep

    if scale is None:
        scale = 1 + 100 * rs.rand(n_features, dtype=dtype)

    # Create arrays on each worker (gpu)
    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    worker_rows[-1] += (n_samples % n_parts)

    worker_rows = tuple(worker_rows)

    part_seeds = rs.permutation(n_parts)
    parts = [
        client.submit(sg_make_classification,
                      worker_rows[i],
                      n_features,
                      n_informative,
                      n_redundant,
                      n_repeated,
                      n_classes,
                      n_clusters_per_class,
                      weights,
                      flip_y,
                      class_sep,
                      hypercube,
                      shift,
                      scale,
                      shuffle,
                      int(part_seeds[i]),
                      order,
                      dtype,
                      centroids,
                      informative_covariance,
                      redundant_covariance,
                      repeated_indices,
                      pure=False,
                      workers=[parts_workers[i]])
        for i in range(len(parts_workers))
    ]

    X_parts = [
        client.submit(_get_X, f, pure=False) for idx, f in enumerate(parts)
    ]
    y_parts = [
        client.submit(_get_labels, f, pure=False)
        for idx, f in enumerate(parts)
    ]

    X_dela = _create_delayed(X_parts, dtype, worker_rows, n_features)
    y_dela = _create_delayed(y_parts, np.int64, worker_rows)

    X = da.concatenate(X_dela)
    y = da.concatenate(y_dela)

    return X, y
Example #10
def make_regression(n_samples=100,
                    n_features=100,
                    n_informative=10,
                    n_targets=1,
                    bias=0.0,
                    effective_rank=None,
                    tail_strength=0.5,
                    noise=0.0,
                    shuffle=False,
                    coef=False,
                    random_state=None,
                    n_parts=1,
                    n_samples_per_part=None,
                    order='F',
                    dtype='float32',
                    client=None,
                    use_full_low_rank=True):
    """
    Generate a random regression problem.

    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile.

    The output is generated by applying a (potentially biased) random linear
    regression model with "n_informative" nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    n_informative : int, optional (default=10)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.
    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.
    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.
    effective_rank : int or None, optional (default=None)
        if not None:
            The approximate number of singular vectors required to explain most
            of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.

        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if "effective_rank" is not None.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    shuffle : boolean, optional (default=False)
        Shuffle the samples and the features.
    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.
    random_state : int, CuPy RandomState instance, Dask RandomState instance \
                   or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
    n_parts : int, optional (default=1)
        The number of parts of work.
    order : str, optional (default='F')
        Row-major or Col-major
    dtype : str, optional (default='float32')
        dtype of generated data
    n_samples_per_part : int, optional (default=None)
        The number of samples per partition. If None, it is computed
        from ``n_samples`` and ``n_parts``.
    client : dask.distributed.Client, optional
        Dask client to use. Will use the default client if None.
    use_full_low_rank : boolean (default=True)
        Whether to use the entire dataset to generate the low rank matrix.
        If False, it creates a low rank covariance and uses the
        corresponding covariance to generate a multivariate normal
        distribution on the remaining chunks

    Returns
    -------
    X : Dask-CuPy array of shape [n_samples, n_features]
        The input samples.
    y : Dask-CuPy array of shape [n_samples] or [n_samples, n_targets]
        The output values.
    coef : Dask-CuPy array of shape [n_features] \
           or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.

    Notes
    -----
    Known Performance Limitations:
     1. When `effective_rank` is set and `use_full_low_rank` is True, \
        we cannot generate order `F` by construction, and an explicit \
        transpose is performed on each part. This may cause memory to spike \
        (other parameters make order `F` by construction)
     2. When `n_targets > 1` and `order = 'F'` as above, we have to \
        explicitly transpose the `y` array. If `coef = True`, then we also \
        explicitly transpose the `ground_truth` array
     3. When `shuffle = True` and `order = F`, there are memory spikes to \
        shuffle the `F` order arrays

    .. note:: If out-of-memory errors are encountered in any of the above
        configurations, try increasing the `n_parts` parameter.
    """

    client = get_client(client=client)

    n_informative = min(n_features, n_informative)
    rs = _create_rs_generator(random_state)

    if n_samples_per_part is None:
        n_samples_per_part = max(1, int(n_samples / n_parts))

    data_chunksizes = [n_samples_per_part] * n_parts

    data_chunksizes[-1] += (n_samples % n_parts)

    data_chunksizes = tuple(data_chunksizes)

    if effective_rank is None:
        # Randomly generate a well conditioned input set
        if order == 'F':
            X = _f_order_standard_normal(client, rs, data_chunksizes,
                                         n_features, dtype)

        elif order == 'C':
            X = rs.standard_normal((n_samples, n_features),
                                   chunks=(data_chunksizes, -1),
                                   dtype=dtype)

    else:
        # Randomly generate a low rank, fat tail input set
        if use_full_low_rank:
            X = make_low_rank_matrix(n_samples=n_samples,
                                     n_features=n_features,
                                     effective_rank=effective_rank,
                                     tail_strength=tail_strength,
                                     random_state=rs,
                                     n_parts=n_parts,
                                     n_samples_per_part=n_samples_per_part,
                                     dtype=dtype)

            X = X.rechunk({0: data_chunksizes, 1: -1})
        else:
            seed = int(rs.randint(n_samples).compute())
            covar = _make_low_rank_covariance(client, n_features,
                                              effective_rank, tail_strength,
                                              seed, n_parts,
                                              n_samples_per_part, dtype)
            X = _data_from_multivariate_normal(client, rs, covar,
                                               data_chunksizes, n_features,
                                               dtype)

        X = _convert_to_order(client, X, data_chunksizes, order, n_features,
                              dtype)

    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by a sparsifying regularizers such as L1 or elastic net)
    ground_truth = 100.0 * rs.standard_normal((n_informative, n_targets),
                                              chunks=(n_samples_per_part, -1),
                                              dtype=dtype)

    y = da.dot(X[:, :n_informative], ground_truth) + bias

    if n_informative != n_features:
        zeroes = 0.0 * rs.standard_normal(
            (n_features - n_informative, n_targets), dtype=dtype)
        ground_truth = da.concatenate([ground_truth, zeroes], axis=0)

    ground_truth = ground_truth.rechunk(-1)

    # Add noise
    if noise > 0.0:
        y += rs.normal(scale=noise, size=y.shape, dtype=dtype)

    # Randomly permute samples and features
    if shuffle:
        features_indices = np.random.permutation(n_features)
        X, y = _shuffle(client, rs, X, y, data_chunksizes, n_features,
                        features_indices, n_targets, dtype)

        ground_truth = ground_truth[features_indices, :]

    y = da.squeeze(y)

    if order == 'F' and n_targets > 1:
        y = _convert_to_order(client, y, y.chunks[0], order, n_targets, dtype)
        if coef:
            ground_truth = _convert_to_order(client, ground_truth,
                                             ground_truth.chunks[0], order,
                                             n_targets, dtype)

    if coef:
        ground_truth = da.squeeze(ground_truth)
        return X, y, ground_truth

    else:
        return X, y
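
A minimal usage sketch for `make_regression`, assuming a dask_cuda GPU cluster and that the function is importable from `cuml.dask.datasets` (the import path is an assumption):

from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from cuml.dask.datasets import make_regression

cluster = LocalCUDACluster()
client = Client(cluster)

X, y, coef = make_regression(n_samples=1000, n_features=20, n_informative=5,
                             noise=0.5, coef=True, n_parts=2,
                             random_state=42, client=client)
print(X.shape, y.shape, coef.shape)  # lazy Dask-CuPy arrays

client.close()
cluster.close()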