def _shuffle(client, rs, X, y, chunksizes, n_features,
             features_indices, n_targets, dtype):
    data_ddh = DistributedDataHandler.create(data=(X, y), client=client)

    # Shuffle each partition on its own worker, with a distinct seed per chunk
    chunk_seeds = rs.permutation(len(chunksizes))
    shuffled = [
        client.submit(_dask_shuffle, part, chunksizes[idx],
                      chunk_seeds[idx], features_indices,
                      workers=[w], pure=False)
        for idx, (w, part) in enumerate(data_ddh.gpu_futures)
    ]

    X_shuffled = [client.submit(_get_X, f, pure=False) for f in shuffled]
    y_shuffled = [client.submit(_get_labels, f, pure=False)
                  for f in shuffled]

    X_dela = _create_delayed(X_shuffled, dtype, chunksizes, n_features)
    y_dela = _create_delayed(y_shuffled, dtype, chunksizes, n_targets)

    return da.concatenate(X_dela, axis=0), da.concatenate(y_dela, axis=0)
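
# `_dask_shuffle` is defined elsewhere in this module. For orientation, a
# minimal sketch of what the per-partition task is assumed to do: permute
# the rows of the local (X, y) part with the chunk's own seed, and apply
# the shared `features_indices` column order so every partition shuffles
# features identically. The name and body below are illustrative, not the
# actual implementation.
def _dask_shuffle_sketch(part, n_rows, seed, features_indices):
    X, y = part
    local_rs = cp.random.RandomState(seed)
    row_order = local_rs.permutation(n_rows)
    return X[row_order][:, features_indices], y[row_order]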
def _convert_C_to_F_order(client, X, chunksizes, n_features, dtype):
    X_ddh = DistributedDataHandler.create(data=X, client=client)
    X_converted = [
        client.submit(cp.array, X_part, copy=False, order='F', workers=[w])
        for w, X_part in X_ddh.gpu_futures
    ]

    X_dela = _create_delayed(X_converted, dtype, chunksizes, n_features)

    return da.concatenate(X_dela, axis=0)
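
# A local illustration of the conversion above (assumes `cp` is CuPy, as
# used throughout this module): `cp.array(..., copy=False, order='F')`
# copies a partition only when its layout actually differs, so
# already-F-ordered parts pass through untouched. Hypothetical helper for
# demonstration only.
def _f_order_conversion_demo():
    a = cp.ones((4, 3), order='C')          # row-major input: gets copied
    b = cp.array(a, copy=False, order='F')  # column-major result
    c = cp.array(b, copy=False, order='F')  # already F-ordered: no copy
    return b.flags.f_contiguous and c.flags.f_contiguous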
def _f_order_standard_normal(client, rs, chunksizes, ncols, dtype):
    workers = list(client.has_what().keys())
    n_chunks = len(chunksizes)
    chunks_workers = (workers * n_chunks)[:n_chunks]

    chunk_seeds = rs.permutation(n_chunks)
    chunks = [
        client.submit(_dask_f_order_standard_normal, chunksize, ncols,
                      dtype, chunk_seeds[idx],
                      workers=[chunks_workers[idx]], pure=False)
        for idx, chunksize in enumerate(chunksizes)
    ]

    chunks_dela = _create_delayed(chunks, dtype, chunksizes, ncols)

    return da.concatenate(chunks_dela, axis=0)
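
# `_dask_f_order_standard_normal` is defined elsewhere in this module; a
# hedged sketch of the assumed per-chunk task: draw a standard-normal block
# with the chunk's own seed and lay it out in column-major order. The name
# and body are illustrative only.
def _f_order_standard_normal_chunk_sketch(nrows, ncols, dtype, seed):
    local_rs = cp.random.RandomState(seed)
    block = local_rs.standard_normal((nrows, ncols)).astype(dtype)
    return cp.asfortranarray(block)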
def _data_from_multivariate_normal(client, rs, covar, chunksizes,
                                   n_features, dtype):
    workers = list(client.has_what().keys())
    n_chunks = len(chunksizes)
    chunks_workers = (workers * n_chunks)[:n_chunks]

    chunk_seeds = rs.permutation(n_chunks)
    data_parts = [
        client.submit(_dask_data_from_multivariate_normal,
                      chunk_seeds[idx], covar, chunksizes[idx],
                      n_features, dtype,
                      workers=[chunks_workers[idx]], pure=False)
        for idx in range(n_chunks)
    ]

    data_dela = _create_delayed(data_parts, dtype, chunksizes, n_features)

    return da.concatenate(data_dela, axis=0)
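
# `_dask_data_from_multivariate_normal` also lives elsewhere in this
# module. One standard way to draw N(0, covar) samples, shown here as an
# illustrative sketch assuming a 2-D covariance of shape
# (n_features, n_features): generate standard normals and multiply by a
# Cholesky factor, since z @ L.T has covariance L @ L.T = covar.
def _multivariate_normal_chunk_sketch(seed, covar, chunksize, n_features,
                                      dtype):
    local_rs = cp.random.RandomState(seed)
    z = local_rs.standard_normal((chunksize, n_features)).astype(dtype)
    chol = cp.linalg.cholesky(covar)  # covar = chol @ chol.T
    return (z @ chol.T).astype(dtype)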
def make_blobs(n_samples=100, n_features=2, centers=None, cluster_std=1.0,
               n_parts=None, center_box=(-10, 10), shuffle=True,
               random_state=None, return_centers=False,
               verbosity=logger.LEVEL_INFO, order='F', dtype='float32',
               client=None):
    """
    Makes labeled Dask-CuPy arrays containing blobs
    for a randomly generated set of centroids.

    This function calls `make_blobs` from `cuml.datasets` on each Dask
    worker and aggregates the results into a single Dask array.

    For more information, see Scikit-learn's
    `make_blobs <https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html>`_.

    Parameters
    ----------
    n_samples : int
        number of rows
    n_features : int
        number of features
    centers : int or array of shape [n_centers, n_features], \
        optional (default=None)
        The number of centers to generate, or the fixed center locations.
        If n_samples is an int and centers is None, 3 centers are generated.
        If n_samples is array-like, centers must be either None or an array
        of length equal to the length of n_samples.
    cluster_std : float (default = 1.0)
        standard deviation of points around centroid
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)
    center_box : tuple (int, int) (default = (-10, 10))
        the bounding box which constrains all the centroids
    shuffle : bool (default = True)
        Shuffles the samples on each worker.
    random_state : int (default = None)
        sets random seed (or use None to reinitialize each time)
    return_centers : bool, optional (default = False)
        If True, then return the centers of each cluster
    verbosity : int (default = cuml.logger.LEVEL_INFO)
        Logging level.
    order : str, optional (default = 'F')
        The order of the generated samples
    dtype : str, optional (default = 'float32')
        Dtype of the generated samples
    client : dask.distributed.Client (optional)
        Dask client to use

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The input samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The output values.
    centers : dask.array backed by CuPy array of shape \
        [n_centers, n_features], optional
        The centers of the underlying blobs. It is returned only if
        return_centers is True.
""" client = get_client(client=client) generator = _create_rs_generator(random_state=random_state) workers = list(client.scheduler_info()['workers'].keys()) n_parts = n_parts if n_parts is not None else len(workers) parts_workers = (workers * n_parts)[:n_parts] centers, n_centers = _get_centers(generator, centers, center_box, n_samples, n_features, dtype) rows_per_part = max(1, int(n_samples / n_parts)) worker_rows = [rows_per_part] * n_parts if rows_per_part == 1: worker_rows[-1] += n_samples % n_parts else: worker_rows[-1] += n_samples % rows_per_part worker_rows = tuple(worker_rows) logger.debug("Generating %d samples across %d partitions on " "%d workers (total=%d samples)" % (math.ceil(n_samples / len(workers)), n_parts, len(workers), n_samples)) seeds = generator.randint(n_samples, size=len(parts_workers)) parts = [client.submit(_create_local_data, part_rows, n_features, centers, cluster_std, shuffle, int(seeds[idx]), order, dtype, pure=False, workers=[parts_workers[idx]]) for idx, part_rows in enumerate(worker_rows)] X = [client.submit(_get_X, f, pure=False) for idx, f in enumerate(parts)] y = [client.submit(_get_labels, f, pure=False) for idx, f in enumerate(parts)] X_del = _create_delayed(X, dtype, worker_rows, n_features) y_del = _create_delayed(y, dtype, worker_rows) X_final = da.concatenate(X_del, axis=0) y_final = da.concatenate(y_del, axis=0) if return_centers: return X_final, y_final, centers else: return X_final, y_final
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None, order='F',
                        dtype='float32', n_parts=None, client=None):
    """
    Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length :py:`2 * class_sep` and assigns an equal number of clusters to
    each class. It introduces interdependence between these features and
    adds various types of further noise to the data.

    Without shuffling, `X` horizontally stacks features in the following
    order: the primary `n_informative` features, followed by `n_redundant`
    linear combinations of the informative features, followed by
    `n_repeated` duplicates, drawn randomly with replacement from the
    informative and redundant features. The remaining features are filled
    with random noise. Thus, without shuffling, all useful features are
    contained in the columns
    :py:`X[:, :n_informative + n_redundant + n_repeated]`.

    Examples
    --------
    .. code-block:: python

        >>> from dask.distributed import Client
        >>> from dask_cuda import LocalCUDACluster
        >>> from cuml.dask.datasets.classification import make_classification
        >>> cluster = LocalCUDACluster()
        >>> client = Client(cluster)
        >>> X, y = make_classification(n_samples=10, n_features=4,
        ...                            random_state=1, n_informative=2,
        ...                            n_classes=2)
        >>> print(X.compute())  # doctest: +SKIP
        [[-1.1273878   1.2844919  -0.32349187  0.1595734 ]
         [ 0.80521786 -0.65946865 -0.40753683  0.15538901]
         [ 1.0404129  -1.481386    1.4241115   1.2664981 ]
         [-0.92821544 -0.6805706  -0.26001272  0.36004275]
         [-1.0392245  -1.1977317   0.16345565 -0.21848428]
         [ 1.2273135  -0.529214    2.4799604   0.44108105]
         [-1.9163864  -0.39505136 -1.9588828  -1.8881643 ]
         [-0.9788184  -0.89851004 -0.08339313  0.1130247 ]
         [-1.0549078  -0.8993015  -0.11921967  0.04821599]
         [-1.8388828  -1.4063598  -0.02838472 -1.0874642 ]]
        >>> print(y.compute())  # doctest: +SKIP
        [1 0 0 0 0 1 0 0 0 0]
        >>> client.close()
        >>> cluster.close()

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features,
        `n_repeated` duplicated features and
        :py:`n_features-n_informative-n_redundant-n_repeated` useless
        features drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a
        number of gaussian clusters each located around the vertices of a
        hypercube in a subspace of dimension `n_informative`. For each
        cluster, informative features are drawn independently from N(0, 1)
        and then randomly linearly combined within each cluster in order to
        add covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the
        informative and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape :py:`(n_classes,)` or \
        :py:`(n_classes - 1,)`, (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if
        :py:`len(weights) == n_classes - 1`, then the last class weight is
        automatically inferred. More than `n_samples` samples may be
        returned if the sum of `weights` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size. Larger values spread out
        the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features are
        shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features are
        scaled by a random value drawn in [1, 100]. Note that scaling happens
        after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an
        int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order : str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)
    client : dask.distributed.Client (optional)
        Dask client to use

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The generated samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    How we extended the dask MNMG version from the single GPU version:

    1. We generate centroids of shape ``(n_centroids, n_informative)``
    2. We generate an informative covariance of shape \
        ``(n_centroids, n_informative, n_informative)``
    3. We generate a redundant covariance of shape \
        ``(n_informative, n_redundant)``
    4. We generate the indices for the repeated features

    We pass along the references to the futures of the above arrays with
    each part to the single GPU
    `cuml.datasets.classification.make_classification` so that each part
    (and worker) has access to the correct values to generate data from
    the same covariances

    """

    client = get_client(client=client)

    rs = _create_rs_generator(random_state)

    workers = list(client.scheduler_info()['workers'].keys())

    n_parts = n_parts if n_parts is not None else len(workers)
    parts_workers = (workers * n_parts)[:n_parts]

    n_clusters = n_classes * n_clusters_per_class

    # create centroids
    centroids = cp.array(_generate_hypercube(n_clusters, n_informative,
                                             rs)).astype(dtype, copy=False)

    # create the covariances once, and share their futures with every
    # partition so all workers draw from the same covariances
    covariance_seeds = rs.randint(n_features, size=2)
    informative_covariance = client.submit(
        _create_covariance,
        (n_clusters, n_informative, n_informative),
        int(covariance_seeds[0]), pure=False)

    redundant_covariance = client.submit(
        _create_covariance,
        (n_informative, n_redundant),
        int(covariance_seeds[1]), pure=False)

    # repeated indices
    n = n_informative + n_redundant
    repeated_indices = ((n - 1) * rs.rand(n_repeated, dtype=dtype)
                        + 0.5).astype(np.intp)

    # scale and shift
    if shift is None:
        shift = (2 * rs.rand(n_features, dtype=dtype) - 1) * class_sep

    if scale is None:
        scale = 1 + 100 * rs.rand(n_features, dtype=dtype)

    # Create arrays on each worker (gpu)
    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    # Assign the remainder rows to the last partition
    worker_rows[-1] += (n_samples % n_parts)

    worker_rows = tuple(worker_rows)

    part_seeds = rs.permutation(n_parts)
    parts = [
        client.submit(sg_make_classification, worker_rows[i], n_features,
                      n_informative, n_redundant, n_repeated, n_classes,
                      n_clusters_per_class, weights, flip_y, class_sep,
                      hypercube, shift, scale, shuffle, int(part_seeds[i]),
                      order, dtype, centroids, informative_covariance,
                      redundant_covariance, repeated_indices, pure=False,
                      workers=[parts_workers[i]])
        for i in range(len(parts_workers))
    ]

    X_parts = [client.submit(_get_X, f, pure=False) for f in parts]
    y_parts = [client.submit(_get_labels, f, pure=False) for f in parts]

    X_dela = _create_delayed(X_parts, dtype, worker_rows, n_features)
    # labels are integers regardless of the feature dtype
    y_dela = _create_delayed(y_parts, np.int64, worker_rows)

    X = da.concatenate(X_dela)
    y = da.concatenate(y_dela)

    return X, y
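
# `_create_covariance` is defined elsewhere in this module; the assumed
# per-future task (illustrative sketch only): seed a device-local generator
# and draw mixing coefficients in [-1, 1], analogous to how the single-GPU
# `make_classification` randomly combines informative features. Because the
# same two futures are handed to every partition, all workers mix features
# with identical coefficients.
def _create_covariance_sketch(dims, seed, dtype='float32'):
    local_rs = cp.random.RandomState(seed)
    return 2.0 * local_rs.rand(*dims, dtype=dtype) - 1.0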