Example #1
def test_onehot_inverse_transform(drop, as_array):
    X = DataFrame({'g': ['M', 'F', 'F'], 'i': [1, 3, 2]})
    if as_array:
        X = _from_df_to_cupy(X)
        drop = _convert_drop(drop)

    enc = OneHotEncoder(drop=drop)
    ohe = enc.fit_transform(X)
    inv = enc.inverse_transform(ohe)

    assert_inverse_equal(inv, X)
Example #2
def test_onehot_categories(as_array):
    X = DataFrame({'chars': ['a', 'b'], 'int': [0, 2]})
    categories = DataFrame({'chars': ['a', 'b', 'c'], 'int': [0, 1, 2]})
    if as_array:
        X = _from_df_to_cupy(X)
        categories = _from_df_to_cupy(categories).transpose()

    enc = OneHotEncoder(categories=categories, sparse=False)
    ref = cp.array([[1., 0., 0., 1., 0., 0.], [0., 1., 0., 0., 0., 1.]])
    res = enc.fit_transform(X)
    cp.testing.assert_array_equal(res, ref)
Example #3
def test_onehot_inverse_transform_handle_unknown(as_array):
    X = DataFrame({'chars': ['a', 'b'], 'int': [0, 2]})
    Y_ohe = cp.array([[0., 0., 1., 0.], [0., 1., 0., 1.]])
    ref = DataFrame({'chars': [None, 'b'], 'int': [0, 2]})
    if as_array:
        X = _from_df_to_cupy(X)
        ref = DataFrame({0: [None, ord('b')], 1: [0, 2]})

    enc = OneHotEncoder(handle_unknown='ignore')
    enc = enc.fit(X)
    df = enc.inverse_transform(Y_ohe)
    assert_inverse_equal(df, ref)
Example #4
def test_onehot_drop_idx_first(as_array):
    X_ary = [['c', 2, 'a'], ['b', 2, 'b']]
    X = DataFrame({'chars': ['c', 'b'], 'int': [2, 2], 'letters': ['a', 'b']})
    if as_array:
        X = _from_df_to_cupy(X)
        X_ary = cp.asnumpy(X)

    enc = OneHotEncoder(sparse=False, drop='first', categories='auto')
    sk_enc = SkOneHotEncoder(sparse=False, drop='first', categories='auto')
    ohe = enc.fit_transform(X)
    ref = sk_enc.fit_transform(X_ary)
    cp.testing.assert_array_equal(ohe, ref)
    inv = enc.inverse_transform(ohe)
    assert_inverse_equal(inv, X)
Example #5
def test_onehot_vs_skonehot(as_array):
    X = DataFrame({'gender': ['M', 'F', 'F'], 'int': [1, 3, 2]})
    skX = from_df_to_array(X)
    if as_array:
        X = _from_df_to_cupy(X)
        skX = cp.asnumpy(X)

    enc = OneHotEncoder(sparse=True)
    skohe = SkOneHotEncoder(sparse=True)

    ohe = enc.fit_transform(X)
    ref = skohe.fit_transform(skX)

    cp.testing.assert_array_equal(ohe.toarray(), ref.toarray())
Example #6
def test_onehot_random_inputs(drop, sparse, n_samples, as_array):
    X, ary = generate_inputs_from_categories(n_samples=n_samples,
                                             as_array=as_array)

    enc = OneHotEncoder(sparse=sparse, drop=drop, categories='auto')
    sk_enc = SkOneHotEncoder(sparse=sparse, drop=drop, categories='auto')
    ohe = enc.fit_transform(X)
    ref = sk_enc.fit_transform(ary)
    if sparse:
        cp.testing.assert_array_equal(ohe.toarray(), ref.toarray())
    else:
        cp.testing.assert_array_equal(ohe, ref)
    inv_ohe = enc.inverse_transform(ohe)
    assert_inverse_equal(inv_ohe, X)
Example #7
def test_onehot_sparse_drop(as_array):
    X = DataFrame({'g': ['M', 'F', 'F'], 'i': [1, 3, 2], 'l': [5, 5, 6]})
    drop = {'g': 'F', 'i': 3, 'l': 6}

    ary = from_df_to_array(X)
    drop_ary = ['F', 3, 6]
    if as_array:
        X = _from_df_to_cupy(X)
        ary = cp.asnumpy(X)
        drop = drop_ary = _convert_drop(drop)

    enc = OneHotEncoder(sparse=True, drop=drop, categories='auto')
    sk_enc = SkOneHotEncoder(sparse=True, drop=drop_ary, categories='auto')
    ohe = enc.fit_transform(X)
    ref = sk_enc.fit_transform(ary)
    cp.testing.assert_array_equal(ohe.toarray(), ref.toarray())
Example #8
def test_onehot_drop_exceptions(drop, pattern, as_array):
    X = DataFrame({'chars': ['c', 'b', 'd'], 'int': [2, 1, 0]})
    if as_array:
        X = _from_df_to_cupy(X)
        drop = _convert_drop(drop) if not isinstance(drop, DataFrame) else drop

    with pytest.raises(ValueError, match=pattern):
        OneHotEncoder(sparse=False, drop=drop).fit(X)
Example #9
def test_onehot_drop_one_of_each(as_array):
    X = DataFrame({'chars': ['c', 'b'], 'int': [2, 2], 'letters': ['a', 'b']})
    drop = {'chars': 'b', 'int': 2, 'letters': 'b'}
    X_ary = from_df_to_array(X)
    drop_ary = ['b', 2, 'b']
    if as_array:
        X = _from_df_to_cupy(X)
        X_ary = cp.asnumpy(X)
        drop = drop_ary = _convert_drop(drop)

    enc = OneHotEncoder(sparse=False, drop=drop, categories='auto')
    ohe = enc.fit_transform(X)
    ref = SkOneHotEncoder(sparse=False, drop=drop_ary,
                          categories='auto').fit_transform(X_ary)
    cp.testing.assert_array_equal(ohe, ref)
    inv = enc.inverse_transform(ohe)
    assert_inverse_equal(inv, X)
Example #10
def test_onehot_categories_shape_mismatch(as_array):
    X = DataFrame({'chars': ['a'], 'int': [0]})
    categories = DataFrame({'chars': ['a', 'b', 'c']})
    if as_array:
        X = _from_df_to_cupy(X)
        categories = _from_df_to_cupy(categories).transpose()

    with pytest.raises(ValueError):
        OneHotEncoder(categories=categories, sparse=False).fit(X)
Example #11
def test_onehot_fit_handle_unknown(as_array):
    X = DataFrame({'chars': ['a', 'b'], 'int': [0, 2]})
    Y = DataFrame({'chars': ['c', 'b'], 'int': [0, 2]})
    if as_array:
        X = _from_df_to_cupy(X)
        Y = _from_df_to_cupy(Y)

    enc = OneHotEncoder(handle_unknown='error', categories=Y)
    with pytest.raises(KeyError):
        enc.fit(X)

    enc = OneHotEncoder(handle_unknown='ignore', categories=Y)
    enc.fit(X)
Example #12
def test_onehot_get_feature_names(as_array):
    fruits = ['apple', 'banana', 'strawberry']
    if as_array:
        fruits = [ord(fruit[0]) for fruit in fruits]
    sizes = [0, 1, 2]
    X = DataFrame({'fruits': fruits, 'sizes': sizes})
    if as_array:
        X = _from_df_to_cupy(X)

    enc = OneHotEncoder().fit(X)

    feature_names_ref = ['x0_'+str(fruit) for fruit in fruits] + \
                        ['x1_'+str(size) for size in sizes]
    feature_names = enc.get_feature_names()
    assert np.array_equal(feature_names, feature_names_ref)

    feature_names_ref = ['fruit_'+str(fruit) for fruit in fruits] + \
                        ['size_'+str(size) for size in sizes]
    feature_names = enc.get_feature_names(['fruit', 'size'])
    assert np.array_equal(feature_names, feature_names_ref)
Example #13
def test_onehot_transform_handle_unknown(as_array):
    X = DataFrame({'chars': ['a', 'b'], 'int': [0, 2]})
    Y = DataFrame({'chars': ['c', 'b'], 'int': [0, 2]})
    if as_array:
        X = _from_df_to_cupy(X)
        Y = _from_df_to_cupy(Y)

    enc = OneHotEncoder(handle_unknown='error', sparse=False)
    enc = enc.fit(X)
    with pytest.raises(KeyError):
        enc.transform(Y)

    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc = enc.fit(X)
    ohe = enc.transform(Y)
    ref = cp.array([[0., 0., 1., 0.], [0., 1., 0., 1.]])
    cp.testing.assert_array_equal(ohe, ref)
Example #14
def test_onehot_get_categories(as_array):
    X = DataFrame({'chars': ['c', 'b', 'd'], 'ints': [2, 1, 0]})
    ref = [np.array(['b', 'c', 'd']), np.array([0, 1, 2])]
    if as_array:
        X = _from_df_to_cupy(X)
        ref[0] = np.array([ord(x) for x in ref[0]])

    enc = OneHotEncoder().fit(X)
    cats = enc.categories_

    for i in range(len(ref)):
        np.testing.assert_array_equal(ref[i], cats[i].to_array())
Example #15
def test_onehot_category_class_count(total_classes: int):
    # See this for reasoning: https://github.com/rapidsai/cuml/issues/2690
    # All tests use sparse=True to avoid memory errors

    encoder = OneHotEncoder(handle_unknown="ignore", sparse=True)

    # ==== 2 Columns ====
    example_df = DataFrame()
    example_df["high_cardinality_column"] = cp.linspace(
        0, total_classes - 1, total_classes)
    example_df["low_cardinality_column"] = ["A"] * 200 + ["B"] * (
        total_classes - 200)

    assert (encoder.fit_transform(example_df).shape[1] == total_classes + 2)

    # ==== 3 Columns ====
    example_df = DataFrame()
    example_df["high_cardinality_column"] = cp.linspace(
        0, total_classes - 1, total_classes)
    example_df["low_cardinality_column"] = ["A"] * total_classes
    example_df["med_cardinality_column"] = ["B"] * total_classes

    assert (encoder.fit_transform(example_df).shape[1] == total_classes + 2)

    # ==== N Columns (Even Split) ====
    num_rows = [3, 10, 100]

    for row_count in num_rows:

        class_per_row = int(math.ceil(total_classes / float(row_count))) + 1
        example_df = DataFrame()

        for row_idx in range(row_count):
            example_df[str(row_idx)] = cp.linspace(
                row_idx * class_per_row, ((row_idx + 1) * class_per_row) - 1,
                class_per_row)

        assert (encoder.fit_transform(example_df).shape[1] == class_per_row *
                row_count)
Example #16
class OHEColumnTransform(BaseEstimator, TransformerMixin):
    def __init__(self, columns, **kwargs):
        self.columns = columns
        self.kwargs = kwargs
        self.ohe = None

    def fit(self, X, y=None):
        self.ohe = OneHotEncoder(**self.kwargs)
        self.ohe.fit(X[self.columns])
        return self

    def transform(self, X, y=None):
        if self.ohe:
            X_transformed = X.copy()
            cp_ohe = self.ohe.transform(X_transformed[self.columns])
            temp = cudf.DataFrame(
                cp_ohe,
                index=X_transformed.index,
                columns=['ohe_' + str(i) for i in range(cp_ohe.shape[1])])
            X_transformed = X_transformed.drop(self.columns, axis=1)
            X_transformed = X_transformed.join(temp)
            return X_transformed.reset_index(drop=True)
        else:
            raise RuntimeError("OneHotEncoder must be fitted before transform")
Example #17
def test_onehot_category_specific_cases():
    # See this for reasoning: https://github.com/rapidsai/cuml/issues/2690

    # All of these cases use sparse=False, where
    # test_onehot_category_class_count uses sparse=True

    # ==== 2 Columns (Low before High) ====
    example_df = DataFrame()
    example_df["low_cardinality_column"] = ["A"] * 200 + ["B"] * 56
    example_df["high_cardinality_column"] = cp.linspace(0, 255, 256)

    encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
    encoder.fit_transform(example_df)

    # ==== 2 Columns (High before Low, used to fail) ====
    example_df = DataFrame()
    example_df["high_cardinality_column"] = cp.linspace(0, 255, 256)
    example_df["low_cardinality_column"] = ["A"] * 200 + ["B"] * 56

    encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
    encoder.fit_transform(example_df)
Example #18
class KBinsDiscretizer(TransformerMixin, BaseEstimator, SparseInputTagMixin):
    """
    Bin continuous data into intervals.

    Parameters
    ----------
    n_bins : int or array-like, shape (n_features,) (default=5)
        The number of bins to produce. Raises ValueError if ``n_bins < 2``.

    encode : {'onehot', 'onehot-dense', 'ordinal'}, (default='onehot')
        Method used to encode the transformed result.

        onehot
            Encode the transformed result with one-hot encoding
            and return a sparse matrix. Ignored features are always
            stacked to the right.
        onehot-dense
            Encode the transformed result with one-hot encoding
            and return a dense array. Ignored features are always
            stacked to the right.
        ordinal
            Return the bin identifier encoded as an integer value.

    strategy : {'uniform', 'quantile', 'kmeans'}, (default='quantile')
        Strategy used to define the widths of the bins.

        uniform
            All bins in each feature have identical widths.
        quantile
            All bins in each feature have the same number of points.
        kmeans
            Values in each bin have the same nearest center of a 1D k-means
            cluster.

    Attributes
    ----------
    n_bins_ : int array, shape (n_features,)
        Number of bins per feature. Bins whose width is too small
        (i.e., <= 1e-8) are removed with a warning.

    bin_edges_ : array of arrays, shape (n_features,)
        The edges of each bin. Contains arrays of varying shapes
        ``(n_bins_[i] + 1,)``. Ignored features will have empty arrays.

    See Also
    --------
    cuml.preprocessing.Binarizer : Class used to bin values as ``0`` or
        ``1`` based on a parameter ``threshold``.

    Notes
    -----
    In bin edges for feature ``i``, the first and last values are used only for
    ``inverse_transform``. During transform, bin edges are extended to::

      np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])

    You can combine ``KBinsDiscretizer`` with
    :class:`sklearn.compose.ColumnTransformer` if you only want to preprocess
    part of the features.

    ``KBinsDiscretizer`` might produce constant features (e.g., when
    ``encode = 'onehot'`` and certain bins do not contain any data).
    These features can be removed with feature selection algorithms
    (e.g., :class:`sklearn.feature_selection.VarianceThreshold`).

    Examples
    --------
    >>> X = [[-2, 1, -4,   -1],
    ...      [-1, 2, -3, -0.5],
    ...      [ 0, 3, -2,  0.5],
    ...      [ 1, 4, -1,    2]]
    >>> est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
    >>> est.fit(X)
    KBinsDiscretizer(...)
    >>> Xt = est.transform(X)
    >>> Xt  # doctest: +SKIP
    array([[ 0., 0., 0., 0.],
           [ 1., 1., 1., 0.],
           [ 2., 2., 2., 1.],
           [ 2., 2., 2., 2.]])

    Sometimes it may be useful to convert the data back into the original
    feature space. The ``inverse_transform`` function converts the binned
    data into the original feature space. Each value will be equal to the mean
    of the two bin edges.

    >>> est.bin_edges_[0]
    array([-2., -1.,  0.,  1.])
    >>> est.inverse_transform(Xt)
    array([[-1.5,  1.5, -3.5, -0.5],
           [-0.5,  2.5, -2.5, -0.5],
           [ 0.5,  3.5, -1.5,  0.5],
           [ 0.5,  3.5, -1.5,  1.5]])

    """

    bin_edges_ = CumlArrayDescriptor()
    n_bins_ = CumlArrayDescriptor()

    @_deprecate_pos_args(version="0.20")
    def __init__(self, n_bins=5, *, encode='onehot', strategy='quantile'):
        self.n_bins = n_bins
        self.encode = encode
        self.strategy = strategy

    def get_param_names(self):
        return super().get_param_names() + ["n_bins", "encode", "strategy"]

    def fit(self, X, y=None) -> "KBinsDiscretizer":
        """
        Fit the estimator.

        Parameters
        ----------
        X : numeric array-like, shape (n_samples, n_features)
            Data to be discretized.

        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`sklearn.pipeline.Pipeline`.

        Returns
        -------
        self
        """
        X = self._validate_data(X, dtype='numeric')

        valid_encode = ('onehot', 'onehot-dense', 'ordinal')
        if self.encode not in valid_encode:
            raise ValueError("Valid options for 'encode' are {}. "
                             "Got encode={!r} instead.".format(
                                 valid_encode, self.encode))
        valid_strategy = ('uniform', 'quantile', 'kmeans')
        if self.strategy not in valid_strategy:
            raise ValueError("Valid options for 'strategy' are {}. "
                             "Got strategy={!r} instead.".format(
                                 valid_strategy, self.strategy))

        n_features = X.shape[1]
        n_bins = self._validate_n_bins(n_features)
        n_bins = np.asnumpy(n_bins)

        bin_edges = cpu_np.zeros(n_features, dtype=object)
        for jj in range(n_features):
            column = X[:, jj]
            col_min, col_max = column.min(), column.max()

            if col_min == col_max:
                warnings.warn("Feature %d is constant and will be "
                              "replaced with 0." % jj)
                n_bins[jj] = 1
                bin_edges[jj] = np.array([-np.inf, np.inf])
                continue

            if self.strategy == 'uniform':
                bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)

            elif self.strategy == 'quantile':
                quantiles = np.linspace(0, 100, n_bins[jj] + 1)
                bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
                # Workaround for https://github.com/cupy/cupy/issues/4451
                # This should be removed as soon as a fix is available in cupy
                # in order to limit alterations in the included sklearn code
                bin_edges[jj][-1] = col_max

            elif self.strategy == 'kmeans':
                # Deterministic initialization with uniform spacing
                uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
                init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5

                # 1D k-means procedure
                km = KMeans(n_clusters=n_bins[jj],
                            init=init,
                            n_init=1,
                            output_type='cupy')
                km = km.fit(column[:, None])
                with using_output_type('cupy'):
                    centers = km.cluster_centers_[:, 0]
                # Must sort, centers may be unsorted even with sorted init
                centers.sort()
                bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
                bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]

            # Remove bins whose width is too small (i.e., <= 1e-8)
            if self.strategy in ('quantile', 'kmeans'):
                mask = np.diff(bin_edges[jj], prepend=-np.inf) > 1e-8
                bin_edges[jj] = bin_edges[jj][mask]
                if len(bin_edges[jj]) - 1 != n_bins[jj]:
                    warnings.warn('Bins whose width are too small (i.e., <= '
                                  '1e-8) in feature %d are removed. Consider '
                                  'decreasing the number of bins.' % jj)
                    n_bins[jj] = len(bin_edges[jj]) - 1

        self.bin_edges_ = bin_edges
        self.n_bins_ = n_bins

        if 'onehot' in self.encode:
            self._encoder = OneHotEncoder(
                categories=np.array([np.arange(i) for i in self.n_bins_]),
                sparse=self.encode == 'onehot',
                output_type='cupy')
            # Fit the OneHotEncoder with a toy dataset
            # so that it's ready for use after the KBinsDiscretizer is fitted
            self._encoder.fit(np.zeros((1, len(self.n_bins_)), dtype=int))

        return self

    def _validate_n_bins(self, n_features):
        """Returns n_bins_, the number of bins per feature.
        """
        orig_bins = self.n_bins
        if isinstance(orig_bins, numbers.Number):
            if not isinstance(orig_bins, numbers.Integral):
                raise ValueError("{} received an invalid n_bins type. "
                                 "Received {}, expected int.".format(
                                     KBinsDiscretizer.__name__,
                                     type(orig_bins).__name__))
            if orig_bins < 2:
                raise ValueError(
                    "{} received an invalid number "
                    "of bins. Received {}, expected at least 2.".format(
                        KBinsDiscretizer.__name__, orig_bins))
            return np.full(n_features, orig_bins, dtype=int)

        n_bins = check_array(orig_bins,
                             dtype=int,
                             copy=True,
                             ensure_2d=False)

        if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
            raise ValueError("n_bins must be a scalar or array "
                             "of shape (n_features,).")

        bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)

        violating_indices = np.where(bad_nbins_value)[0]
        if violating_indices.shape[0] > 0:
            indices = ", ".join(str(i) for i in violating_indices)
            raise ValueError("{} received an invalid number "
                             "of bins at indices {}. Number of bins "
                             "must be at least 2, and must be an int.".format(
                                 KBinsDiscretizer.__name__, indices))
        return n_bins

    def transform(self, X) -> SparseCumlArray:
        """
        Discretize the data.

        Parameters
        ----------
        X : numeric array-like, shape (n_samples, n_features)
            Data to be discretized.

        Returns
        -------
        Xt : numeric array-like or sparse matrix
            Data in the binned space.
        """
        check_is_fitted(self)

        Xt = check_array(X, copy=True, dtype=FLOAT_DTYPES)
        n_features = self.n_bins_.shape[0]
        if Xt.shape[1] != n_features:
            raise ValueError("Incorrect number of features. Expecting {}, "
                             "received {}.".format(n_features, Xt.shape[1]))

        bin_edges = self.bin_edges_
        for jj in range(Xt.shape[1]):
            # Values which are close to a bin edge are susceptible to numeric
            # instability. Add eps to X so these values are binned correctly
            # with respect to their decimal truncation. See documentation of
            # numpy.isclose for an explanation of ``rtol`` and ``atol``.
            rtol = 1.e-5
            atol = 1.e-8
            eps = atol + rtol * np.abs(Xt[:, jj])
            Xt[:, jj] = digitize(Xt[:, jj] + eps, bin_edges[jj][1:])
        self.n_bins_ = np.asarray(self.n_bins_)
        np.clip(Xt, 0, self.n_bins_ - 1, out=Xt)

        Xt = Xt.astype(np.int32)
        if self.encode == 'ordinal':
            return Xt

        Xt = self._encoder.transform(Xt)
        return Xt

    def inverse_transform(self, Xt) -> SparseCumlArray:
        """
        Transform discretized data back to original feature space.

        Note that this function does not regenerate the original data
        due to discretization rounding.

        Parameters
        ----------
        Xt : numeric array-like, shape (n_sample, n_features)
            Transformed data in the binned space.

        Returns
        -------
        Xinv : numeric array-like
            Data in the original feature space.
        """
        check_is_fitted(self)

        if 'onehot' in self.encode:
            Xt = check_array(Xt, accept_sparse=['csr', 'coo'], copy=True)
            Xt = self._encoder.inverse_transform(Xt)

        Xinv = check_array(Xt, copy=True, dtype=FLOAT_DTYPES)
        n_features = self.n_bins_.shape[0]
        if Xinv.shape[1] != n_features:
            raise ValueError("Incorrect number of features. Expecting {}, "
                             "received {}.".format(n_features, Xinv.shape[1]))

        for jj in range(n_features):
            bin_edges = self.bin_edges_[jj]
            bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
            idxs = np.asnumpy(Xinv[:, jj])
            Xinv[:, jj] = bin_centers[idxs.astype(np.int32)]

        return Xinv
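A minimal usage sketch of the discretizer above (not part of the original source), assuming cupy is available; the data is hypothetical. With encode='onehot-dense' the binned result comes back as a dense one-hot array, one block of n_bins columns per input feature.

import cupy as cp

X = cp.array([[-2.0, 1.0], [-1.0, 2.0], [0.0, 3.0], [1.0, 4.0]])
est = KBinsDiscretizer(n_bins=2, encode='onehot-dense', strategy='uniform')
Xt = est.fit_transform(X)
# Two features with two bins each: the one-hot output has 2 * 2 = 4 columns.
print(Xt.shape)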