Example #1
def test_internal_values_not_exposed():
    """Check that valid values that are for internal purpose, e.g. "warn" or
    "deprecated" are not exposed in the error message
    """
    @validate_params(
        {"param": [StrOptions({"auto", "warn"}, internal={"warn"})]})
    def f(param):
        pass

    with pytest.raises(ValueError, match="The 'param' parameter") as exc_info:
        f(param="bad")

    err_msg = str(exc_info.value)
    assert "a str among" in err_msg
    assert "auto" in err_msg
    assert "warn" not in err_msg

    # no error
    f(param="warn")

    @validate_params({"param": [int, StrOptions({"warn"}, internal={"warn"})]})
    def g(param):
        pass

    with pytest.raises(ValueError, match="The 'param' parameter") as exc_info:
        g(param="bad")

    err_msg = str(exc_info.value)
    assert "a str among" not in err_msg
    assert "warn" not in err_msg

    # no error
    g(param="warn")
Example #2
def test_stroptions():
    """Sanity check for the StrOptions constraint"""
    options = StrOptions({"a", "b", "c"}, deprecated={"c"})
    assert options.is_satisfied_by("a")
    assert options.is_satisfied_by("c")
    assert not options.is_satisfied_by("d")

    assert "'c' (deprecated)" in str(options)
Example #3
def test_stroptions_deprecated_internal_subset():
    """Check that the deprecated and internal parameters must be subsets of options."""
    with pytest.raises(ValueError,
                       match="deprecated options must be a subset"):
        StrOptions({"a", "b", "c"}, deprecated={"a", "d"})

    with pytest.raises(ValueError, match="internal options must be a subset"):
        StrOptions({"a", "b", "c"}, internal={"a", "d"})
Example #4
def test_hidden_stroptions():
    """Check that we can have 2 StrOptions constraints, one being hidden."""
    @validate_params(
        {"param": [StrOptions({"auto"}),
                   Hidden(StrOptions({"warn"}))]})
    def f(param):
        pass

    # "auto" and "warn" are valid params
    f("auto")
    f("warn")

    with pytest.raises(ValueError, match="The 'param' parameter") as exc_info:
        f(param="bad")

    # the "warn" option is not exposed in the error message
    err_msg = str(exc_info.value)
    assert "auto" in err_msg
    assert "warn" not in err_msg
Example #5
def test_stroptions_deprecated_subset():
    """Check that the deprecated parameter must be a subset of options."""
    with pytest.raises(ValueError,
                       match="deprecated options must be a subset"):
        StrOptions({"a", "b", "c"}, deprecated={"a", "d"})
Example #6
@pytest.mark.parametrize(
    "type, expected_type_name",
    [
        (np.ndarray, "numpy.ndarray"),
    ],
)
def test_instances_of_type_human_readable(type, expected_type_name):
    """Check the string representation of the _InstancesOf constraint."""
    constraint = _InstancesOf(type)
    assert str(constraint) == f"an instance of '{expected_type_name}'"


@pytest.mark.parametrize(
    "constraint",
    [
        Interval(Real, None, 0, closed="left"),
        Interval(Real, 0, None, closed="left"),
        Interval(Real, None, None, closed="neither"),
        StrOptions({"a", "b", "c"}),
    ],
)
def test_generate_invalid_param_val(constraint):
    """Check that the value generated does not satisfy the constraint"""
    bad_value = generate_invalid_param_val(constraint)
    assert not constraint.is_satisfied_by(bad_value)


@pytest.mark.parametrize(
    "integer_interval, real_interval",
    [
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(Real, -5, 5, closed="both"),
        ),
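
For context, the `Interval` constraints parametrized above expose the same `is_satisfied_by` check as `StrOptions`; a small illustrative sketch (assuming scikit-learn's private `Interval` API):

from numbers import Real

interval = Interval(Real, 0, None, closed="left")
assert interval.is_satisfied_by(0)       # left endpoint is included
assert interval.is_satisfied_by(3.5)     # any positive real
assert not interval.is_satisfied_by(-1)  # below the lower bound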
Example #7
import warnings

import numpy
from sklearn.cluster import KMeans
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.utils import check_array, check_random_state
from sklearn.utils._param_validation import StrOptions
from sklearn.utils.validation import _num_samples, check_is_fitted

# _tolerance and _kmeans_single_lloyd are module-local helpers of the
# original package and are not part of this excerpt.


class KMeansL1L2(KMeans):
    """
    K-Means clustering with either the L1 or the L2 norm.
    See notebook :ref:`kmeansl1rst` for an example.

    :param n_clusters: int, default=8
        The number of clusters to form as well as the number of
        centroids to generate.
    :param init: {'k-means++', 'random'} or ndarray of shape \
            (n_clusters, n_features), default='k-means++'
        Method for initialization, defaults to 'k-means++':

        'k-means++' : selects initial cluster centers for k-means
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': choose k observations (rows) at random from data for
        the initial centroids.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    :param n_init: int, default=10
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final result will be the best output of
        n_init consecutive runs in terms of inertia.
    :param max_iter: int, default=300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    :param tol: float, default=1e-4
        Relative tolerance with regard to inertia to declare convergence.
    :param precompute_distances: 'auto' or bool, default='auto'
        Precompute distances (faster but takes more memory).

        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.

        True : always precompute distances.

        False : never precompute distances.

    :param verbose: int, default=0
        Verbosity mode.
    :param random_state: int, RandomState instance, default=None
        Determines random number generation for centroid initialization. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    :param copy_x: bool, default=True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True (default), then the original data
        is not modified and X is guaranteed to be C-contiguous. If False, the
        original data is modified and put back before the function returns,
        but small numerical differences may be introduced by subtracting and
        then adding the data mean. In that case the data is also not
        guaranteed to be C-contiguous, which may cause a significant slowdown.
    :param n_jobs: int, default=None
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    :param algorithm: {"auto", "full", "elkan"}, default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    :param norm: {"L1", "L2"}
        The norm *L2* is identical to :epkg:`KMeans`.
        Norm *L1* uses a complete different path.

    Fitted attributes:

    * `cluster_centers_`: ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers. If the algorithm stops before fully
        converging (see ``tol`` and ``max_iter``), these will not be
        consistent with ``labels_``.
    * `labels_`: ndarray of shape (n_samples,)
        Labels of each point
    * `inertia_`: float
        Sum of squared distances of samples to their closest cluster center.
    * `n_iter_`: int
        Number of iterations run.
    """

    _parameter_constraints = {
        **getattr(KMeans, '_parameter_constraints', {}),
        "norm": [StrOptions({"L1", "L2"})],
    }

    def __init__(self,
                 n_clusters=8,
                 init='k-means++',
                 n_init=10,
                 max_iter=300,
                 tol=1e-4,
                 verbose=0,
                 random_state=None,
                 copy_x=True,
                 algorithm='full',
                 norm='L2'):

        KMeans.__init__(self,
                        n_clusters=n_clusters,
                        init=init,
                        n_init=n_init,
                        max_iter=max_iter,
                        tol=tol,
                        verbose=verbose,
                        random_state=random_state,
                        copy_x=copy_x,
                        algorithm=algorithm)
        self.norm = norm
        if self.norm == 'L1' and self.algorithm != 'full':
            raise NotImplementedError(  # pragma: no cover
                "Only algorithm 'full' is implemented with norm 'L1'.")

    def fit(self, X, y=None, sample_weight=None):
        """
        Computes k-means clustering.

        :param X: array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster. It must be noted that the data
            will be converted to C ordering, which will cause a memory
            copy if the given data is not C-contiguous.
        :param y: Ignored
            Not used, present here for API consistency by convention.
        :param sample_weight: array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).
        :return: self
            Fitted estimator.
        """
        if self.norm == 'L2':
            KMeans.fit(self, X=X, y=y, sample_weight=sample_weight)
        elif self.norm == 'L1':
            self._fit_l1(X=X, y=y, sample_weight=sample_weight)
        else:
            raise NotImplementedError(  # pragma: no cover
                "Norm is not 'L1' or 'L2' but '{}'.".format(self.norm))
        return self

    def _fit_l1(self, X, y=None, sample_weight=None):
        """
        Computes k-means clustering with norm `'L1'`.

        :param X: array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster. It must be noted that the data
            will be converted to C ordering, which will cause a memory
            copy if the given data is not C-contiguous.
        :param y: Ignored
            Not used, present here for API consistency by convention.
        :param sample_weight: array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).
        :return: self
            Fitted estimator.
        """
        random_state = check_random_state(self.random_state)

        n_init = self.n_init
        if n_init <= 0:
            raise ValueError(  # pragma: no cover
                "Invalid number of initializations."
                " n_init=%d must be greater than zero." % n_init)

        if self.max_iter <= 0:
            raise ValueError(  # pragma: no cover
                'Number of iterations should be a positive number,'
                ' got %d instead' % self.max_iter)

        # avoid forcing order when copy_x=False
        order = "C" if self.copy_x else None
        X = check_array(X,
                        accept_sparse='csr',
                        dtype=[numpy.float64, numpy.float32],
                        order=order,
                        copy=self.copy_x)
        # verify that the number of samples is at least n_clusters
        if _num_samples(X) < self.n_clusters:
            raise ValueError(  # pragma: no cover
                "n_samples=%d should be >= n_clusters=%d" %
                (_num_samples(X), self.n_clusters))

        tol = _tolerance(self.norm, X, self.tol)

        # Validate init array
        init = self.init
        if hasattr(init, '__array__'):
            init = check_array(init, dtype=X.dtype.type, copy=True)
            if hasattr(self, '_validate_center_shape'):
                self._validate_center_shape(  # pylint: disable=E1101
                    X, init)

            if n_init != 1:
                warnings.warn(  # pragma: no cover
                    'Explicit initial center position passed: '
                    'performing only one init in k-means instead of n_init=%d'
                    % n_init,
                    RuntimeWarning,
                    stacklevel=2)
                n_init = 1

        best_labels, best_inertia, best_centers = None, None, None
        algorithm = self.algorithm
        if self.n_clusters == 1:
            # elkan doesn't make sense for a single cluster, full will produce
            # the right result.
            algorithm = "full"  # pragma: no cover
        if algorithm == "auto":
            algorithm = "full"  # pragma: no cover
        if algorithm == "full":
            kmeans_single = _kmeans_single_lloyd
        else:
            raise ValueError(  # pragma: no cover
                "Algorithm must be 'auto', 'full' or 'elkan', got"
                " %s" % str(algorithm))

        seeds = random_state.randint(numpy.iinfo(numpy.int32).max, size=n_init)

        for seed in seeds:
            # run a k-means once
            labels, inertia, centers, n_iter_ = kmeans_single(
                self.norm,
                X,
                sample_weight,
                n_clusters=self.n_clusters,
                max_iter=self.max_iter,
                init=init,
                verbose=self.verbose,
                tol=tol,
                random_state=seed)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_

        distinct_clusters = len(set(best_labels))
        if distinct_clusters < self.n_clusters:
            warnings.warn(  # pragma: no cover
                "Number of distinct clusters ({}) found smaller than "
                "n_clusters ({}). Possibly due to duplicate points "
                "in X.".format(distinct_clusters, self.n_clusters),
                ConvergenceWarning,
                stacklevel=2)

        self.cluster_centers_ = best_centers
        self.labels_ = best_labels
        self.inertia_ = best_inertia
        self.n_iter_ = best_n_iter
        return self

    def transform(self, X):
        """
        Transforms *X* to a cluster-distance space.

        In the new space, each dimension is the distance to the cluster
        centers.  Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        :param X: {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to transform.
        :return: X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.norm == 'L2':
            return KMeans.transform(self, X)
        if self.norm == 'L1':
            return self._transform_l1(X)
        raise NotImplementedError(  # pragma: no cover
            "Norm is not L1 or L2 but '{}'.".format(self.norm))

    def _transform_l1(self, X):
        """
        Returns the distance of each point in *X* to
        every fitted cluster center.
        """
        check_is_fitted(self)
        X = self._check_test_data(X)
        return manhattan_distances(X, self.cluster_centers_)

    def predict(self, X, sample_weight=None):
        """
        Predicts the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        :param X: {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict.
        :param sample_weight: array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None). Unused here.
        :return: labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.norm == 'L2':
            return KMeans.predict(self, X)
        if self.norm == 'L1':
            return self._predict_l1(X, sample_weight=sample_weight)
        raise NotImplementedError(  # pragma: no cover
            "Norm is not L1 or L2 but '{}'.".format(self.norm))

    def _predict_l1(self, X, sample_weight=None, return_distances=False):
        """
        Predicts the closest cluster for each sample in *X* using
        the Manhattan distance.

        :param X: features
        :param sample_weight: (unused)
        :param return_distances: returns distances as well
        :return: labels or `labels, distances`
        """
        labels, mindist = pairwise_distances_argmin_min(
            X=X, Y=self.cluster_centers_, metric='manhattan')
        labels = labels.astype(numpy.int32, copy=False)
        if return_distances:
            return labels, mindist
        return labels
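
A minimal usage sketch for the class above (data and settings are illustrative; it assumes `KMeansL1L2` and its module-local helpers are importable, e.g. from mlinsights.mlmodel):

import numpy

X = numpy.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]])

km = KMeansL1L2(n_clusters=2, norm="L1", random_state=0)
km.fit(X)
print(km.labels_)           # one cluster index per sample
print(km.cluster_centers_)  # centers estimated under the L1 norm
print(km.predict(numpy.array([[0.2, 0.1]])))
print(km.transform(X))      # Manhattan distance to every center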
Example #8
def test_stroptions_deprecated_internal_overlap():
    """Check that the internal and deprecated parameters are not allowed to overlap."""
    with pytest.raises(ValueError, match="should not overlap"):
        StrOptions({"a", "b", "c"}, deprecated={"b", "c"}, internal={"a", "b"})