Example #1
def test_embed_dim(estimator, build_dataset):
    # Checks that the dimension of the output space is as expected
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    assert model.transform(X).shape == X.shape

    # assert that ValueError is thrown if input shape is 1D
    context = make_context(estimator)
    err_msg = ("2D array of formed points expected{}. Found 1D array "
               "instead:\ninput={}. Reshape your data and/or use a "
               "preprocessor.\n".format(context, X[0]))
    with pytest.raises(ValueError) as raised_error:
        model.score_pairs(model.transform(X[0, :]))
    assert str(raised_error.value) == err_msg
    # we test that the shape is also OK when doing dimensionality reduction
    if hasattr(model, 'n_components'):
        model.set_params(n_components=2)
        model.fit(*remove_y(estimator, input_data, labels))
        assert model.transform(X).shape == (X.shape[0], 2)
        # assert that ValueError is thrown if input shape is 1D
        with pytest.raises(ValueError) as raised_error:
            model.transform(model.transform(X[0, :]))
        assert str(raised_error.value) == err_msg
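
The remove_y helper used throughout these examples is defined elsewhere in the test suite. A plausible sketch of its behaviour, inferred from how it is called here (a hypothetical reconstruction, not the library's exact code):

# Hypothetical reconstruction of the remove_y helper these tests assume.
# Quadruplets and triplets learners fit on tuples alone (no y), so y is
# dropped for them; all other estimators get (X, y).
from metric_learn.base_metric import (_QuadrupletsClassifierMixin,
                                      _TripletsClassifierMixin)

def remove_y(estimator, X, y):
    if isinstance(estimator, (_QuadrupletsClassifierMixin,
                              _TripletsClassifierMixin)):
        return (X,)
    return (X, y)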
Example #2
def test_components_is_2D(estimator, build_dataset):
    """Tests that the transformation matrix of metric learners is 2D"""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    # test that it works for X.shape[1] features
    model.fit(*remove_y(estimator, input_data, labels))
    assert model.components_.shape == (X.shape[1], X.shape[1])

    # test that it works for 1 feature
    trunc_data = input_data[..., :1]
    # we drop duplicates that might have been formed, i.e. of the form
    # aabc or abcc or aabb for quadruplets, and aa for pairs.

    if isinstance(estimator, _QuadrupletsClassifierMixin):
        pairs_idx = [[0, 1], [2, 3]]
    elif isinstance(estimator, _TripletsClassifierMixin):
        pairs_idx = [[0, 1], [0, 2]]
    elif isinstance(estimator, _PairsClassifierMixin):
        pairs_idx = [[0, 1]]
    else:
        pairs_idx = []

    for pair_idx in pairs_idx:
        pairs = trunc_data[:, pair_idx, :]
        diffs = pairs[:, 1, :] - pairs[:, 0, :]
        to_keep = np.abs(diffs.ravel()) > 1e-9
        trunc_data = trunc_data[to_keep]
        labels = labels[to_keep]

    model.fit(*remove_y(estimator, trunc_data, labels))
    assert model.components_.shape == (1, 1)  # the components must be 2D
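
A small self-contained illustration of the duplicate-dropping mask above, using made-up 1-feature quadruplets (hypothetical data):

import numpy as np

# The first quadruplet has identical points at positions 0 and 1 ("aabc"),
# so the mask for pair_idx == [0, 1] drops it; the second survives.
trunc_data = np.array([[[1.], [1.], [2.], [3.]],
                       [[1.], [2.], [3.], [4.]]])
pairs = trunc_data[:, [0, 1], :]
diffs = pairs[:, 1, :] - pairs[:, 0, :]
to_keep = np.abs(diffs.ravel()) > 1e-9
print(to_keep)                    # [False  True]
print(trunc_data[to_keep].shape)  # (1, 4, 1)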
Example #3
def test_n_components(estimator, build_dataset):
    """Check that estimators that have a n_components parameters can use it
  and that it actually works as expected"""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)

    if hasattr(model, 'n_components'):
        set_random_state(model)
        model.set_params(n_components=None)
        model.fit(*remove_y(model, input_data, labels))
        assert model.components_.shape == (X.shape[1], X.shape[1])

        model = clone(estimator)
        set_random_state(model)
        model.set_params(n_components=X.shape[1] - 1)
        model.fit(*remove_y(model, input_data, labels))
        assert model.components_.shape == (X.shape[1] - 1, X.shape[1])

        model = clone(estimator)
        set_random_state(model)
        model.set_params(n_components=X.shape[1] + 1)
        with pytest.raises(ValueError) as expected_err:
            model.fit(*remove_y(model, input_data, labels))
        assert (str(expected_err.value) ==
                'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))

        model = clone(estimator)
        set_random_state(model)
        model.set_params(n_components=0)
        with pytest.raises(ValueError) as expected_err:
            model.fit(*remove_y(model, input_data, labels))
        assert (str(expected_err.value) ==
                'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
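
As a concrete, standalone sketch of the contract tested above, assuming metric-learn's NCA (which exposes n_components); components_ has shape (n_components, n_features):

import numpy as np
from metric_learn import NCA

rng = np.random.RandomState(42)
X = rng.randn(50, 4)
y = rng.randint(0, 2, 50)

nca = NCA(n_components=2).fit(X, y)
print(nca.components_.shape)  # (2, 4): rows index output dims, columns input dims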
Example #4
def test_cross_validation_is_finite(estimator, build_dataset):
    """Tests that validation on metric-learn estimators returns something finite
  """
    input_data, labels, preprocessor, _ = build_dataset()
    estimator = clone(estimator)
    estimator.set_params(preprocessor=preprocessor)
    set_random_state(estimator)
    assert np.isfinite(
        cross_val_score(estimator, *remove_y(estimator, input_data,
                                             labels))).all()
    assert np.isfinite(
        cross_val_predict(estimator, *remove_y(estimator, input_data,
                                               labels))).all()
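
A minimal standalone sketch of what this test exercises, assuming metric-learn's MMC, a pairs classifier whose fit takes pairs of shape (n_pairs, 2, n_features) and labels in {-1, 1} (random data here, so convergence warnings are possible):

import numpy as np
from sklearn.model_selection import cross_val_score
from metric_learn import MMC

rng = np.random.RandomState(0)
pairs = rng.randn(40, 2, 3)             # (n_pairs, 2, n_features)
y_pairs = rng.choice([-1, 1], size=40)  # +1 similar, -1 dissimilar

scores = cross_val_score(MMC(), pairs, y_pairs)
assert np.isfinite(scores).all()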
Example #5
def test_array_like_inputs(estimator, build_dataset, with_preprocessor):
    """Test that metric-learners can have as input (of all functions that are
  applied on data) any array-like object."""
    input_data, labels, preprocessor, X = build_dataset(with_preprocessor)

    # we subsample the data for the test to be more efficient
    input_data, _, labels, _ = train_test_split(input_data,
                                                labels,
                                                train_size=40,
                                                random_state=42)
    X = X[:10]

    estimator = clone(estimator)
    estimator.set_params(preprocessor=preprocessor)
    set_random_state(estimator)
    input_variants, label_variants = generate_array_like(input_data, labels)
    for input_variant in input_variants:
        for label_variant in label_variants:
            estimator.fit(*remove_y(estimator, input_variant, label_variant))
        if hasattr(estimator, "predict"):
            estimator.predict(input_variant)
        if hasattr(estimator, "predict_proba"):
            estimator.predict_proba(input_variant)  # in anticipation: no
            # current learner has predict_proba yet, but if one is contributed
            # it will be checked automatically here
        if hasattr(estimator, "decision_function"):
            estimator.decision_function(input_variant)
        if hasattr(estimator, "score"):
            for label_variant in label_variants:
                estimator.score(
                    *remove_y(estimator, input_variant, label_variant))

    X_variants, _ = generate_array_like(X)
    for X_variant in X_variants:
        estimator.transform(X_variant)

    pairs = np.array([[X[0], X[1]], [X[0], X[2]]])
    pairs_variants, _ = generate_array_like(pairs)

    not_implemented_msg = ""
    # Todo in 0.7.0: Change 'not_implemented_msg' for the message that says
    # "This learner does not have pair_distance"

    for pairs_variant in pairs_variants:
        estimator.pair_score(pairs_variant)  # All learners have pair_score

        # But not all of them will have pair_distance
        try:
            estimator.pair_distance(pairs_variant)
        except Exception as raised_exception:
            # a caught exception has no .value attribute; compare args directly
            assert raised_exception.args[0] == not_implemented_msg
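
generate_array_like is another helper defined outside this page; a minimal sketch of the behaviour the loops above rely on (hypothetical, not the library's exact code):

# Hypothetical sketch: produce several array-like views of the same data so
# every public method can be exercised with lists and tuples as well as arrays.
def generate_array_like(input_data, labels=None):
    input_variants = [input_data, list(input_data), tuple(input_data)]
    label_variants = [labels]
    if labels is not None:
        label_variants += [list(labels), tuple(labels)]
    return input_variants, label_variants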
Example #6
def test_get_metric_works_does_not_raise(estimator, build_dataset):
    """Tests that the metric returned by get_metric does not raise errors (or
  warnings), similarly to the distance functions in scipy.spatial.distance"""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(model, input_data, labels))
    metric = model.get_metric()

    list_test_get_metric_doesnt_raise = [(X[0], X[1]),
                                         (X[0].tolist(), X[1].tolist()),
                                         (X[0][None], X[1][None])]

    for u, v in list_test_get_metric_doesnt_raise:
        with pytest.warns(None) as record:
            metric(u, v)
        assert len(record) == 0

    # Test that the scalar case works
    model.components_ = np.array([3.1])
    metric = model.get_metric()
    for u, v in [(5, 6.7), ([5], [6.7]), ([[5]], [[6.7]])]:
        with pytest.warns(None) as record:
            metric(u, v)
        assert len(record) == 0
Example #7
def test_embed_finite(estimator, build_dataset):
    # Checks that embed returns vectors with finite values
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    assert np.isfinite(model.transform(X)).all()
Example #8
def test_deterministic_initialization(estimator, build_dataset):
    """Test that estimators that have a prior or an init are deterministic
  when it is set to random and when the random_state is fixed."""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    if hasattr(estimator, 'init'):
        model.set_params(init='random')
    if hasattr(estimator, 'prior'):
        model.set_params(prior='random')
    model1 = clone(model)
    set_random_state(model1, 42)
    model1 = model1.fit(*remove_y(model, input_data, labels))
    model2 = clone(model)
    set_random_state(model2, 42)
    model2 = model2.fit(*remove_y(model, input_data, labels))
    np.testing.assert_allclose(model1.get_mahalanobis_matrix(),
                               model2.get_mahalanobis_matrix())
Example #9
def test_estimators_fit_returns_self(estimator, build_dataset,
                                     with_preprocessor):
    """Check if self is returned when calling fit"""
    # Adapted from scikit-learn
    input_data, labels, preprocessor, _ = build_dataset(with_preprocessor)
    estimator = clone(estimator)
    estimator.set_params(preprocessor=preprocessor)
    assert estimator.fit(*remove_y(estimator, input_data, labels)) is estimator
Example #10
def test_score_pairs_finite(estimator, build_dataset):
    # Tests that the pair scores are finite
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    pairs = np.array(list(product(X, X)))
    assert np.isfinite(model.score_pairs(pairs)).all()
Example #11
def test_get_metric_compatible_with_scikit_learn(estimator, build_dataset):
    """Check that the metric returned by get_metric is compatible with
  scikit-learn's algorithms using a custom metric, DBSCAN for instance"""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    clustering = DBSCAN(metric=model.get_metric())
    clustering.fit(X)
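
The property under test is that DBSCAN accepts any callable metric(u, v) -> float, which is the contract get_metric satisfies. A self-contained illustration with a stand-in linear map in place of a fitted learner:

import numpy as np
from sklearn.cluster import DBSCAN

L = np.array([[1.0, 0.0],
              [0.5, 2.0]])  # stand-in for a learned components_ matrix

def metric(u, v):
    diff = L.dot(np.asarray(u) - np.asarray(v))
    return np.sqrt(diff.dot(diff))

X = np.random.RandomState(0).randn(30, 2)
DBSCAN(eps=1.0, metric=metric).fit(X)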
Example #12
def test_embed_toy_example(estimator, build_dataset):
    # Checks that embed works on a toy example
    input_data, labels, _, X = build_dataset()
    n_samples = 20
    X = X[:n_samples]
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    embedded_points = X.dot(model.components_.T)
    assert_array_almost_equal(model.transform(X), embedded_points)
Example #13
def test_embed_is_linear(estimator, build_dataset):
    # Checks that the embedding is linear
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    assert_array_almost_equal(
        model.transform(X[:10] + X[10:20]),
        model.transform(X[:10]) + model.transform(X[10:20]))
    assert_array_almost_equal(model.transform(5 * X[:10]),
                              5 * model.transform(X[:10]))
Example #14
def test_get_metric_is_independent_from_metric_learner(estimator,
                                                       build_dataset):
    """Tests that the get_metric method returns a function that is independent
  from the original metric learner"""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)

    # we fit the metric learner on it and then we compute the metric on some
    # points
    model.fit(*remove_y(model, input_data, labels))
    metric = model.get_metric()
    score = metric(X[0], X[1])

    # then we refit the estimator on another dataset
    model.fit(*remove_y(model, np.sin(input_data), labels))

    # we recompute the distance between the two points: it should be the same
    score_bis = metric(X[0], X[1])
    assert score_bis == score
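
This independence typically holds because get_metric closes over a snapshot of the learned transformation rather than the live estimator. A simplified sketch of that design (an assumption about the implementation, not the library's code):

import numpy as np

def get_metric_sketch(components):
    L = np.array(components, copy=True)  # snapshot, decoupled from the learner
    def metric(u, v):
        diff = L.dot(np.asarray(u) - np.asarray(v))
        return np.sqrt(diff.dot(diff))
    return metric

components = np.eye(2)
metric = get_metric_sketch(components)
components[0, 0] = 100.0           # mutating the source afterwards...
print(metric([0., 0.], [1., 0.]))  # ...does not change the metric: 1.0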
Example #15
def test_cross_validation_manual_vs_scikit(estimator, build_dataset,
                                           with_preprocessor):
    """Tests that if we make a manual cross-validation, the result will be the
  same as scikit-learn's cross-validation (some code for generating the
  folds is taken from scikit-learn).
  """
    if any(hasattr(estimator, method) for method in ["predict", "score"]):
        input_data, labels, preprocessor, _ = build_dataset(with_preprocessor)
        estimator = clone(estimator)
        estimator.set_params(preprocessor=preprocessor)
        set_random_state(estimator)
        n_splits = 3
        kfold = KFold(shuffle=False, n_splits=n_splits)
        n_samples = input_data.shape[0]
        fold_sizes = (n_samples // n_splits) * np.ones(n_splits,
                                                       dtype=np.int64)
        fold_sizes[:n_samples % n_splits] += 1
        current = 0
        scores, predictions = [], np.zeros(input_data.shape[0])
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            current = stop
            test_slice = slice(start, stop)
            train_mask = np.ones(input_data.shape[0], bool)
            train_mask[test_slice] = False
            y_train, y_test = labels[train_mask], labels[test_slice]
            estimator.fit(
                *remove_y(estimator, input_data[train_mask], y_train))
            if hasattr(estimator, "score"):
                scores.append(
                    estimator.score(
                        *remove_y(estimator, input_data[test_slice], y_test)))
            if hasattr(estimator, "predict"):
                predictions[test_slice] = estimator.predict(
                    input_data[test_slice])
        if hasattr(estimator, "score"):
            assert all(scores == cross_val_score(
                estimator, *remove_y(estimator, input_data, labels), cv=kfold))
        if hasattr(estimator, "predict"):
            assert all(predictions == cross_val_predict(
                estimator, *remove_y(estimator, input_data, labels), cv=kfold))
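
The manual fold-size arithmetic above mirrors what scikit-learn's KFold does internally; a quick self-contained check:

import numpy as np
from sklearn.model_selection import KFold

n_samples, n_splits = 40, 3
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int64)
fold_sizes[:n_samples % n_splits] += 1  # the first folds absorb the remainder

kfold_sizes = [len(test) for _, test in
               KFold(n_splits=n_splits, shuffle=False).split(np.zeros(n_samples))]
assert list(fold_sizes) == kfold_sizes  # [14, 13, 13]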
Example #16
def test_array_like_inputs(estimator, build_dataset, with_preprocessor):
    """Test that metric-learners can have as input (of all functions that are
  applied on data) any array-like object."""
    input_data, labels, preprocessor, X = build_dataset(with_preprocessor)

    # we subsample the data for the test to be more efficient
    input_data, _, labels, _ = train_test_split(input_data,
                                                labels,
                                                train_size=40,
                                                random_state=42)
    X = X[:10]

    estimator = clone(estimator)
    estimator.set_params(preprocessor=preprocessor)
    set_random_state(estimator)
    input_variants, label_variants = generate_array_like(input_data, labels)
    for input_variant in input_variants:
        for label_variant in label_variants:
            estimator.fit(*remove_y(estimator, input_variant, label_variant))
        if hasattr(estimator, "predict"):
            estimator.predict(input_variant)
        if hasattr(estimator, "predict_proba"):
            estimator.predict_proba(input_variant)  # in anticipation: no
            # current learner has predict_proba yet, but if one is contributed
            # it will be checked automatically here
        if hasattr(estimator, "decision_function"):
            estimator.decision_function(input_variant)
        if hasattr(estimator, "score"):
            for label_variant in label_variants:
                estimator.score(
                    *remove_y(estimator, input_variant, label_variant))

    X_variants, _ = generate_array_like(X)
    for X_variant in X_variants:
        estimator.transform(X_variant)

    pairs = np.array([[X[0], X[1]], [X[0], X[2]]])
    pairs_variants, _ = generate_array_like(pairs)
    for pairs_variant in pairs_variants:
        estimator.score_pairs(pairs_variant)
Example #17
def test_score_pairs_toy_example(estimator, build_dataset):
    # Checks that score_pairs works on a toy example
    input_data, labels, _, X = build_dataset()
    n_samples = 20
    X = X[:n_samples]
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    pairs = np.stack([X[:10], X[10:20]], axis=1)
    embedded_pairs = pairs.dot(model.components_.T)
    distances = np.sqrt(
        np.sum((embedded_pairs[:, 1] - embedded_pairs[:, 0])**2, axis=-1))
    assert_array_almost_equal(model.score_pairs(pairs), distances)
Example #18
def test_simple_estimator(estimator, build_dataset, with_preprocessor):
    """Tests that fit, predict and scoring works.
  """
    if any(hasattr(estimator, method) for method in ["predict", "score"]):
        input_data, labels, preprocessor, _ = build_dataset(with_preprocessor)
        (tuples_train, tuples_test, y_train,
         y_test) = train_test_split(input_data, labels, random_state=RNG)
        estimator = clone(estimator)
        estimator.set_params(preprocessor=preprocessor)
        set_random_state(estimator)

        estimator.fit(*remove_y(estimator, tuples_train, y_train))
        check_score(estimator, tuples_test, y_test)
        check_predict(estimator, tuples_test)
Example #19
def test_pair_distance_pair_score_equivalent(estimator, build_dataset):
    """
  For Mahalanobis learners, pair_score should be equivalent to the
  negative of the pair_distance result.
  """
    input_data, labels, _, X = build_dataset()
    n_samples = 20
    X = X[:n_samples]
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))

    distances = model.pair_distance(np.array(list(product(X, X))))
    scores = model.pair_score(np.array(list(product(X, X))))

    assert_array_equal(distances, -1 * scores)
Example #20
def test_get_squared_metric(estimator, build_dataset):
    """Test that the squared metric returned is indeed the square of the
  metric"""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    metric = model.get_metric()

    n_features = X.shape[1]
    for seed in range(10):
        rng = np.random.RandomState(seed)
        a, b = (rng.randn(n_features) for _ in range(2))
        assert_allclose(metric(a, b, squared=True),
                        metric(a, b, squared=False)**2,
                        rtol=1e-15)
Example #21
def test_get_metric_equivalent_to_explicit_mahalanobis(estimator,
                                                       build_dataset):
    """Tests that using the get_metric method of mahalanobis metric learners is
  equivalent to explicitly calling scipy's mahalanobis metric
  """
    rng = np.random.RandomState(42)
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    metric = model.get_metric()
    n_features = X.shape[1]
    a, b = (rng.randn(n_features), rng.randn(n_features))
    expected_dist = mahalanobis(a[None],
                                b[None],
                                VI=model.get_mahalanobis_matrix())
    assert_allclose(metric(a, b), expected_dist, rtol=1e-13)
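
The identity being tested, spelled out with synthetic data: for M = L.T @ L, sqrt((a - b).T M (a - b)) is exactly scipy's mahalanobis(a, b, VI=M):

import numpy as np
from scipy.spatial.distance import mahalanobis

rng = np.random.RandomState(0)
L = rng.randn(3, 3)
M = L.T.dot(L)  # a valid (PSD) Mahalanobis matrix
a, b = rng.randn(3), rng.randn(3)

manual = np.sqrt((a - b).dot(M).dot(a - b))
assert np.isclose(manual, mahalanobis(a, b, VI=M))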
Example #22
def test_score_pairs_dim(estimator, build_dataset):
    # scoring of 3D arrays should return a 1D array (several tuples),
    # and scoring of 2D arrays (one tuple) should raise an error (like
    # scikit-learn's error when scoring 1D arrays)
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    tuples = np.array(list(product(X, X)))
    assert model.score_pairs(tuples).shape == (tuples.shape[0], )
    context = make_context(estimator)
    msg = ("3D array of formed tuples expected{}. Found 2D array "
           "instead:\ninput={}. Reshape your data and/or use a preprocessor.\n"
           .format(context, tuples[1]))
    with pytest.raises(ValueError) as raised_error:
        model.score_pairs(tuples[1])
    assert str(raised_error.value) == msg
Example #23
def test_dont_overwrite_parameters(estimator, build_dataset,
                                   with_preprocessor):
    # Adapted from scikit-learn
    # check that fit method only changes or sets private attributes
    input_data, labels, preprocessor, _ = build_dataset(with_preprocessor)
    estimator = clone(estimator)
    estimator.set_params(preprocessor=preprocessor)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    dict_before_fit = estimator.__dict__.copy()

    estimator.fit(*remove_y(estimator, input_data, labels))
    dict_after_fit = estimator.__dict__

    public_keys_after_fit = [
        key for key in dict_after_fit.keys() if is_public_parameter(key)
    ]

    attrs_added_by_fit = [
        key for key in public_keys_after_fit
        if key not in dict_before_fit.keys()
    ]

    # check that fit doesn't add any public attribute
    assert not attrs_added_by_fit, (
        "Estimator adds public attribute(s) during the fit method."
        " Estimators are only allowed to add private attributes"
        " that start or end with _, but %s was added"
        % ', '.join(attrs_added_by_fit))

    # check that fit doesn't change any public attribute
    attrs_changed_by_fit = [
        key for key in public_keys_after_fit
        if (dict_before_fit[key] is not dict_after_fit[key])
    ]

    assert not attrs_changed_by_fit, (
        "Estimator changes public attribute(s) during the fit method."
        " Estimators are only allowed to change attributes that start"
        " or end with _, but %s changed" % ', '.join(attrs_changed_by_fit))
Example #24
def test_get_metric_raises_error(estimator, build_dataset):
    """Tests that the metric returned by get_metric raises errors similar to
  those of the distance functions in scipy.spatial.distance"""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(model, input_data, labels))
    metric = model.get_metric()

    list_test_get_metric_raises = [
        # vectors with different dimensions
        (X[0].tolist() + [5.2], X[1]),
        # 2D vectors
        (X[0:4], X[1:5]),
        # vectors of the same dimension but incompatible with what the metric
        # learner was trained on (the .tolist() matters: adding a plain list
        # to an ndarray would broadcast instead of appending)
        (X[0].tolist() + [5.2], X[1].tolist() + [7.2])]

    for u, v in list_test_get_metric_raises:
        with pytest.raises(ValueError):
            metric(u, v)
Example #25
def test_score_pairs_warning(estimator, build_dataset):
    """Tests that score_pairs raises a FutureWarning announcing its
  deprecation, and that score_pairs and pair_distance behave identically"""
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)

    # We fit the metric learner and then call score_pairs on some points
    model.fit(*remove_y(model, input_data, labels))

    msg = ("score_pairs will be deprecated in release 0.7.0. "
           "Use pair_score to compute similarity scores, or "
           "pair_distances to compute distances.")
    with pytest.warns(FutureWarning) as raised_warning:
        score = model.score_pairs([[X[0], X[1]], ])
        dist = model.pair_distance([[X[0], X[1]], ])
        assert array_equal(score, dist)
    assert any(str(warning.message) == msg for warning in raised_warning)
Example #26
def test_score_pairs_pairwise(estimator, build_dataset):
    # Computing pairwise scores should return a Euclidean distance matrix.
    input_data, labels, _, X = build_dataset()
    n_samples = 20
    X = X[:n_samples]
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))

    pairwise = model.score_pairs(np.array(list(product(X, X))))\
        .reshape(n_samples, n_samples)

    check_is_distance_matrix(pairwise)

    # a necessary condition for Euclidean distance matrices (see
    # https://en.wikipedia.org/wiki/Euclidean_distance_matrix)
    assert np.linalg.matrix_rank(pairwise**2) <= min(X.shape) + 2

    # assert that this distance is coherent with pdist on embeddings
    assert_array_almost_equal(squareform(pairwise), pdist(model.transform(X)))
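
check_is_distance_matrix is not reproduced on this page; a plausible sketch of the properties it verifies (hypothetical reconstruction):

import numpy as np

def check_is_distance_matrix(D):
    assert (D >= 0).all()              # non-negativity
    assert np.allclose(D, D.T)         # symmetry
    assert np.allclose(np.diag(D), 0)  # zero diagonal
    # triangle inequality: D[i, k] <= D[i, j] + D[j, k] for all i, j, k
    assert (D[:, None, :] <= D[:, :, None] + D[None, :, :] + 1e-10).all()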
Example #27
def test_get_metric_is_pseudo_metric(estimator, build_dataset):
    """Tests that the get_metric method of mahalanobis metric learners returns a
  pseudo-metric (metric but without one side of the equivalence of
  the identity of indiscernables property)
  """
    input_data, labels, _, X = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(estimator, input_data, labels))
    metric = model.get_metric()

    n_features = X.shape[1]
    for seed in range(10):
        rng = np.random.RandomState(seed)
        a, b, c = (rng.randn(n_features) for _ in range(3))
        assert metric(a, b) >= 0  # positivity
        assert metric(a, b) == metric(b, a)  # symmetry
        # one side of the identity of indiscernibles: x == y => d(x, y) == 0.
        # The other side of the equivalence is not always true for Mahalanobis
        # distances.
        assert metric(a, a) == 0
        # triangle inequality
        assert (metric(a, c) < metric(a, b) + metric(b, c) or np.isclose(
            metric(a, c), metric(a, b) + metric(b, c), rtol=1e-20))
Example #28
def test_dict_unchanged(estimator, build_dataset, with_preprocessor):
    # Adapted from scikit-learn
    (input_data, labels, preprocessor,
     to_transform) = build_dataset(with_preprocessor)
    estimator = clone(estimator)
    estimator.set_params(preprocessor=preprocessor)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    estimator.fit(*remove_y(estimator, input_data, labels))

    def check_dict():
        assert estimator.__dict__ == dict_before, (
            "Estimator changes __dict__ during %s" % method)

    for method in ["predict", "decision_function", "predict_proba"]:
        if hasattr(estimator, method):
            dict_before = estimator.__dict__.copy()
            getattr(estimator, method)(input_data)
            check_dict()
    if hasattr(estimator, "transform"):
        dict_before = estimator.__dict__.copy()
        # we transform only dataset of points
        estimator.transform(to_transform)
        check_dict()
Example #29
def check_score(estimator, tuples, y):
    if hasattr(estimator, "score"):
        score = estimator.score(*remove_y(estimator, tuples, y))
        assert np.isfinite(score)
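
check_predict, used alongside check_score in Example #18, is not shown on this page either; a plausible reconstruction in the same style:

def check_predict(estimator, tuples):
    if hasattr(estimator, "predict"):
        predictions = estimator.predict(tuples)
        assert len(predictions) == len(tuples)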