Example #1
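All four examples below are test methods excerpted from a unittest.TestCase in the tune-sklearn test suite; they rely on module-level fixtures defined elsewhere in that file. The following is a minimal sketch of what those fixtures might look like: the imports are the real tune_sklearn / Ray Tune / scikit-learn / NumPy entry points, but this MockClassifier and the X, y arrays are reconstructions for illustration, modeled on scikit-learn's own test stub.

import numpy as np
from numpy.testing import assert_array_equal
from sklearn.exceptions import NotFittedError
from ray import tune
from tune_sklearn import TuneGridSearchCV, TuneSearchCV

# Tiny toy dataset (reconstruction; the real fixtures live in the test module).
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])

class MockClassifier:
    """Stub estimator: its score depends only on foo_param, so a search
    over foo_param has a deterministic winner."""

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, y):
        return self

    def predict(self, X):
        return np.zeros(len(X))

    # The no-refit test exercises these delegated methods as well.
    predict_proba = predict
    predict_log_proba = predict
    decision_function = predict
    transform = predict
    inverse_transform = predict

    def score(self, X=None, y=None):
        # foo_param > 1 wins; foo_param=2 and foo_param=3 tie.
        return 1.0 if self.foo_param > 1 else 0.0

    def get_params(self, deep=False):
        return {"foo_param": self.foo_param}

    def set_params(self, **params):
        self.foo_param = params["foo_param"]
        return self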
    def test_no_refit(self):
        # Test that GSCV can be used for model selection alone without
        # refitting
        clf = MockClassifier()
        grid_search = TuneGridSearchCV(clf, {"foo_param": [1, 2, 3]},
                                       refit=False,
                                       cv=3)
        grid_search.fit(X, y)
        self.assertFalse(hasattr(grid_search, "best_estimator_"))
        self.assertFalse(hasattr(grid_search, "best_index_"))
        self.assertFalse(hasattr(grid_search, "best_score_"))
        self.assertFalse(hasattr(grid_search, "best_params_"))

        # Make sure predict/transform etc. raise a meaningful error message
        for fn_name in (
                "predict",
                "predict_proba",
                "predict_log_proba",
                "transform",
                "inverse_transform",
        ):
            with self.assertRaises(NotFittedError) as exc:
                getattr(grid_search, fn_name)(X)
            self.assertTrue(
                ("refit=False. %s is available only after refitting on the "
                 "best parameters" % fn_name) in str(exc.exception))
Example #2
    def test_trivial_cv_results_attr(self):
        # Test search over a "grid" with only one point.
        # Non-regression test: the results attribute (grid_scores_ in
        # older APIs, cv_results_ here) wasn't set by dcv.GridSearchCV
        # for a single-point grid.
        clf = MockClassifier()
        grid_search = TuneGridSearchCV(clf, {"foo_param": [1]}, cv=3)
        grid_search.fit(X, y)
        self.assertTrue(hasattr(grid_search, "cv_results_"))

        random_search = TuneSearchCV(clf, {"foo_param": [0]}, n_iter=1, cv=3)
        random_search.fit(X, y)
        self.assertTrue(hasattr(random_search, "cv_results_"))
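For TuneSearchCV, n_iter bounds the number of parameter settings sampled, so n_iter=1 over the one-point space {"foo_param": [0]} evaluates exactly one candidate and must still populate cv_results_.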
Example #3
    def test_tune_search_spaces(self):
        # Test that a Ray Tune search-space primitive (tune.grid_search)
        # and a plain list can be mixed in the same parameter dict
        clf = MockClassifier()
        foo = [1, 2, 3]
        bar = [1, 2]
        grid_search = TuneGridSearchCV(
            clf, {
                "foo_param": tune.grid_search(foo),
                "bar_param": bar
            },
            refit=False,
            cv=3)
        grid_search.fit(X, y)
        params = grid_search.cv_results_["params"]
        results_grid = {k: {dic[k] for dic in params} for k in params[0]}
        self.assertEqual(len(results_grid["foo_param"]), len(foo))
        self.assertEqual(len(results_grid["bar_param"]), len(bar))
Example #4
    def test_grid_search(self):
        # Test that the best estimator contains the right value for foo_param
        clf = MockClassifier()
        grid_search = TuneGridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3)
        # make sure it selects the smallest parameter in case of ties
        # (foo_param=2 and foo_param=3 score identically here)
        grid_search.fit(X, y)
        self.assertEqual(grid_search.best_estimator_.foo_param, 2)

        assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
                           [1, 2, 3])

        # Smoke test the score etc:
        grid_search.score(X, y)
        grid_search.predict_proba(X)
        grid_search.decision_function(X)
        grid_search.transform(X)

        # Test exception handling: an unrecognized scoring string should
        # raise ValueError at fit time
        grid_search.scoring = "sklearn"
        with self.assertRaises(ValueError):
            grid_search.fit(X, y)
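By contrast, a recognized scorer name passes scikit-learn's scorer lookup; a minimal sketch using the built-in "accuracy" string (any valid sklearn scoring value would do):

grid_search = TuneGridSearchCV(MockClassifier(), {"foo_param": [1, 2, 3]},
                               scoring="accuracy", cv=3)
grid_search.fit(X, y)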