Example 1
    def test_evaluate(self):
        """
        Make sure that evaluate works.
        """
        model = self.model
        t = self.dtrain[self.target]
        p = model.predict(self.dtrain)
        self.sm_metrics = {
            "max_error": evaluation.max_error(t, p),
            "rmse": evaluation.rmse(t, p),
        }

        def check_metric(ans, metric):
            self.assertIsNotNone(ans)
            self.assertIn(metric, ans)
            self.assertAlmostEqual(
                ans[metric],
                self.sm_metrics[metric],
                places=4,
                msg="%s = (%s,%s)" %
                (metric, ans[metric], self.sm_metrics[metric]),
            )

        # Default
        ans = model.evaluate(self.dtrain)
        self.assertEqual(sorted(ans.keys()), sorted(self.metrics))
        for m in self.metrics:
            check_metric(ans, m)

        # Individual
        for m in self.metrics:
            ans = model.evaluate(self.dtrain, metric=m)
            check_metric(ans, m)
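For reference, the `evaluation.max_error` and `evaluation.rmse` calls above produce the expected values that `model.evaluate` is checked against. A minimal plain-Python sketch of what these regression metrics compute (the function names are illustrative, not TuriCreate's implementation):

import math

def rmse_sketch(targets, predictions):
    # Root-mean-squared error: square root of the mean squared residual.
    n = len(targets)
    return math.sqrt(sum((t - p) ** 2 for t, p in zip(targets, predictions)) / n)

def max_error_sketch(targets, predictions):
    # Largest absolute residual over all rows.
    return max(abs(t - p) for t, p in zip(targets, predictions))

print(rmse_sketch([1.0, 2.0], [1.5, 1.5]))       # 0.5
print(max_error_sketch([1.0, 2.0], [1.5, 1.5]))  # 0.5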
Example 2
    def _test_regression_model(self, train, test, rmse_threshold, target='label'):
        # create
        model = tc.boosted_trees_regression.create(train, target=target,
                                                   validation_set=test,
                                                   **self.param)
        # predict
        pred = model.predict(test)
        rmse = evaluation.rmse(pred, test[target])
        self.assertLess(rmse, rmse_threshold)

        # evaluate
        rmse_eval = model.evaluate(test, metric='rmse')['rmse']
        self.assertLess(rmse_eval, rmse_threshold)
        self.assertAlmostEqual(rmse_eval, rmse, delta=1e-2)
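A hedged end-to-end sketch of the same create → predict → evaluate sequence this helper exercises, on assumed synthetic data (the dataset, parameters, and tolerances are illustrative, not the ones used by the real suite):

import turicreate as tc
from turicreate.toolkits import evaluation

# Illustrative synthetic regression data; the real suite builds its own splits.
sf = tc.SFrame({"x": [float(i) for i in range(200)],
                "label": [2.0 * i + 1.0 for i in range(200)]})
train, test = sf.random_split(0.8, seed=0)

model = tc.boosted_trees_regression.create(train, target="label",
                                           validation_set=test,
                                           max_iterations=10, verbose=False)

pred = model.predict(test)
rmse = evaluation.rmse(pred, test["label"])

# evaluate() with metric='rmse' returns a one-entry dict keyed by the metric name,
# which is why the helper indexes the result with ['rmse'].
rmse_eval = model.evaluate(test, metric="rmse")["rmse"]
assert abs(rmse_eval - rmse) < 1e-2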
Example 3
    def _test_regression_model(self,
                               train,
                               test,
                               rmse_threshold,
                               target="label"):
        # create
        model = tc.random_forest_regression.create(train,
                                                   target=target,
                                                   validation_set=test,
                                                   **self.param)
        # predict
        pred = model.predict(test)
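        # also exercise predict() with a plain list of row dicts (iterating an SFrame yields one dict per row)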
        pred_lst = model.predict(list(test))
        rmse = evaluation.rmse(pred, test[target])
        self.assertLess(rmse, rmse_threshold)

        # evaluate
        rmse_eval = model.evaluate(test, metric="rmse")["rmse"]
        self.assertLess(rmse_eval, rmse_threshold)
        self.assertAlmostEqual(rmse_eval, rmse, delta=1e-2)
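The extra `model.predict(list(test))` call above checks that prediction accepts a plain Python list of row dictionaries as well as an SFrame. A minimal hedged sketch of that usage on assumed synthetic data:

import turicreate as tc

# Assumed tiny dataset, purely for illustration.
sf = tc.SFrame({"x": [float(i) for i in range(100)],
                "label": [3.0 * i for i in range(100)]})
train, test = sf.random_split(0.8, seed=1)
model = tc.random_forest_regression.create(train, target="label",
                                           validation_set=test, verbose=False)

pred_sf = model.predict(test)         # SFrame input
pred_lst = model.predict(list(test))  # list of row dicts; same rows, same model
assert len(pred_lst) == len(pred_sf)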