Example 1
def setUp(self):
    session.init("test_least_squared_error_loss")
    self.lse_loss = LeastSquaredErrorLoss()
    self.y_list = [i % 2 for i in range(100)]
    self.predict_list = [random.random() for i in range(100)]
    self.y = session.parallelize(self.y_list, include_key=False)
    self.predict = session.parallelize(self.predict_list, include_key=False)
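The fixture above opens a session but the snippet does not show its release; a matching tearDown is sketched below. The session.stop() call is an assumption about this FATE version's arch.api.session API, not part of the original example.

def tearDown(self):
    # Hypothetical cleanup: release the session opened in setUp
    # (session.stop() is assumed to exist in this FATE version).
    session.stop()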
Example 2
import random
import unittest

import numpy as np
from sklearn import metrics

# The three imports below are assumed from the FATE repository layout this
# test belongs to; adjust the paths to match your FATE version.
from arch.api import session
from federatedml.util import consts
from federatedml.loss.regression_loss import LeastSquaredErrorLoss


class TestLeastSquaredErrorLoss(unittest.TestCase):
    def setUp(self):
        session.init("test_least_squared_error_loss")
        self.lse_loss = LeastSquaredErrorLoss()
        self.y_list = [i % 2 for i in range(100)]
        self.predict_list = [random.random() for i in range(100)]
        self.y = session.parallelize(self.y_list, include_key=False)
        self.predict = session.parallelize(self.predict_list, include_key=False)

    def test_predict(self):
        for y in self.y_list:
            y_pred = self.lse_loss.predict(y)
            self.assertTrue(np.fabs(y_pred - y) < consts.FLOAT_ZERO)

    def test_compute_gradient(self):
        for y, y_pred in zip(self.y_list, self.predict_list):
            lse_grad = self.lse_loss.compute_grad(y, y_pred)
            grad = 2 * (y_pred - y)
            self.assertTrue(np.fabs(lse_grad - grad) < consts.FLOAT_ZERO)

    def test_compute_hess(self):
        for y, y_pred in zip(self.y_list, self.predict_list):
            hess = 2
            lse_hess = self.lse_loss.compute_hess(y, y_pred)
            self.assertTrue(np.fabs(lse_hess - hess) < consts.FLOAT_ZERO)

    def test_compute_loss(self):
        sklearn_loss = metrics.mean_squared_error(self.y_list, self.predict_list)
        lse_loss = self.lse_loss.compute_loss(self.y, self.predict)
        self.assertTrue(np.fabs(lse_loss - sklearn_loss) < consts.FLOAT_ZERO)
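To run this test class directly, a standard unittest entry point can be appended at module level; a minimal sketch:

if __name__ == "__main__":
    unittest.main()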
Example 3
def set_loss(self, objective_param):
    loss_type = objective_param.objective
    params = objective_param.params
    LOGGER.info("set objective, objective is {}".format(loss_type))
    if self.task_type == consts.CLASSIFICATION:
        if loss_type == "cross_entropy":
            if self.num_classes == 2:
                self.loss = SigmoidBinaryCrossEntropyLoss()
            else:
                self.loss = SoftmaxCrossEntropyLoss()
        else:
            raise NotImplementedError("objective %s not supported yet" %
                                      (loss_type))
    elif self.task_type == consts.REGRESSION:
        if loss_type == "lse":
            self.loss = LeastSquaredErrorLoss()
        elif loss_type == "lae":
            self.loss = LeastAbsoluteErrorLoss()
        elif loss_type == "huber":
            self.loss = HuberLoss(params[0])
        elif loss_type == "fair":
            self.loss = FairLoss(params[0])
        elif loss_type == "tweedie":
            self.loss = TweedieLoss(params[0])
        elif loss_type == "log_cosh":
            self.loss = LogCoshLoss()
        else:
            raise NotImplementedError("objective %s not supported yet" %
                                      (loss_type))
    else:
        raise NotImplementedError("objective %s not supported yet" %
                                  (loss_type))
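A minimal sketch of how this dispatcher might be called; objective_param is faked here with types.SimpleNamespace, exposing only the two attributes the method reads (objective and params), and booster stands in for a hypothetical boosting-tree instance:

from types import SimpleNamespace

# Hypothetical stand-in for the real FATE objective parameter object.
objective_param = SimpleNamespace(objective="huber", params=[1.0])
booster.task_type = consts.REGRESSION  # booster: hypothetical model instance
booster.set_loss(objective_param)      # sets booster.loss = HuberLoss(1.0)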