Code Example #1
 def setUp(self):
     session.init("test_cross_entropy")
     self.sigmoid_loss = SigmoidBinaryCrossEntropyLoss()
     self.y_list = [i % 2 for i in range(100)]
     self.predict_list = [random.random() for i in range(100)]
     self.y = session.parallelize(self.y_list, include_key=False, partition=16)
     self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
Code Example #2
class TestSigmoidBinaryCrossEntropyLoss(unittest.TestCase):
    def setUp(self):
        session.init("test_cross_entropy")
        self.sigmoid_loss = SigmoidBinaryCrossEntropyLoss()
        self.y_list = [i % 2 for i in range(100)]
        self.predict_list = [random.random() for i in range(100)]
        self.y = session.parallelize(self.y_list, include_key=False, partition=16)
        self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)

    def test_predict(self):
        for i in range(1, 10):
            np_v = 1.0 / (1.0 + np.exp(-1.0 / i))
            self.assertTrue(np.fabs(self.sigmoid_loss.predict(1.0 / i) - np_v) < consts.FLOAT_ZERO)

    def test_compute_gradient(self):
        for i in range(10):
            pred = random.random()
            y = i % 2
            grad = pred - y
            self.assertTrue(np.fabs(self.sigmoid_loss.compute_grad(y, pred) - grad) < consts.FLOAT_ZERO)

    def test_compute_hess(self):
        for i in range(10):
            pred = random.random()
            y = i % 2
            hess = pred * (1 - pred)
            self.assertTrue(np.fabs(self.sigmoid_loss.compute_hess(y, pred) - hess) < consts.FLOAT_ZERO)

    def test_compute_loss(self):
        # Reference value: binary cross-entropy computed locally by sklearn.
        sklearn_loss = metrics.log_loss(self.y_list, self.predict_list)
        # Loss computed over the parallelized (distributed) tables.
        sigmoid_loss = self.sigmoid_loss.compute_loss(self.y, self.predict)
        self.assertTrue(np.fabs(sigmoid_loss - sklearn_loss) < consts.FLOAT_ZERO)

    def tearDown(self):
        session.stop()
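
Note: the snippet above is shown without its import block. A minimal sketch of the imports it presumably relies on is given below; the FATE-specific module paths are assumptions based on the project layout of that era and may differ in the FATE release you are using.

# Standard-library / third-party imports used by the test class.
import random
import unittest

import numpy as np
from sklearn import metrics

# Assumed FATE module paths -- verify against the FATE version in use.
from arch.api import session
from federatedml.loss.cross_entropy import SigmoidBinaryCrossEntropyLoss
from federatedml.util import consts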
Code Example #3
File: cross_entropy_test.py  Project: 03040081/FATE
 def setUp(self):
     eggroll.init("test_cross_entropy")
     self.sigmoid_loss = SigmoidBinaryCrossEntropyLoss()
     self.y_list = [i % 2 for i in range(100)]
     self.predict_list = [random.random() for i in range(100)]
     self.y = eggroll.parallelize(self.y_list, include_key=False)
     self.predict = eggroll.parallelize(self.predict_list,
                                        include_key=False)
Code Example #4
 def set_loss(self, objective_param):
     loss_type = objective_param.objective
     params = objective_param.params
     LOGGER.info("set objective, objective is {}".format(loss_type))
     if self.task_type == consts.CLASSIFICATION:
         if loss_type == "cross_entropy":
             if self.num_classes == 2:
                 self.loss = SigmoidBinaryCrossEntropyLoss()
             else:
                 self.loss = SoftmaxCrossEntropyLoss()
         else:
             raise NotImplementedError("objective %s not supported yet" %
                                       (loss_type))
     elif self.task_type == consts.REGRESSION:
         if loss_type == "lse":
             self.loss = LeastSquaredErrorLoss()
         elif loss_type == "lae":
             self.loss = LeastAbsoluteErrorLoss()
         elif loss_type == "huber":
             self.loss = HuberLoss(params[0])
         elif loss_type == "fair":
             self.loss = FairLoss(params[0])
         elif loss_type == "tweedie":
             self.loss = TweedieLoss(params[0])
         elif loss_type == "log_cosh":
             self.loss = LogCoshLoss()
         else:
             raise NotImplementedError("objective %s not supported yet" %
                                       (loss_type))
     else:
         raise NotImplementedError("objective %s not supported yet" %
                                   (loss_type))
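
As a usage illustration only: set_loss expects an object exposing an objective string and a params list (consumed by the single-parameter losses such as Huber, Fair and Tweedie). The sketch below is hypothetical; ObjectiveParam is a stand-in that mimics those two attributes and is not the actual FATE parameter class.

# Hypothetical stand-in, not the real FATE parameter class; it only mimics
# the two attributes that set_loss reads.
class ObjectiveParam:
    def __init__(self, objective, params=None):
        self.objective = objective   # loss name, e.g. "cross_entropy" or "huber"
        self.params = params or []   # extra loss parameters, e.g. the Huber delta

# With task_type == consts.REGRESSION, set_loss(ObjectiveParam("huber", [1.345]))
# would select HuberLoss(1.345); with task_type == consts.CLASSIFICATION and
# num_classes == 2, set_loss(ObjectiveParam("cross_entropy")) would select
# SigmoidBinaryCrossEntropyLoss.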
Code Example #5
 def set_loss(self, loss_type):
     LOGGER.info("set loss, loss type is {}".format(loss_type))
     if self.task_type == "classification":
         if loss_type == "cross_entropy":
             if self.num_classes == 2:
                 self.loss = SigmoidBinaryCrossEntropyLoss()
             else:
                 self.loss = SoftmaxCrossEntropyLoss()
         else:
             raise NotImplementedError("Loss type %s not supported yet" %
                                       (loss_type))
     else:
         raise NotImplementedError("Loss type %s not supported yet" %
                                   (loss_type))