import random
import unittest

import numpy as np

# Import paths below are assumed from the surrounding project layout;
# adjust them to wherever session, consts and the loss classes actually live.
from arch.api import session
from federatedml.loss.cross_entropy import SoftmaxCrossEntropyLoss
from federatedml.util import consts


class TestSoftmaxCrossEntropyLoss(unittest.TestCase):
    def setUp(self):
        session.init("test_cross_entropy")
        self.softmax_loss = SoftmaxCrossEntropyLoss()
        self.y_list = [i % 5 for i in range(100)]
        self.predict_list = [np.array([random.random() for i in range(5)])
                             for j in range(100)]
        self.y = session.parallelize(self.y_list, include_key=False, partition=16)
        self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)

    def test_predict(self):
        for i in range(10):
            scores = [random.random() for j in range(5)]
            pred_arr = np.asarray(scores, dtype="float64")
            mx = pred_arr.max()
            # numerically stable softmax: subtract the max before exponentiating
            predict = np.exp(pred_arr - mx) / sum(np.exp(pred_arr - mx))
            softmaxloss_predict = self.softmax_loss.predict(pred_arr)
            # every element must agree within tolerance
            self.assertTrue((np.fabs(predict - softmaxloss_predict) < consts.FLOAT_ZERO).all())

    def test_compute_grad(self):
        for i in range(10):
            pred = np.asarray([random.random() for j in range(5)], dtype="float64")
            label = random.randint(0, 4)
            softmaxloss_grad = self.softmax_loss.compute_grad(label, pred)
            # gradient of softmax cross-entropy: p - onehot(label)
            grad = pred.copy()
            grad[label] -= 1
            self.assertTrue((np.fabs(grad - softmaxloss_grad) < consts.FLOAT_ZERO).all())

    def test_compute_hess(self):
        for i in range(10):
            pred = np.asarray([random.random() for j in range(5)], dtype="float64")
            label = random.randint(0, 4)
            softmaxloss_hess = self.softmax_loss.compute_hess(label, pred)
            # diagonal Hessian of softmax cross-entropy: p * (1 - p)
            hess = pred * (1 - pred)
            self.assertTrue((np.fabs(hess - softmaxloss_hess) < consts.FLOAT_ZERO).all())

    def test_compute_loss(self):
        softmax_loss = self.softmax_loss.compute_loss(self.y, self.predict)
        # reference value: mean negative log-likelihood of the true class
        loss = sum(-np.log(pred[yi])
                   for yi, pred in zip(self.y_list, self.predict_list)) / len(self.y_list)
        self.assertTrue(np.fabs(softmax_loss - loss) < consts.FLOAT_ZERO)

    def tearDown(self):
        session.stop()


if __name__ == "__main__":
    unittest.main()
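# A standalone sanity check, not part of the original suite: it verifies by
# finite differences that the closed forms asserted above (gradient
# p - onehot(y), diagonal Hessian p * (1 - p), with p = softmax(logits))
# really are the derivatives of the softmax cross-entropy loss. All names
# here are local to this sketch.
def _softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()


def _ce(z, y):
    # cross-entropy of the true class under softmax(z)
    return -np.log(_softmax(z)[y])


def _check_softmax_ce_derivatives(n_classes=5, seed=0):
    rng = np.random.default_rng(seed)
    z = rng.normal(size=n_classes)
    y = 2
    p = _softmax(z)
    onehot = np.eye(n_classes)[y]

    # central differences for the gradient
    eps = 1e-6
    num_grad = np.array([
        (_ce(z + eps * np.eye(n_classes)[k], y)
         - _ce(z - eps * np.eye(n_classes)[k], y)) / (2 * eps)
        for k in range(n_classes)
    ])
    assert np.allclose(num_grad, p - onehot, atol=1e-6)

    # second central differences for the Hessian diagonal
    # (a larger step keeps the quotient numerically stable)
    eps = 1e-4
    num_hess = np.array([
        (_ce(z + eps * np.eye(n_classes)[k], y) - 2 * _ce(z, y)
         + _ce(z - eps * np.eye(n_classes)[k], y)) / eps ** 2
        for k in range(n_classes)
    ])
    assert np.allclose(num_hess, p * (1 - p), atol=1e-5)


_check_softmax_ce_derivatives()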
def set_loss(self, objective_param):
    loss_type = objective_param.objective
    params = objective_param.params
    LOGGER.info("set objective, objective is {}".format(loss_type))
    if self.task_type == consts.CLASSIFICATION:
        if loss_type == "cross_entropy":
            # binary targets get the sigmoid form, multiclass the softmax form
            if self.num_classes == 2:
                self.loss = SigmoidBinaryCrossEntropyLoss()
            else:
                self.loss = SoftmaxCrossEntropyLoss()
        else:
            raise NotImplementedError("objective %s not supported yet" % loss_type)
    elif self.task_type == consts.REGRESSION:
        if loss_type == "lse":
            self.loss = LeastSquaredErrorLoss()
        elif loss_type == "lae":
            self.loss = LeastAbsoluteErrorLoss()
        elif loss_type == "huber":
            # single-hyper-parameter losses read their parameter from params[0]
            self.loss = HuberLoss(params[0])
        elif loss_type == "fair":
            self.loss = FairLoss(params[0])
        elif loss_type == "tweedie":
            self.loss = TweedieLoss(params[0])
        elif loss_type == "log_cosh":
            self.loss = LogCoshLoss()
        else:
            raise NotImplementedError("objective %s not supported yet" % loss_type)
    else:
        raise NotImplementedError("objective %s not supported yet" % loss_type)
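# A hedged usage sketch, not from the source: ObjectiveParam below is a
# hypothetical stand-in for whatever object carries `.objective` and
# `.params` into set_loss; the real project presumably defines its own
# parameter class.
from collections import namedtuple

ObjectiveParam = namedtuple("ObjectiveParam", ["objective", "params"])

# e.g. a regression booster picking the Huber loss, with params[0] as the
# Huber delta ("booster" is a hypothetical instance owning set_loss):
#
#     booster.task_type = consts.REGRESSION
#     booster.set_loss(ObjectiveParam(objective="huber", params=[1.35]))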
def set_loss(self, loss_type):
    LOGGER.info("set loss, loss type is {}".format(loss_type))
    if self.task_type == "classification":
        if loss_type == "cross_entropy":
            # binary targets get the sigmoid form, multiclass the softmax form
            if self.num_classes == 2:
                self.loss = SigmoidBinaryCrossEntropyLoss()
            else:
                self.loss = SoftmaxCrossEntropyLoss()
        else:
            raise NotImplementedError("Loss type %s not supported yet" % loss_type)
    else:
        raise NotImplementedError("Loss type %s not supported yet" % loss_type)
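# A minimal alternative sketch (an assumption, not the project's API): the
# elif chains repeated across both set_loss variants could collapse into a
# dispatch table, keeping the binary/multiclass special case and the
# per-loss hyper-parameter handling in one place.
def _make_classification_loss(num_classes):
    # cross-entropy is the only classification objective supported above
    if num_classes == 2:
        return SigmoidBinaryCrossEntropyLoss()
    return SoftmaxCrossEntropyLoss()


_REGRESSION_LOSSES = {
    "lse": lambda params: LeastSquaredErrorLoss(),
    "lae": lambda params: LeastAbsoluteErrorLoss(),
    "huber": lambda params: HuberLoss(params[0]),
    "fair": lambda params: FairLoss(params[0]),
    "tweedie": lambda params: TweedieLoss(params[0]),
    "log_cosh": lambda params: LogCoshLoss(),
}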