def test_softmax_loss(self):
  """Verifies SoftmaxLoss against hand-computed cross-entropy values."""
  logits = [[1., 3., 2.], [1., 2., 3.], [1., 2., 3.]]
  targets = [[0., 0., 1.], [0., 0., 2.], [0., 0., 0.]]
  example_weights = [[2.], [1.], [1.]]
  loss_fn = losses.SoftmaxLoss()

  # Unweighted: the third list has no relevant items and contributes nothing.
  expected_unweighted = -(
      ln(_softmax(logits[0])[2]) + ln(_softmax(logits[1])[2]) * 2.) / 3.
  self.assertAlmostEqual(
      loss_fn(targets, logits).numpy(), expected_unweighted, places=5)

  # Per-list weights scale each list's contribution.
  expected_weighted = -(
      ln(_softmax(logits[0])[2]) * 2. +
      ln(_softmax(logits[1])[2]) * 2. * 1.) / 3.
  self.assertAlmostEqual(
      loss_fn(targets, logits, example_weights).numpy(),
      expected_weighted,
      places=5)

  # Test LambdaWeight: with a DCG lambda weight, each term is discounted
  # by 1 / log1p(rank).
  discount = lambda rank: 1. / tf.math.log1p(rank)
  dcg_lambda = losses.DCGLambdaWeight(rank_discount_fn=discount)
  loss_fn = losses.SoftmaxLoss(lambda_weight=dcg_lambda)
  expected_lambda = -(
      ln(_softmax(logits[0])[2]) / ln(1. + 2.) +
      ln(_softmax(logits[1])[2]) * 2. / ln(1. + 1.)) / 3.
  self.assertAlmostEqual(
      loss_fn(targets, logits).numpy(), expected_lambda, places=5)
def setUp(self):
  """Prepares the shared fixtures used by the functional-model tests."""
  super(FunctionalRankingModelTest, self).setUp()
  # Training configuration shared across tests.
  self.optimizer = tf.keras.optimizers.Adagrad()
  self.loss = losses.SoftmaxLoss()
  self.metrics = [metrics.NDCGMetric("ndcg_5", topn=5)]
  # Feature specifications for context and per-example inputs.
  self.context_feature_columns = _context_feature_columns()
  self.example_feature_columns = _example_feature_columns()
def test_softmax_loss_with_invalid_labels(self):
  """Entries labeled -1 are masked out before the softmax is computed."""
  logits = [[1., 3., 2.]]
  targets = [[0., -1., 1.]]
  loss_fn = losses.SoftmaxLoss()
  # With the -1 entry masked, only scores [1, 2] remain and the relevant
  # item sits at index 1 of the reduced list.
  expected = -ln(_softmax([1, 2])[1])
  self.assertAlmostEqual(
      loss_fn(targets, logits).numpy(), expected, places=5)
def test_model_compile_keras(self):
  """Checks create_keras_model wires the optimizer and loss through."""
  # Specify the training configuration (optimizer, loss, metrics).
  opt = tf.keras.optimizers.RMSprop()
  softmax_loss = losses.SoftmaxLoss()
  eval_metrics = [metrics.NDCGMetric("ndcg_5", topn=5)]
  ranker = model.create_keras_model(
      network=self.network,
      loss=softmax_loss,
      metrics=eval_metrics,
      optimizer=opt,
      size_feature_name=None)
  # The compiled model must hold the very objects that were passed in.
  self.assertIs(ranker.optimizer, opt)
  self.assertIs(ranker.loss, softmax_loss)
def test_listwise_losses_are_serializable(self):
  """Round-trips each listwise loss through (de)serialization."""
  lw = self._lambda_weight
  listwise_losses = (
      losses.SoftmaxLoss(lambda_weight=lw),
      losses.ListMLELoss(lambda_weight=lw),
      losses.ApproxMRRLoss(lambda_weight=lw),
      losses.ApproxNDCGLoss(lambda_weight=lw),
  )
  for loss_obj in listwise_losses:
    self.assertIsLossSerializable(loss_obj)
  # TODO: Debug assertIsLossSerializable for Gumbel loss. Right now,
  # the loss values got from obj and the deserialized don't match exactly.
  self.assertIsSerializable(losses.GumbelApproxNDCGLoss(seed=1))