def _testRegularizationPenalty(self, var, l1_scale, l2_scale,
                               l1_penalty_truth, l2_penalty_truth):
    """Regularization test for a given variable value."""
    with tf.Graph().as_default() as g:
        tf.Variable(var, trainable=True)  # Register variable in the graph

        # Get L1 loss
        l1_hparams = tf.contrib.training.HParams(l1=l1_scale, l2=None)
        l1_penalty = compute_regularization_penalty(l1_hparams)

        # Get L2 loss
        l2_hparams = tf.contrib.training.HParams(l1=None, l2=l2_scale)
        l2_penalty = compute_regularization_penalty(l2_hparams)

        # Get elastic net loss
        elastic_net_hparams = tf.contrib.training.HParams(l1=l1_scale, l2=l2_scale)
        elastic_net_penalty = compute_regularization_penalty(elastic_net_hparams)

        with tf.Session(graph=g) as sess:
            sess.run([tf.global_variables_initializer()])
            self.assertAlmostEqual(l1_penalty.eval(), l1_penalty_truth)
            self.assertAlmostEqual(l2_penalty.eval(), l2_penalty_truth)
            self.assertAlmostEqual(elastic_net_penalty.eval(),
                                   l1_penalty_truth + l2_penalty_truth)
def compute_loss(hparams, scores, labels, group_size, weight):
    """Computes ranking/classification loss with regularization."""
    if weight is None:
        raise ValueError("weight should not be None")
    return compute_rank_clf_loss(
        hparams, scores, labels, group_size, weight
    ) + tf.reduce_mean(weight) * compute_regularization_penalty(hparams)
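# compute_regularization_penalty is called above but not shown in this section.
# The sketch below is a minimal, hypothetical hparams-based implementation: its
# body, the use of tf.trainable_variables(), and the tf.nn.l2_loss convention
# (sum(x**2) / 2) for the L2 term are assumptions, not the confirmed code.
def compute_regularization_penalty(hparams):
    """Sketch: sums optional L1/L2 penalties over all trainable variables."""
    variables = tf.trainable_variables()
    penalty = tf.constant(0.0)
    if hparams.l1 is not None:
        penalty += hparams.l1 * tf.add_n(
            [tf.reduce_sum(tf.abs(v)) for v in variables])
    if hparams.l2 is not None:
        penalty += hparams.l2 * tf.add_n(
            [tf.nn.l2_loss(v) for v in variables])
    return penalty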
def _testRegularizationPenalty(self, var, l1_scale, l2_scale,
                               l1_penalty_truth, l2_penalty_truth):
    """Regularization test for a given variable value."""
    # Get L1 loss
    l1_penalty = compute_regularization_penalty(l1_scale, None, [var])

    # Get L2 loss
    l2_penalty = compute_regularization_penalty(None, l2_scale, [var])

    # Get elastic net loss
    elastic_net_penalty = compute_regularization_penalty(l1_scale, l2_scale, [var])

    self.assertAlmostEqual(l1_penalty.numpy(), l1_penalty_truth, places=self.places)
    self.assertAlmostEqual(l2_penalty.numpy(), l2_penalty_truth, places=self.places)
    self.assertAlmostEqual(elastic_net_penalty.numpy(),
                           l1_penalty_truth + l2_penalty_truth,
                           places=self.places)
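# Hypothetical invocation of the eager-mode helper above. The variable value
# and expected penalties are illustrative only, and assume an L1 term of
# l1 * sum(|x|) and an L2 term of l2 * sum(x**2) / 2 (tf.nn.l2_loss).
def testRegularizationPenalty(self):
    var = tf.Variable([1.0, -2.0])
    # l1 = 0.1 -> 0.1 * (1 + 2) = 0.3; l2 = 0.2 -> 0.2 * (1 + 4) / 2 = 0.5
    self._testRegularizationPenalty(var, 0.1, 0.2, 0.3, 0.5)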