Example #1
def _get_attrank_cross_entropy(labels, logits):
    # logits are not guaranteed to be safe here, per the upstream statement;
    # do not use this function directly elsewhere
    results = labels * math_fns.safe_log(logits) + (
        1 - labels) * math_fns.safe_log(1 - logits)
    results = (-1) * results
    results = tf.reduce_mean(results)
    return results
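
All five examples depend on math_fns.safe_log, which this listing does not show. Below is a minimal sketch of such a helper, assuming it clips its input away from zero before taking the log; the epsilon bound is an assumption, not the repository's value.

import tensorflow as tf

def safe_log(raw_scores, name=None):
    # Hypothetical reconstruction of math_fns.safe_log: clip the input away
    # from zero so tf.math.log never sees 0 and never returns -inf.
    epsilon = 1e-10
    clipped = tf.clip_by_value(raw_scores, epsilon, 1.0 / epsilon)
    return tf.math.log(clipped, name=name)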
Example #2
def loop_body(iteration, loss, exp_sum, exps):
    temp = tf.gather(exps, [iteration])
    temp = tf.reshape(temp, [])
    exp_sum = tf.subtract(exp_sum, temp)
    # clip exp_sum for safer log
    loss = tf.add(loss, math_fns.safe_log(exp_sum))
    return tf.add(iteration, 1), loss, exp_sum, exps
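
This loop body accumulates the \ln(\sum_{i=s}^{n} \exp(score_i)) terms of the ListMLE loss by peeling one exponentiated score off the running sum per iteration. A hedged wiring sketch, with made-up scores, math_fns.safe_log as sketched above, and the same loop bound as get_listmle_loss below:

import tensorflow as tf

# Hypothetical wiring of loop_body into tf.while_loop; values are illustrative.
exps = tf.exp(tf.constant([0.7, 0.2, 0.1]))
exp_sum = tf.reduce_sum(exps)       # full sum; the loop peels one term off each step
loss = math_fns.safe_log(exp_sum)   # the term for s = 1
n_data = tf.shape(exps)[0]

def cond(iteration, loss, exp_sum, exps):
    return tf.less(iteration, n_data - 2)

iteration, loss, exp_sum, exps = tf.while_loop(
    cond, loop_body, (tf.constant(0), loss, exp_sum, exps))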
Example #3
def get_listmle_loss(labels, predicted_scores):
    """
  listwise learning-to-rank listMLE loss
  Note: Simplified MLE formula is used in here (omit the proof in here)
  \sum_{s=1}^{n-1} (-predicted_scores + ln(\sum_{i=s}^n exp(predicted_scores)))
  n is tf.shape(predicted_scores)[0]
  Check paper http://icml2008.cs.helsinki.fi/papers/167.pdf for more information
  Args:
    labels: a dense tensor of shape [n_data, 1]
    n_data is the number of tweet candidates in a BatchPredictionRequest
    predicted_scores: a dense tensor of same shape and type as labels
  Returns:
    average loss
  """
    labels = tf.reshape(labels, [-1, 1])
    n_data = tf.shape(labels)[0]
    predicted_scores = tf.reshape(predicted_scores, [-1, 1])

    predicted_scores_ordered_by_labels = _get_ordered_predicted_scores(
        labels, predicted_scores, n_data)

    loss = (-1) * tf.reduce_sum(predicted_scores)
    # the outer sum runs over s = 1 .. n_data - 1, so add back the last ordered score
    temp = tf.gather(predicted_scores_ordered_by_labels, [n_data - 1])
    temp = tf.reshape(temp, [])
    loss = tf.add(loss, temp)

    exps = tf.exp(predicted_scores_ordered_by_labels)
    exp_sum = tf.reduce_sum(exps)
    # clip exp_sum for safer log
    loss = tf.add(loss, math_fns.safe_log(exp_sum))

    iteration = tf.constant(0)

    def _cond(iteration, loss, exp_sum, exps):
        return tf.less(iteration, n_data - 2)

    def loop_body(iteration, loss, exp_sum, exps):
        temp = tf.gather(exps, [iteration])
        temp = tf.reshape(temp, [])
        exp_sum = tf.subtract(exp_sum, temp)
        # clip exp_sum for safer log
        loss = tf.add(loss, math_fns.safe_log(exp_sum))
        return tf.add(iteration, 1), loss, exp_sum, exps

    iteration, loss, exp_sum, exps = tf.while_loop(
        _cond, loop_body, (iteration, loss, exp_sum, exps))
    loss = loss / tf.cast(n_data, dtype=tf.float32)
    loss += get_listnet_loss(labels, predicted_scores)
    return loss
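
get_listmle_loss calls two helpers not shown in this listing: _get_ordered_predicted_scores and get_listnet_loss. A plausible sketch of the ordering helper, assuming it sorts the predicted scores by descending label (a reconstruction, not the repository's code), followed by an illustrative call:

import tensorflow as tf

def _get_ordered_predicted_scores(labels, predicted_scores, n_data):
    # Hypothetical reconstruction: order predicted scores so the score of the
    # highest-labeled candidate comes first, as ListMLE requires.
    sorted_indices = tf.nn.top_k(tf.reshape(labels, [-1]), k=n_data).indices
    return tf.gather(predicted_scores, sorted_indices)

# Illustrative call with made-up values (also assumes get_listnet_loss exists):
labels = tf.constant([[3.0], [1.0], [2.0]])
predicted_scores = tf.constant([[0.2], [0.7], [0.1]])
loss = get_listmle_loss(labels, predicted_scores)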
Example #4
def _get_listnet_cross_entropy(labels, logits):
    """
  Used in listnet
  cross entropy on top-one probabilities
  between ideal/label top-one probabilities
  and predicted/logits top-one probabilities
  for a query/batch/batchPreidictionRequest
  """
    # it is safe to use log on logits
    # that come from _get_top_one_probs
    # do not use this function directly elsewhere
    results = (-1) * labels * math_fns.safe_log(logits)
    return results
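
The comments above reference _get_top_one_probs, which is not part of this listing. In the ListNet paper the top-one probability of an item reduces to a softmax over the scores, so a plausible sketch is the following; treating it as a plain softmax is an assumption about this repository:

import tensorflow as tf

def _get_top_one_probs(scores):
    # Hypothetical sketch: top-one probabilities as a softmax over the batch,
    # which keeps every value strictly inside (0, 1) and makes the log safe.
    return tf.nn.softmax(tf.reshape(scores, [-1]))

# Illustrative use with made-up scores:
labels_p = _get_top_one_probs(tf.constant([3.0, 1.0, 2.0]))
logits_p = _get_top_one_probs(tf.constant([0.2, 0.7, 0.1]))
listnet_loss = tf.reduce_sum(_get_listnet_cross_entropy(labels_p, logits_p))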
Example #5
def loop_body(iteration, loss, exp_sum, G_t):
    temp_exp = tf.gather(exps, [iteration])
    temp_exp = tf.reshape(temp_exp, [])
    exp_sum = tf.add(exp_sum, temp_exp)

    temp_reward = tf.gather(dcg_k, [iteration])
    temp_reward = tf.reshape(temp_reward, [])
    G_t = tf.add(G_t, temp_reward)

    # clip exp_sum for safer log
    log_exp_sum = math_fns.safe_log(exp_sum)

    negative_prediction = (-1) * tf.gather(sorted_predictions, [iteration])
    negative_prediction = tf.reshape(negative_prediction, [])

    temp_sum = tf.add(negative_prediction, log_exp_sum)
    temp_loss = tf.multiply(G_t, temp_sum)
    loss = tf.add(loss, temp_loss)
    return tf.subtract(iteration, 1), loss, exp_sum, G_t
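
Unlike the ListMLE loop in Example #2, this body walks backwards (the counter is decremented) while exp_sum grows from the tail and G_t accumulates the per-position gains in dcg_k. A hedged wiring sketch, assuming sorted_predictions, exps, and dcg_k are rank-1 tensors ordered by label as in the earlier examples; the gain values are made up:

import tensorflow as tf

# Hypothetical context for loop_body; all values are illustrative.
sorted_predictions = tf.constant([0.7, 0.2, 0.1])
exps = tf.exp(sorted_predictions)
dcg_k = tf.constant([1.0, 0.63, 0.5])  # assumed per-position DCG gains
n_data = tf.shape(exps)[0]

def cond(iteration, loss, exp_sum, G_t):
    return tf.greater_equal(iteration, 0)  # walk from the last item to the first

iteration, loss, exp_sum, G_t = tf.while_loop(
    cond, loop_body,
    (n_data - 1, tf.constant(0.0), tf.constant(0.0), tf.constant(0.0)))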