Example #1
def get_pair_loss(pairwise_label_scores, pairwise_predicted_scores, params):
    """
  Paiwise learning-to-rank ranknet loss
  Check paper https://www.microsoft.com/en-us/research/publication/
  learning-to-rank-using-gradient-descent/
  for more information
  Args:
    pairwise_label_scores: a dense tensor of shape [n_data, n_data]
    pairwise_predicted_scores: a dense tensor of shape [n_data, n_data]
    n_data is the number of tweet candidates in a BatchPredictionRequest
    params: network parameters
  mask options: full_mask and diag_mask
  Returns:
    average loss over pairs defined by the masks
  """
    if params.mask == "full_mask":
        # full_mask that only covers pairs that have different labels
        # (all pairwise_label_scores = 0.5: selfs and same labels are 0s)
        mask, pair_count = masks.full_mask(pairwise_label_scores)
    elif params.mask == "diag_mask":
        # diag_mask that covers all pairs
        # (only selfs/diags are 0s)
        mask, pair_count = masks.diag_mask(pairwise_label_scores)
    else:
        mask, pair_count = masks.diag_mask(pairwise_label_scores)
    # pairwise sigmoid_cross_entropy_with_logits loss
    loss = tf.cond(
        tf.equal(pair_count, 0),
        lambda: 0., lambda: _get_average_cross_entropy_loss(
            pairwise_label_scores, pairwise_predicted_scores, mask, pair_count))
    return loss
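
The helper _get_average_cross_entropy_loss is defined elsewhere in the module. As a rough illustration only, here is a minimal sketch of what it could look like, assuming tf is TensorFlow and that the mask zeroes out excluded pairs before averaging; the names and signature come from the call above, but the body is an assumption, not the repo's implementation:

import tensorflow as tf

def _get_average_cross_entropy_loss(pairwise_label_scores,
                                    pairwise_predicted_scores,
                                    mask, pair_count):
    # Sketch: element-wise sigmoid cross-entropy between the pairwise label
    # probabilities and the predicted pairwise score differences (logits).
    per_pair_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=pairwise_label_scores, logits=pairwise_predicted_scores)
    # Zero out pairs excluded by the mask, then average over the counted
    # pairs (callers guard against pair_count == 0 via tf.cond).
    return tf.reduce_sum(per_pair_loss * mask) / tf.cast(
        pair_count, per_pair_loss.dtype)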
Example #2
def get_hinge_loss(pairwise_label_scores, pairwise_predicted_scores, params):
    """
  Paiwise learning-to-rank ranknet loss
  Check paper https://www.microsoft.com/en-us/research/publication/
  learning-to-rank-using-gradient-descent/
  for more information
  Args:
    pairwise_label_scores: a dense tensor of shape [n_data, n_data]
    pairwise_predicted_scores: a dense tensor of shape [n_data, n_data]
    n_data is the number of tweet candidates in a BatchPredictionRequest
    params: network parameters
  mask options: full_mask and diag_mask
  Returns:
    average loss over pairs defined by the masks
  """

    # only full_mask is appropriate for hinge_loss:
    # hinge_loss needs 0/1 labels, which are later
    # converted to -1/1 labels
    mask, pair_count = masks.full_mask(pairwise_label_scores)

    # pairwise hinge loss
    loss = tf.cond(
        tf.equal(pair_count, 0), lambda: 0., lambda: _get_average_hinge_loss(
            pairwise_label_scores, pairwise_predicted_scores, mask, pair_count)
    )
    return loss
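
Again, _get_average_hinge_loss lives elsewhere in the module. A minimal sketch of one plausible implementation, assuming the 0/1 pairwise labels are mapped to -1/+1 and a standard hinge is applied to the predicted pairwise scores (hypothetical body; only the signature comes from the call above):

import tensorflow as tf

def _get_average_hinge_loss(pairwise_label_scores, pairwise_predicted_scores,
                            mask, pair_count):
    # Sketch: map 0/1 labels to -1/+1, as noted in the comment above.
    signed_labels = 2.0 * pairwise_label_scores - 1.0
    # Standard hinge loss max(0, 1 - y * s) per pair.
    per_pair_loss = tf.nn.relu(1.0 - signed_labels * pairwise_predicted_scores)
    # Average over the pairs kept by full_mask (callers guard pair_count == 0).
    return tf.reduce_sum(per_pair_loss * mask) / tf.cast(
        pair_count, per_pair_loss.dtype)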
Example #3
def get_lambda_pair_loss(pairwise_label_scores, pairwise_predicted_scores,
                         params, swapped_ndcg):
    """
  Paiwise learning-to-rank lambdarank loss
  faster than the previous gradient method
  Note: this loss depends on ranknet cross-entropy
  delta NDCG is applied to ranknet cross-entropy
  Hence, it is still a gradient descent method
  Check paper http://citeseerx.ist.psu.edu/viewdoc/
  download?doi=10.1.1.180.634&rep=rep1&type=pdf for more information
  for more information
  Args:
    pairwise_label_scores: a dense tensor of shape [n_data, n_data]
    pairwise_predicted_scores: a dense tensor of shape [n_data, n_data]
    n_data is the number of tweet candidates in a BatchPredictionRequest
    params: network parameters
    swapped_ndcg: swapped ndcg of shape [n_data, n_data]
    ndcg values when swapping each pair in the prediction ranking order
  mask options: full_mask and diag_mask
  Returns:
    average loss over pairs defined by the masks
  """
    n_data = tf.shape(pairwise_label_scores)[0]
    if params.mask == "full_mask":
        # full_mask that only covers pairs that have different labels
        # (all pairwise_label_scores = 0.5: selfs and same labels are 0s)
        mask, pair_count = masks.full_mask(pairwise_label_scores)
    else:
        # diag_mask that covers all pairs
        # (only selfs/diags are 0s)
        mask, pair_count = masks.diag_mask(pairwise_label_scores)

    # pairwise sigmoid_cross_entropy_with_logits loss, re-weighted by delta NDCG
    loss = tf.cond(
        tf.equal(pair_count, 0),
        lambda: 0., lambda: _get_average_cross_entropy_loss(
            pairwise_label_scores, pairwise_predicted_scores, mask, pair_count,
            swapped_ndcg))
    return loss
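
Here _get_average_cross_entropy_loss is called with the extra swapped_ndcg argument: the RankNet cross-entropy of each pair is re-weighted by the NDCG change incurred when that pair is swapped, which is the LambdaRank idea. A minimal sketch of the same helper extended with an optional swapped_ndcg argument, under the same assumptions as the earlier sketch (hypothetical body, signature inferred from the calls):

import tensorflow as tf

def _get_average_cross_entropy_loss(pairwise_label_scores,
                                    pairwise_predicted_scores,
                                    mask, pair_count, swapped_ndcg=None):
    # Sketch: RankNet cross-entropy per pair, as in the plain ranknet loss.
    per_pair_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=pairwise_label_scores, logits=pairwise_predicted_scores)
    if swapped_ndcg is not None:
        # LambdaRank: weight each pair by the |delta NDCG| of swapping it.
        per_pair_loss = per_pair_loss * tf.abs(swapped_ndcg)
    # Mask out excluded pairs and average over the remaining ones.
    return tf.reduce_sum(per_pair_loss * mask) / tf.cast(
        pair_count, per_pair_loss.dtype)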