def normalized_discounted_cumulative_gain(
    labels,
    predictions,
    weights=None,
    topn=None,
    name=None,
    gain_fn=_DEFAULT_GAIN_FN,
    rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN):
  """Computes normalized discounted cumulative gain (NDCG).

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.
    gain_fn: (function) Transforms labels.
    rank_discount_fn: (function) The rank discount function.

  Returns:
    A metric for the weighted normalized discounted cumulative gain of the
    batch.
  """
  ndcg_metric = metrics_impl.NDCGMetric(name, topn, gain_fn, rank_discount_fn)
  scope_inputs = (labels, predictions, weights)
  with tf.compat.v1.name_scope(ndcg_metric.name,
                               'normalized_discounted_cumulative_gain',
                               scope_inputs):
    # Per-list NDCG values and their list-level weights, averaged into a
    # single streaming metric.
    per_list_values, per_list_wts = ndcg_metric.compute(labels, predictions,
                                                        weights)
    return tf.compat.v1.metrics.mean(per_list_values, per_list_wts)
def normalized_discounted_cumulative_gain_NEW(
    labels,
    predictions,
    weights=None,
    topn=None,
    name=None,
    gain_fn=_DEFAULT_GAIN_FN,
    rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN):
  """Computes normalized discounted cumulative gain (NDCG).

  NOTE(review): this mirrors `normalized_discounted_cumulative_gain` exactly
  except for the name scope; presumably a transitional/experimental copy —
  confirm whether callers should migrate to one of the two.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.
    gain_fn: (function) Transforms labels.
    rank_discount_fn: (function) The rank discount function.

  Returns:
    A metric for the weighted normalized discounted cumulative gain of the
    batch.
  """
  metric = metrics_impl.NDCGMetric(name, topn, gain_fn, rank_discount_fn)
  with tf.compat.v1.name_scope(metric.name,
                               'normalized_discounted_cumulative_gain_NEW',
                               (labels, predictions, weights)):
    per_list_ndcg, per_list_weights = metric.compute(labels, predictions,
                                                     weights)
    return tf.compat.v1.metrics.mean(per_list_ndcg, per_list_weights)
def __init__(self,
             name=None,
             topn=None,
             gain_fn=_DEFAULT_GAIN_FN,
             rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN,
             dtype=None,
             **kwargs):
  """Initializer.

  Args:
    name: A string used as the name for this metric.
    topn: A cutoff for how many examples to consider for this metric.
    gain_fn: (function) Transforms labels.
    rank_discount_fn: (function) The rank discount function.
    dtype: Data type of the metric output. See `tf.keras.metrics.Metric`.
    **kwargs: Other keyword arguments forwarded to the base metric class.
  """
  super(NDCGMetric, self).__init__(name=name, dtype=dtype, **kwargs)
  # Record the configuration, then build the underlying implementation
  # from the recorded values.
  self._topn = topn
  self._gain_fn = gain_fn
  self._rank_discount_fn = rank_discount_fn
  self._metric = metrics_impl.NDCGMetric(
      name=name,
      topn=self._topn,
      gain_fn=self._gain_fn,
      rank_discount_fn=self._rank_discount_fn)
def __init__(self,
             name=None,
             topn=None,
             gain_fn=None,
             rank_discount_fn=None,
             dtype=None,
             ragged=False,
             **kwargs):
  """Initializer.

  Args:
    name: A string used as the name for this metric.
    topn: A cutoff for how many examples to consider for this metric.
    gain_fn: (function) Transforms labels. Defaults to `utils.pow_minus_1`
      when not provided.
    rank_discount_fn: (function) The rank discount function. Defaults to
      `utils.log2_inverse` when not provided.
    dtype: Data type of the metric output. See `tf.keras.metrics.Metric`.
    ragged: Whether the inputs are ragged; forwarded to
      `metrics_impl.NDCGMetric`.
    **kwargs: Other keyword arguments forwarded to the base metric class.
  """
  super(NDCGMetric, self).__init__(name=name, dtype=dtype, **kwargs)
  self._topn = topn
  # NOTE(review): `or` (not `is None`) means any falsy callable would also be
  # replaced by the default — presumably intentional since functions are
  # always truthy.
  self._gain_fn = gain_fn or utils.pow_minus_1
  self._rank_discount_fn = rank_discount_fn or utils.log2_inverse
  self._metric = metrics_impl.NDCGMetric(
      name=name,
      topn=topn,
      gain_fn=self._gain_fn,
      rank_discount_fn=self._rank_discount_fn,
      ragged=ragged)
def compute_mean(metric_key,
                 labels,
                 predictions,
                 weights=None,
                 topn=None,
                 name=None):
  """Returns the mean of the specified metric given the inputs.

  Args:
    metric_key: A key in `RankingMetricKey`.
    labels: A `Tensor` of the same shape as `predictions` representing
      relevance.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: An `integer` specifying the cutoff of how many items are considered
      in the metric.
    name: A `string` used as the name for this metric.

  Returns:
    A scalar as the computed metric.
  """
  # Map each key to a factory so only the requested metric object is
  # constructed (the previous version eagerly built all of them per call).
  metric_factories = {
      RankingMetricKey.ARP:
          lambda: metrics_impl.ARPMetric(metric_key),
      RankingMetricKey.MRR:
          lambda: metrics_impl.MRRMetric(metric_key, topn),
      RankingMetricKey.NDCG:
          lambda: metrics_impl.NDCGMetric(name, topn),
      RankingMetricKey.DCG:
          lambda: metrics_impl.DCGMetric(name, topn),
      RankingMetricKey.PRECISION:
          lambda: metrics_impl.PrecisionMetric(name, topn),
      RankingMetricKey.RECALL:
          lambda: metrics_impl.RecallMetric(name, topn),
      RankingMetricKey.MAP:
          lambda: metrics_impl.MeanAveragePrecisionMetric(name, topn),
      RankingMetricKey.ORDERED_PAIR_ACCURACY:
          lambda: metrics_impl.OPAMetric(name),
      RankingMetricKey.BPREF:
          lambda: metrics_impl.BPrefMetric(name, topn),
      RankingMetricKey.HITS:
          lambda: metrics_impl.HitsMetric(metric_key, topn),
  }
  assert metric_key in metric_factories, ('metric_key %s not supported.' %
                                          metric_key)
  # TODO: Add mask argument for metric.compute() call
  metric, weight = metric_factories[metric_key]().compute(
      labels, predictions, weights)
  # Weighted mean over the batch; div_no_nan yields 0 when total weight is 0.
  return tf.compat.v1.div_no_nan(tf.reduce_sum(input_tensor=metric * weight),
                                 tf.reduce_sum(input_tensor=weight))