def ordered_pair_accuracy(labels, predictions, weights=None, name=None):
  """Computes the percentage of correctly ordered pairs.

  For any pair of examples, we compare their orders determined by `labels` and
  `predictions`. They are correctly ordered if the two orders are compatible.
  That is, for labels l_i > l_j, the pair is correctly ordered when the
  predictions satisfy s_i > s_j, and the weight for this pair is the weight of
  the example with label l_i.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape as `predictions` or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    name: A string used as the name for this metric.

  Returns:
    A metric for the accuracy of ordered pairs.
  """
  metric = metrics_impl.OPAMetric(name)
  with tf.compat.v1.name_scope(metric.name, 'ordered_pair_accuracy',
                               (labels, predictions, weights)):
    correct_pairs, pair_weights = metric.compute(labels, predictions, weights)
    return tf.compat.v1.metrics.mean(correct_pairs, pair_weights)
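# A minimal usage sketch for `ordered_pair_accuracy`. The helper below is
# hypothetical (not part of the module's API) and assumes TF1-style graph
# execution, since `tf.compat.v1.metrics.mean` creates local variables that
# must be initialized before the update op runs.
def _ordered_pair_accuracy_example():
  """Computes OPA on a toy list; the expected result is 2/3."""
  labels = tf.constant([[0., 1., 2.]])
  predictions = tf.constant([[0.3, 0.2, 0.5]])
  opa, update_op = ordered_pair_accuracy(labels, predictions)
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.local_variables_initializer())
    sess.run(update_op)
    # Of the three pairs with l_i > l_j, only (l_1, l_0) is mis-ordered by
    # the scores (0.2 < 0.3), so OPA = 2/3.
    return sess.run(opa)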
def compute_mean(metric_key,
                 labels,
                 predictions,
                 weights=None,
                 topn=None,
                 name=None):
  """Returns the mean of the specified metric given the inputs.

  Args:
    metric_key: A key in `RankingMetricKey`.
    labels: A `Tensor` of the same shape as `predictions` representing
      relevance.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape as `predictions` or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: An `integer` specifying the cutoff of how many items are considered
      in the metric.
    name: A `string` used as the name for this metric.

  Returns:
    A scalar as the computed metric.
  """
  metric_dict = {
      RankingMetricKey.ARP: metrics_impl.ARPMetric(metric_key),
      RankingMetricKey.MRR: metrics_impl.MRRMetric(metric_key, topn),
      RankingMetricKey.NDCG: metrics_impl.NDCGMetric(name, topn),
      RankingMetricKey.DCG: metrics_impl.DCGMetric(name, topn),
      RankingMetricKey.PRECISION: metrics_impl.PrecisionMetric(name, topn),
      RankingMetricKey.RECALL: metrics_impl.RecallMetric(name, topn),
      RankingMetricKey.MAP: metrics_impl.MeanAveragePrecisionMetric(name, topn),
      RankingMetricKey.ORDERED_PAIR_ACCURACY: metrics_impl.OPAMetric(name),
      RankingMetricKey.BPREF: metrics_impl.BPrefMetric(name, topn),
      RankingMetricKey.HITS: metrics_impl.HitsMetric(metric_key, topn),
  }
  assert metric_key in metric_dict, ('metric_key %s not supported.' %
                                     metric_key)
  # TODO: Add mask argument for metric.compute() call.
  metric, weight = metric_dict[metric_key].compute(labels, predictions,
                                                   weights)
  return tf.compat.v1.div_no_nan(
      tf.reduce_sum(input_tensor=metric * weight),
      tf.reduce_sum(input_tensor=weight))
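# A minimal sketch of calling `compute_mean` (hypothetical helper, values are
# illustrative): computes NDCG@3 for a single list. Unlike
# `ordered_pair_accuracy`, this returns a plain scalar tensor rather than a
# (metric, update_op) pair, so no local-variable initialization is needed.
def _compute_mean_example():
  labels = tf.constant([[1., 0., 2.]])
  predictions = tf.constant([[0.5, 0.3, 0.4]])
  # The scores rank the items as [0, 2, 1] while the ideal order by label is
  # [2, 0, 1], so the result is an NDCG@3 value below 1.
  return compute_mean(
      RankingMetricKey.NDCG, labels, predictions, topn=3, name='ndcg_example')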
def __init__(self, name=None, dtype=None, **kwargs):
  super(OPAMetric, self).__init__(name=name, dtype=dtype, **kwargs)
  # Delegate the pairwise computation to the shared implementation so the
  # Keras-style metric stays consistent with the TF1 metric above.
  self._metric = metrics_impl.OPAMetric(name=name)
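# Usage sketch for the Keras-style wrapper, assuming this constructor belongs
# to a class that follows the standard `tf.keras.metrics.Metric` protocol
# (update_state/result), as the Keras ranking metrics do:
#
#   opa = OPAMetric(name='opa')
#   opa.update_state([[0., 1., 2.]], [[0.3, 0.2, 0.5]])
#   opa.result()  # ~0.667, matching the TF1 metric above.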