def _log_loss_metrics(labels, preds, class_ids, loss, weights):
    with tf.name_scope("metrics"):
        labels_mean = _indicator_labels_mean(labels=labels,
                                             weights=weights,
                                             name="label/mean")
        average_loss = tf.metrics.mean(loss, name="average_loss")
        accuracy = tf.metrics.accuracy(labels,
                                       class_ids,
                                       weights=weights,
                                       name="accuracy")
        precision = tf.metrics.precision(labels,
                                         class_ids,
                                         weights=weights,
                                         name="precision")
        recall = tf.metrics.recall(labels,
                                   class_ids,
                                   weights=weights,
                                   name="recall")
        accuracy_baseline = _accuracy_baseline(labels_mean)
        auc = _auc(labels, preds, weights=weights, name="auc")
        metric_op = {
            'average_loss': average_loss,
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'accuracy_baseline': accuracy_baseline,
            'auc': auc
        }
        return metric_op
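Note: _indicator_labels_mean, _accuracy_baseline, and _auc are helpers that these snippets call but do not define. Below is a minimal sketch of what they could look like, built on plain tf.metrics primitives; this is an assumption, and the actual implementations may differ.

import tensorflow as tf

def _indicator_labels_mean(labels, weights=None, name=None):
    # Weighted mean of the 0/1 indicator labels, i.e. the positive rate.
    return tf.metrics.mean(tf.to_float(labels), weights=weights, name=name)

def _accuracy_baseline(labels_mean):
    # Accuracy of always predicting the majority class:
    # max(positive rate, 1 - positive rate), applied to both the value
    # tensor and the update op returned by the labels_mean metric.
    value, update_op = labels_mean
    return (tf.maximum(value, 1.0 - value),
            tf.maximum(update_op, 1.0 - update_op))

def _auc(labels, predictions, weights=None, curve="ROC", name=None):
    # Thin wrapper around the streaming AUC metric.
    return tf.metrics.auc(labels=labels, predictions=predictions,
                          weights=weights, curve=curve, name=name)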
Example #2
def _get_metrics(labels, logistic, class_ids, loss):
    with tf.name_scope(None, "metrics"):
        average_loss = tf.metrics.mean(loss, name="average_loss")
        accuracy = tf.metrics.accuracy(labels, class_ids, name="accuracy")
        precision = tf.metrics.precision(labels, class_ids, name="precision")
        recall = tf.metrics.recall(labels, class_ids, name="recall")
        auc = _auc(labels, logistic, name="auc")
        metric_op = {"average_loss": average_loss,
                     "accuracy": accuracy,
                     "precision": precision,
                     "recall": recall,
                     "auc": auc}
        return metric_op
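In a model_fn, a dict like the one returned above is passed straight to eval_metric_ops. A minimal, hypothetical sketch follows; the model itself is invented for illustration.

import tensorflow as tf

def model_fn(features, labels, mode):
    # Hypothetical minimal binary classifier over a numeric feature "x";
    # labels are assumed to be float32 0.0/1.0 with shape [batch, 1].
    logits = tf.layers.dense(features["x"], 1)
    logistic = tf.sigmoid(logits)
    class_ids = tf.to_float(logistic > 0.5)
    loss = tf.losses.sigmoid_cross_entropy(labels, logits)
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode, loss=loss,
            eval_metric_ops=_get_metrics(labels, logistic, class_ids, loss))
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)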
def _get_metric_op(labels, logistic, class_ids, weights, unweighted_loss):
    """
    Return a dict of the metric Ops.
    :param labels: (Tensor) Labels tensor for training and evaluation.
    :param logistic: (Tensor) Predictions can be generated from it directly and auc metric also need it.
    :param class_ids:(Tensor)
    :param weights: (Tensor) Weight column tensor, used to calc weighted eval metric.
    :param unweighted_loss(Tensor)
    :return: Dict of metric results keyed by name.
    """
    with tf.name_scope(None, "metrics"):
        labels_mean = _indicator_labels_mean(labels=labels,
                                             weights=weights,
                                             name="label/mean")
        average_loss = tf.metrics.mean(unweighted_loss,
                                       weights,
                                       name="average_loss")
        accuracy = tf.metrics.accuracy(labels,
                                       class_ids,
                                       weights=weights,
                                       name="accuracy")
        precision = tf.metrics.precision(labels,
                                         class_ids,
                                         weights,
                                         name="precision")
        recall = tf.metrics.recall(labels, class_ids, weights, name="recall")
        accuracy_baseline = _accuracy_baseline(labels_mean)
        auc = _auc(labels, logistic, weights, name="auc")

        metric_op = {
            'average_loss': average_loss,
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'accuracy_baseline': accuracy_baseline,
            'auc': auc
        }

        return metric_op
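Every value in the returned dict is the standard tf.metrics pair (value_tensor, update_op): the update op accumulates state batch by batch, and the value tensor reads the aggregate. A hypothetical self-contained check, with invented constant inputs:

import tensorflow as tf

labels = tf.constant([[1.0], [0.0], [1.0], [0.0]])
logistic = tf.constant([[0.9], [0.2], [0.6], [0.4]])
class_ids = tf.to_float(logistic > 0.5)
weights = tf.constant([[1.0], [1.0], [2.0], [1.0]])
unweighted_loss = -(labels * tf.log(logistic) +
                    (1.0 - labels) * tf.log(1.0 - logistic))

metric_op = _get_metric_op(labels, logistic, class_ids, weights, unweighted_loss)
auc_value, auc_update = metric_op['auc']

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric accumulators are local variables
    sess.run(auc_update)                        # accumulate one batch
    print(sess.run(auc_value))                  # read the aggregated, weighted AUC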
Example #4
  def _eval_metric_ops(
      self, labels, probabilities, weights, unreduced_loss,
      regularization_loss):
    """Returns a dict of metrics for eval_metric_ops."""
    with ops.name_scope(
        None, 'metrics',
        [labels, probabilities, weights, unreduced_loss, regularization_loss]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 values=unreduced_loss,
                 weights=weights,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     if regularization_loss is not None:
       loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
           self._name, keys.LOSS_REGULARIZATION)
       metric_ops[loss_regularization_key] = (
           metrics_lib.mean(
               values=regularization_loss,
               name=keys.LOSS_REGULARIZATION))
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
     for class_id in self._classes_for_class_based_metrics:
       batch_rank = array_ops.rank(probabilities) - 1
       begin = array_ops.concat(
           [array_ops.zeros([batch_rank], dtype=dtypes.int32), [class_id]],
           axis=0)
       size = array_ops.concat(
           [-1 * array_ops.ones([batch_rank], dtype=dtypes.int32), [1]],
           axis=0)
       class_probabilities = array_ops.slice(
           probabilities, begin=begin, size=size)
       class_labels = array_ops.slice(labels, begin=begin, size=size)
       prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, prob_key)] = (  # pylint:disable=protected-access
           head_lib._predictions_mean(  # pylint:disable=protected-access
               predictions=class_probabilities,
               weights=weights,
               name=prob_key))
       auc_key = keys.AUC_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, auc_key)] = (  # pylint:disable=protected-access
           head_lib._auc(  # pylint:disable=protected-access
               labels=class_labels,
               predictions=class_probabilities,
               weights=weights,
               name=auc_key))
       auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, auc_pr_key)] = (  # pylint:disable=protected-access
           head_lib._auc(  # pylint:disable=protected-access
               labels=class_labels,
               predictions=class_probabilities,
               weights=weights,
               curve='PR',
               name=auc_pr_key))
   return metric_ops
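The begin/size construction above pulls out a single class column regardless of the batch rank. The same effect for a fixed-rank tensor, as a standalone sketch with an invented constant:

import tensorflow as tf

probabilities = tf.constant([[0.1, 0.7, 0.2],
                             [0.5, 0.3, 0.2]])
class_id = 1
# Keep the last dimension (size 1) so downstream metrics see shape [..., 1].
class_probabilities = probabilities[..., class_id:class_id + 1]

with tf.Session() as sess:
    print(sess.run(class_probabilities))  # [[0.7], [0.3]]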
Example #6
def _get_metrics(labels, logistic, class_ids, unweighted_loss, weights):
    with tf.name_scope(None, 'metrics'):
        ctcvr_label, ctr_label = labels['convert_label'], labels['click_label']
        ctr_weight, ctcvr_weight = weights['click_weight'], weights[
            'convert_weight']
        ctr_logistic, ctcvr_logistic = logistic['ctr_logistic'], logistic[
            'ctcvr_logistic']
        ctr_class_ids, ctcvr_class_ids = class_ids['ctr_class_ids'], class_ids[
            'ctcvr_class_ids']

        ctr_labels_mean = _indicator_labels_mean(labels=ctr_label,
                                                 weights=ctr_weight,
                                                 name="ctr_label/mean")
        ctcvr_labels_mean = _indicator_labels_mean(labels=ctcvr_label,
                                                   weights=ctcvr_weight,
                                                   name="ctcvr_label/mean")

        ctr_average_loss = tf.metrics.mean(unweighted_loss,
                                           ctr_weight,
                                           name='ctr_average_loss')
        ctcvr_average_loss = tf.metrics.mean(unweighted_loss,
                                             ctcvr_weight,
                                             name='ctcvr_average_loss')

        ctr_accuracy = tf.metrics.accuracy(ctr_label,
                                           ctr_class_ids,
                                           name='ctr_acc')
        ctcvr_accuracy = tf.metrics.accuracy(ctcvr_label,
                                             ctcvr_class_ids,
                                             name='ctcvr_acc')

        ctr_precision = tf.metrics.precision(ctr_label,
                                             ctr_class_ids,
                                             name="ctr_precision")
        ctcvr_precision = tf.metrics.precision(ctcvr_label,
                                               ctcvr_class_ids,
                                               name="cvr_precision")

        ctr_recall = tf.metrics.recall(ctr_label,
                                       ctr_class_ids,
                                       name="ctr_recall")
        ctcvr_recall = tf.metrics.recall(ctcvr_label,
                                         ctcvr_class_ids,
                                         name="cvr_recall")

        ctr_accuracy_baseline = _accuracy_baseline(ctr_labels_mean)
        ctcvr_accuracy_baseline = _accuracy_baseline(ctcvr_labels_mean)

        ctr_auc = _auc(ctr_label, ctr_logistic, name="ctr_auc")
        ctcvr_auc = _auc(ctcvr_label, ctcvr_logistic, name='ctcvr_auc')
        metric_op = {
            'ctr_average_loss': ctr_average_loss,
            'ctcvr_average_loss': ctcvr_average_loss,
            'ctr_accuracy': ctr_accuracy,
            'ctcvr_accuracy': ctcvr_accuracy,
            'ctr_precision': ctr_precision,
            'ctcvr_precision': ctcvr_precision,
            'ctr_recall': ctr_recall,
            'ctcvr_recall': ctcvr_recall,
            'ctr_accuracy_baseline': ctr_accuracy_baseline,
            'ctcvr_accuracy_baseline': ctcvr_accuracy_baseline,
            'ctr_auc': ctr_auc,
            'ctcvr_auc': ctcvr_auc
        }
        return metric_op
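The dict-valued arguments suggest a two-head CTR / CTCVR (ESMM-style) model. A hypothetical sketch of how those dicts might be assembled; every tensor below is invented for illustration, and only the key names come from the snippet above.

import tensorflow as tf

# Hypothetical per-task logits; in practice these come from the two towers.
ctr_logits = tf.constant([[0.3], [-1.2]])
cvr_logits = tf.constant([[0.1], [0.4]])

ctr_logistic = tf.sigmoid(ctr_logits)
# ESMM-style chaining: p(click & convert) = p(click) * p(convert | click).
ctcvr_logistic = ctr_logistic * tf.sigmoid(cvr_logits)

logistic = {'ctr_logistic': ctr_logistic, 'ctcvr_logistic': ctcvr_logistic}
class_ids = {'ctr_class_ids': tf.to_float(ctr_logistic > 0.5),
             'ctcvr_class_ids': tf.to_float(ctcvr_logistic > 0.5)}
labels = {'click_label': tf.constant([[1.0], [0.0]]),
          'convert_label': tf.constant([[1.0], [0.0]])}
weights = {'click_weight': tf.ones_like(ctr_logistic),
           'convert_weight': tf.ones_like(ctcvr_logistic)}

# Per-example log loss on the CTR head, just to have something to average.
unweighted_loss = tf.losses.log_loss(labels['click_label'], ctr_logistic,
                                     reduction=tf.losses.Reduction.NONE)

metric_op = _get_metrics(labels, logistic, class_ids, unweighted_loss, weights)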