Code example #1
 def _eval_metric_ops(
     self, labels, probabilities, weights, unreduced_loss,
     regularization_loss):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, unreduced_loss, regularization_loss]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 values=unreduced_loss,
                 weights=weights,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     if regularization_loss is not None:
       loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
           self._name, keys.LOSS_REGULARIZATION)
       metric_ops[loss_regularization_key] = (
           metrics_lib.mean(
               values=regularization_loss,
               name=keys.LOSS_REGULARIZATION))
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
   return metric_ops
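head_lib._summary_key, used for every key placed in metric_ops above, is a private helper of the TF 1.x estimator head module. Its assumed effect is simply to namespace the metric key with the head name when one is set; the following is an illustrative sketch of that behavior, not the actual implementation.

# Sketch (assumption): with a head name the key is namespaced, otherwise it is
# returned unchanged.
def summary_key(head_name, key):
  return '%s/%s' % (key, head_name) if head_name else key

assert summary_key(None, 'auc') == 'auc'
assert summary_key('my_head', 'auc') == 'auc/my_head'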
Code example #2
File: head.py Project: AndrewTwinz/tensorflow
 def _eval_metric_ops(
     self, labels, probabilities, weights, unreduced_loss,
     regularization_loss):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, unreduced_loss, regularization_loss]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 values=unreduced_loss,
                 weights=weights,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     if regularization_loss is not None:
       loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
           self._name, keys.LOSS_REGULARIZATION)
       metric_ops[loss_regularization_key] = (
           metrics_lib.mean(
               values=regularization_loss,
               name=keys.LOSS_REGULARIZATION))
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
   return metric_ops
Code example #3
File: head.py Project: vinegreti2010/CFRFServers
 def _eval_metric_ops(self, labels, probabilities, weights,
                      weighted_sum_loss, example_weight_sum):
     """Returns a dict of metrics for eval_metric_ops."""
     with ops.name_scope(None, 'metrics', [
             labels, probabilities, weights, weighted_sum_loss,
             example_weight_sum
     ]):
         keys = metric_keys.MetricKeys
         metric_ops = {
             # Estimator already adds a metric for loss.
             head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
                 metrics_lib.mean(
                     # Both values and weights here are reduced, scalar Tensors.
                     # values is the actual mean we want, but we pass the scalar
                     # example_weight_sum in order to return the correct update_op
                     # alongside the value_op for streaming metrics.
                     values=(weighted_sum_loss / example_weight_sum),
                     weights=example_weight_sum,
                     name=keys.LOSS_MEAN),
             head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
                 metrics_lib.auc(labels=labels, predictions=probabilities,
                                 weights=weights, name=keys.AUC),
             head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
                 metrics_lib.auc(labels=labels, predictions=probabilities,
                                 weights=weights, curve='PR',
                                 name=keys.AUC_PR),
         }
         for threshold in self._thresholds:
             accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
                 head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=accuracy_key))
             # Precision for positive examples.
             precision_key = keys.PRECISION_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(
                 self._name, precision_key)] = (  # pylint:disable=protected-access
                     head_lib._precision_at_threshold(  # pylint:disable=protected-access
                         labels=labels,
                         predictions=probabilities,
                         weights=weights,
                         threshold=threshold,
                         name=precision_key))
             # Recall for positive examples.
             recall_key = keys.RECALL_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
                 head_lib._recall_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=recall_key))
     return metric_ops
Code example #4
File: head.py Project: AbhinavJain13/tensorflow
 def _eval_metric_ops(self, labels, probabilities, weights, weighted_sum_loss,
                      example_weight_sum):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, weighted_sum_loss, example_weight_sum
       ]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 # Both values and weights here are reduced, scalar Tensors.
                 # values is the actual mean we want, but we pass the scalar
                 # example_weight_sum in order to return the correct update_op
                 # alongside the value_op for streaming metrics.
                 values=(weighted_sum_loss / example_weight_sum),
                 weights=example_weight_sum,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
   return metric_ops
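The LOSS_MEAN entry above feeds metrics_lib.mean the already-reduced batch mean (weighted_sum_loss / example_weight_sum) and passes example_weight_sum back in as the weight, so the streaming mean still aggregates correctly across evaluation batches. A minimal standalone sketch of that arithmetic, using the public tf.metrics.mean alias (TF 1.x graph mode assumed; the numbers are illustrative):

import tensorflow as tf  # TF 1.x assumed

values = tf.placeholder(tf.float32, [])
weights = tf.placeholder(tf.float32, [])
# tf.metrics.mean accumulates sum(values * weights) / sum(weights) over batches.
mean_value, mean_update = tf.metrics.mean(values=values, weights=weights)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  # Batch 1: weighted_sum_loss = 6.0, example_weight_sum = 2.0 -> batch mean 3.0
  sess.run(mean_update, {values: 6.0 / 2.0, weights: 2.0})
  # Batch 2: weighted_sum_loss = 2.0, example_weight_sum = 1.0 -> batch mean 2.0
  sess.run(mean_update, {values: 2.0 / 1.0, weights: 1.0})
  # Streaming result: (6.0 + 2.0) / (2.0 + 1.0) = 2.666...
  print(sess.run(mean_value))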
Code example #5
    def model_fn(self, mode, features, labels, params):
        c = variable_scope.get_variable('c',
                                        initializer=constant_op.constant(
                                            10, dtype=dtypes.float64),
                                        dtype=dtypes.float64)

        predictions = math_ops.multiply(features, c)

        loss = None
        if mode is not model_fn_lib.ModeKeys.PREDICT:
            loss = losses.absolute_difference(labels=labels,
                                              predictions=predictions,
                                              reduction=losses.Reduction.SUM)
            loss = math_ops.reduce_sum(loss)

        metrics = {
            'accuracy': metrics_lib.accuracy(labels, predictions),
            'auc': metrics_lib.auc(labels, predictions)
        }

        return model_fn_lib.EstimatorSpec(
            mode=mode,
            loss=loss,
            eval_metric_ops=metrics,
            predictions={'probabilities': predictions},
            train_op=control_flow_ops.no_op(
            ))  # This train_op isn't actually used.
Code example #6
  def model_fn(self, mode, features, labels, params):
    c = variable_scope.get_variable(
        'c',
        initializer=constant_op.constant(10, dtype=dtypes.float64),
        dtype=dtypes.float64)

    predictions = math_ops.multiply(features, c)

    loss = None
    if mode is not model_fn_lib.ModeKeys.PREDICT:
      loss = losses.absolute_difference(
          labels=labels,
          predictions=predictions,
          reduction=losses.Reduction.SUM)
      loss = math_ops.reduce_sum(loss)

    metrics = {
        'accuracy': metrics_lib.accuracy(labels, predictions),
        'auc': metrics_lib.auc(labels, predictions)
    }

    return model_fn_lib.EstimatorSpec(
        mode=mode,
        loss=loss,
        eval_metric_ops=metrics,
        predictions={'probabilities': predictions},
        train_op=control_flow_ops.no_op())  # This train_op isn't actually used.
Code example #7
 def _metric_fn(x):
     labels = x["labels"]
     predictions = x["predictions"]
     return metrics.auc(labels,
                        predictions,
                        num_thresholds=8,
                        curve="PR",
                        summation_method="careful_interpolation")
Code example #8
File: head.py Project: AbhinavJain13/tensorflow
def _auc(labels, predictions, weights=None, curve='ROC', name=None):
  with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions, name='predictions')
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
    return metrics_lib.auc(
        labels=labels, predictions=predictions, weights=weights, curve=curve,
        name=scope)
Code example #9
def _auc(labels, predictions, weights=None, curve='ROC', name=None):
  with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions, name='predictions')
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
    return metrics_lib.auc(
        labels=labels, predictions=predictions, weights=weights, curve=curve,
        name=scope)
Code example #10
  def create_eval_metrics(self, noise):
    predictions = np.array([0.1, 0.2, 0.3, 0.6 + noise])
    labels = np.array([0.1, 0.2, 0.3, 0.6])

    metrics = {
        'accuracy': metrics_lib.accuracy(labels, predictions),
        'auc': metrics_lib.auc(labels, predictions)
    }
    return metrics
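Each value in the metrics dict returned here is a (value_op, update_op) pair, which is what eval_metric_ops expects: the update op accumulates state in local variables, the value op reads the current result. A minimal sketch of evaluating such a pair by hand with the public tf.metrics.auc alias (TF 1.x graph mode assumed; the labels and predictions below are illustrative):

import numpy as np
import tensorflow as tf  # TF 1.x assumed

labels = np.array([0., 0., 1., 1.])
predictions = np.array([0.1, 0.4, 0.35, 0.8])

auc_value, auc_update = tf.metrics.auc(labels=labels, predictions=predictions)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())  # metric state lives in local vars
  sess.run(auc_update)                        # run once per evaluation batch
  print(sess.run(auc_value))                  # AUC over everything seen so far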
Code example #11
    def create_eval_metrics(self, noise):
        predictions = np.array([0.1, 0.2, 0.3, 0.6 + noise])
        labels = np.array([0.1, 0.2, 0.3, 0.6])

        metrics = {
            'accuracy': metrics_lib.accuracy(labels, predictions),
            'auc': metrics_lib.auc(labels, predictions)
        }
        return metrics
Code example #12
def _auc(labels, predictions, weights=None, curve='ROC', name=None):
  with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions, name='predictions')
    if labels.dtype.base_dtype != dtypes.bool:
      logging.warning('Casting %s labels to bool.', labels.dtype)
      labels = math_ops.cast(labels, dtypes.bool)
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
    return metrics_lib.auc(
        labels=labels, predictions=predictions, weights=weights, curve=curve,
        name=scope)
Code example #13
File: head.py Project: vaccine/tensorflow
def _auc(labels, predictions, weights=None, curve='ROC', name=None):
  with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions, name='predictions')
    if labels.dtype.base_dtype != dtypes.bool:
      logging.warning('Casting %s labels to bool.', labels.dtype)
      labels = math_ops.cast(labels, dtypes.bool)
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
    return metrics_lib.auc(
        labels=labels, predictions=predictions, weights=weights, curve=curve,
        name=scope)
Code example #14
    def model_fn(self, mode, features, labels, params):
        c = variable_scope.get_variable('c',
                                        initializer=constant_op.constant(
                                            10, dtype=dtypes.float64),
                                        dtype=dtypes.float64)

        predictions = {'probabilities': math_ops.multiply(features, c)}
        loss = losses.absolute_difference(
            labels=labels,
            predictions=predictions['probabilities'],
            reduction=losses.Reduction.SUM)

        metrics = {
            'accuracy': metrics_lib.accuracy(labels,
                                             predictions['probabilities']),
            'auc': metrics_lib.auc(labels, predictions['probabilities'])
        }
        tensor_string_repr = str(features)
        classes = constant_op.constant(re.search('(split_inputs/split:[0-9])',
                                                 tensor_string_repr).group(1),
                                       dtype=dtypes.string)

        export_outputs = {
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.PredictOutput(predictions),
            'classification_output':
            export_output.ClassificationOutput(predictions['probabilities'],
                                               classes),
            'classification_scores':
            export_output.ClassificationOutput(
                scores=predictions['probabilities']),
            'classification_classes':
            export_output.ClassificationOutput(classes=classes),
            'regression_output':
            export_output.RegressionOutput(predictions['probabilities']),
        }

        return model_fn_lib.EstimatorSpec(
            mode=mode,
            loss=math_ops.reduce_sum(loss),
            eval_metric_ops=metrics,
            predictions=predictions,
            train_op=loss,  # This train_op isn't actually used.
            export_outputs=export_outputs)
Code example #15
  def model_fn(self, mode, features, labels, params):
    c = variable_scope.get_variable(
        'c',
        initializer=constant_op.constant(10, dtype=dtypes.float64),
        dtype=dtypes.float64)

    predictions = {'probabilities': math_ops.multiply(features, c)}
    loss = losses.absolute_difference(
        labels=labels,
        predictions=predictions['probabilities'],
        reduction=losses.Reduction.SUM)

    metrics = {
        'accuracy': metrics_lib.accuracy(labels, predictions['probabilities']),
        'auc': metrics_lib.auc(labels, predictions['probabilities'])
    }
    tensor_string_repr = str(features)
    classes = constant_op.constant(
        re.search('(split_inputs/split:[0-9])', tensor_string_repr).group(1),
        dtype=dtypes.string)

    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.PredictOutput(predictions),
        'classification_output':
            export_output.ClassificationOutput(predictions['probabilities'],
                                               classes),
        'classification_scores':
            export_output.ClassificationOutput(
                scores=predictions['probabilities']),
        'classification_classes':
            export_output.ClassificationOutput(classes=classes),
        'regression_output':
            export_output.RegressionOutput(predictions['probabilities']),
    }

    return model_fn_lib.EstimatorSpec(
        mode=mode,
        loss=math_ops.reduce_sum(loss),
        eval_metric_ops=metrics,
        predictions=predictions,
        train_op=loss,  # This train_op isn't actually used.
        export_outputs=export_outputs)
Code example #16
File: head.py Project: didukhle/tensorflow
 def _eval_metric_ops(
     self, labels, probabilities, weights, unreduced_loss,
     regularization_loss):
   """Returns a dict of metrics for eval_metric_ops."""
   with ops.name_scope(
       None, 'metrics',
       [labels, probabilities, weights, unreduced_loss, regularization_loss]):
     keys = metric_keys.MetricKeys
     metric_ops = {
         # Estimator already adds a metric for loss.
         head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
             metrics_lib.mean(
                 values=unreduced_loss,
                 weights=weights,
                 name=keys.LOSS_MEAN),
         head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, name=keys.AUC),
         head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
             metrics_lib.auc(labels=labels, predictions=probabilities,
                             weights=weights, curve='PR',
                             name=keys.AUC_PR),
     }
     if regularization_loss is not None:
       loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
           self._name, keys.LOSS_REGULARIZATION)
       metric_ops[loss_regularization_key] = (
           metrics_lib.mean(
               values=regularization_loss,
               name=keys.LOSS_REGULARIZATION))
     for threshold in self._thresholds:
       accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
           head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=accuracy_key))
       # Precision for positive examples.
       precision_key = keys.PRECISION_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, precision_key)] = (  # pylint:disable=protected-access
           head_lib._precision_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=precision_key))
       # Recall for positive examples.
       recall_key = keys.RECALL_AT_THRESHOLD % threshold
       metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
           head_lib._recall_at_threshold(  # pylint:disable=protected-access
               labels=labels,
               predictions=probabilities,
               weights=weights,
               threshold=threshold,
               name=recall_key))
     for class_id in self._classes_for_class_based_metrics:
       batch_rank = array_ops.rank(probabilities) - 1
       begin = array_ops.concat(
           [array_ops.zeros([batch_rank], dtype=dtypes.int32), [class_id]],
           axis=0)
       size = array_ops.concat(
           [-1 * array_ops.ones([batch_rank], dtype=dtypes.int32), [1]],
           axis=0)
       class_probabilities = array_ops.slice(
           probabilities, begin=begin, size=size)
       class_labels = array_ops.slice(labels, begin=begin, size=size)
       prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, prob_key)] = (  # pylint:disable=protected-access
           head_lib._predictions_mean(  # pylint:disable=protected-access
               predictions=class_probabilities,
               weights=weights,
               name=prob_key))
       auc_key = keys.AUC_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, auc_key)] = (  # pylint:disable=protected-access
           head_lib._auc(  # pylint:disable=protected-access
               labels=class_labels,
               predictions=class_probabilities,
               weights=weights,
               name=auc_key))
       auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
       metric_ops[head_lib._summary_key(self._name, auc_pr_key)] = (  # pylint:disable=protected-access
           head_lib._auc(  # pylint:disable=protected-access
               labels=class_labels,
               predictions=class_probabilities,
               weights=weights,
               curve='PR',
               name=auc_pr_key))
   return metric_ops
Code example #17
File: head.py Project: yueyedeai/estimator
 def _eval_metric_ops(self, labels, probabilities, weights, unreduced_loss,
                      regularization_loss):
     """Returns a dict of metrics for eval_metric_ops."""
     with ops.name_scope(None, 'metrics', [
             labels, probabilities, weights, unreduced_loss,
             regularization_loss
     ]):
         keys = metric_keys.MetricKeys
         metric_ops = {
             # Estimator already adds a metric for loss.
             head_lib._summary_key(self._name, keys.LOSS_MEAN):  # pylint:disable=protected-access
                 metrics_lib.mean(
                     values=unreduced_loss,
                     weights=weights,
                     name=keys.LOSS_MEAN),
             head_lib._summary_key(self._name, keys.AUC):  # pylint:disable=protected-access
                 metrics_lib.auc(labels=labels, predictions=probabilities,
                                 weights=weights, name=keys.AUC),
             head_lib._summary_key(self._name, keys.AUC_PR):  # pylint:disable=protected-access
                 metrics_lib.auc(labels=labels, predictions=probabilities,
                                 weights=weights, curve='PR',
                                 name=keys.AUC_PR),
         }
         if regularization_loss is not None:
             loss_regularization_key = head_lib._summary_key(  # pylint:disable=protected-access
                 self._name, keys.LOSS_REGULARIZATION)
             metric_ops[loss_regularization_key] = (metrics_lib.mean(
                 values=regularization_loss, name=keys.LOSS_REGULARIZATION))
         for threshold in self._thresholds:
             accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, accuracy_key)] = (  # pylint:disable=protected-access
                 head_lib._accuracy_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=accuracy_key))
             # Precision for positive examples.
             precision_key = keys.PRECISION_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(
                 self._name, precision_key)] = (  # pylint:disable=protected-access
                     head_lib._precision_at_threshold(  # pylint:disable=protected-access
                         labels=labels,
                         predictions=probabilities,
                         weights=weights,
                         threshold=threshold,
                         name=precision_key))
             # Recall for positive examples.
             recall_key = keys.RECALL_AT_THRESHOLD % threshold
             metric_ops[head_lib._summary_key(self._name, recall_key)] = (  # pylint:disable=protected-access
                 head_lib._recall_at_threshold(  # pylint:disable=protected-access
                     labels=labels,
                     predictions=probabilities,
                     weights=weights,
                     threshold=threshold,
                     name=recall_key))
         for class_id in self._classes_for_class_based_metrics:
             batch_rank = array_ops.rank(probabilities) - 1
             begin = array_ops.concat([
                 array_ops.zeros([batch_rank], dtype=dtypes.int32),
                 [class_id]
             ],
                                      axis=0)
             size = array_ops.concat([
                 -1 * array_ops.ones([batch_rank], dtype=dtypes.int32), [1]
             ],
                                     axis=0)
             class_probabilities = array_ops.slice(probabilities,
                                                   begin=begin,
                                                   size=size)
             class_labels = array_ops.slice(labels, begin=begin, size=size)
             if self._label_vocabulary is None:
                 prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
             else:
                 prob_key = (keys.PROBABILITY_MEAN_AT_NAME %
                             self._label_vocabulary[class_id])
             metric_ops[head_lib._summary_key(self._name, prob_key)] = (  # pylint:disable=protected-access
                 head_lib._predictions_mean(  # pylint:disable=protected-access
                     predictions=class_probabilities,
                     weights=weights,
                     name=prob_key))
             if self._label_vocabulary is None:
                 auc_key = keys.AUC_AT_CLASS % class_id
             else:
                 auc_key = keys.AUC_AT_NAME % self._label_vocabulary[
                     class_id]
             metric_ops[head_lib._summary_key(self._name, auc_key)] = (  # pylint:disable=protected-access
                 head_lib._auc(  # pylint:disable=protected-access
                     labels=class_labels,
                     predictions=class_probabilities,
                     weights=weights,
                     name=auc_key))
             if self._label_vocabulary is None:
                 auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
             else:
                 auc_pr_key = keys.AUC_PR_AT_NAME % self._label_vocabulary[
                     class_id]
             metric_ops[head_lib._summary_key(self._name, auc_pr_key)] = (  # pylint:disable=protected-access
                 head_lib._auc(  # pylint:disable=protected-access
                     labels=class_labels,
                     predictions=class_probabilities,
                     weights=weights,
                     curve='PR',
                     name=auc_pr_key))
     return metric_ops
Code example #18
File: eval_metrics.py Project: neuroph12/CNNDDDD
def _auc(probs, targets, weights=None):
    return metrics.auc(labels=targets,
                       predictions=array_ops.slice(probs, [0, 1], [-1, 1]),
                       weights=weights)
Code example #19
 def my_auc(labels, predictions):
     from tensorflow.python.ops import metrics as metrics_lib
     auc = metrics_lib.auc(
         labels=labels, predictions=predictions['logistic'], curve='ROC',
         name='my_auc')
     return {'my_auc': auc}
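A metric_fn with this (labels, predictions) signature is the shape accepted by the add_metrics wrapper in the TF 1.x estimator API, which attaches extra evaluation metrics to an existing estimator. A minimal sketch of wiring my_auc up that way; the DNNClassifier, feature column, and input_fn names below are illustrative assumptions, not part of the original example:

import tensorflow as tf  # TF 1.x assumed

# Any canned binary classifier exposing a 'logistic' prediction key would do.
feature_columns = [tf.feature_column.numeric_column('x')]
estimator = tf.estimator.DNNClassifier(
    feature_columns=feature_columns, hidden_units=[8])

# add_metrics wraps the estimator so that evaluate() also reports 'my_auc'.
estimator = tf.contrib.estimator.add_metrics(estimator, my_auc)
# eval_results = estimator.evaluate(input_fn=my_eval_input_fn)  # hypothetical input_fn
# eval_results['my_auc'] would then hold the custom ROC AUC.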
Code example #20
 def _metric_fn(x):
   labels = x["labels"]
   predictions = x["predictions"]
   return metrics.auc(labels, predictions, num_thresholds=8, curve="PR",
                      summation_method="careful_interpolation")
Code example #21
def _auc(probs, targets, weights=None):
  return metrics.auc(
      labels=targets,
      predictions=array_ops.slice(probs, [0, 1], [-1, 1]),
      weights=weights)
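The array_ops.slice(probs, [0, 1], [-1, 1]) call keeps every row but only column 1, i.e. the positive-class probability of a [batch_size, 2] probabilities tensor, and is equivalent to probs[:, 1:2]. A minimal standalone check (TF 1.x graph mode assumed):

import tensorflow as tf  # TF 1.x assumed

probs = tf.constant([[0.9, 0.1],
                     [0.3, 0.7]])
# begin=[0, 1]: start at row 0, column 1; size=[-1, 1]: all rows, one column.
positive_probs = tf.slice(probs, [0, 1], [-1, 1])

with tf.Session() as sess:
  print(sess.run(positive_probs))  # [[0.1], [0.7]]
  print(sess.run(tf.reduce_all(tf.equal(positive_probs, probs[:, 1:2]))))  # True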