Example No. 1
 def _evaluate_ops(self, features):
     """Add ops for evaluation (aka filtering) to the graph."""
     mode = estimator_lib.ModeKeys.EVAL
     with variable_scope.variable_scope("model", use_resource=True):
         model_outputs = self.create_loss(features, mode)
     metrics = {}
     # Just output in-sample predictions for the last chunk seen
     for prediction_key, prediction_value in model_outputs.predictions.items():
         metrics[prediction_key] = _identity_metric_single(
             prediction_key, prediction_value)
     metrics[feature_keys.FilteringResults.TIMES] = _identity_metric_single(
         feature_keys.FilteringResults.TIMES,
         model_outputs.prediction_times)
     metrics[feature_keys.FilteringResults.STATE_TUPLE] = (
         _identity_metric_nested(feature_keys.FilteringResults.STATE_TUPLE,
                                 model_outputs.end_state))
     metrics[metric_keys.MetricKeys.LOSS_MEAN] = metrics_impl.mean(
         model_outputs.loss, name="average_loss")
     return estimator_lib.EstimatorSpec(
         loss=model_outputs.loss,
         mode=mode,
         eval_metric_ops=metrics,
         # needed for custom metrics.
         predictions=model_outputs.predictions)
Example No. 2
 def my_model_fn(features, mode):
     loss = math_ops.reduce_max(features)
     eval_metric_ops = {
         "feature_mean": metrics_impl.mean(features),
     }
     return model_fn_lib.EstimatorSpec(mode,
                                       loss=loss,
                                       eval_metric_ops=eval_metric_ops)
Example No. 3
def top_5_accuracy(labels,
                   predictions,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
    """Calculates how often `predictions` matches `labels`.
    The `accuracy` function creates two local variables, `total` and
    `count` that are used to compute the frequency with which `predictions`
    matches `labels`. This frequency is ultimately returned as `accuracy`: an
    idempotent operation that simply divides `total` by `count`.
    For estimation of the metric over a stream of data, the function creates an
    `update_op` operation that updates these variables and returns the `accuracy`.
    Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
    where the corresponding elements of `predictions` and `labels` match and 0.0
    otherwise. Then `update_op` increments `total` with the reduced sum of the
    product of `weights` and `is_correct`, and it increments `count` with the
    reduced sum of `weights`.
    If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
    Args:
    labels: The ground truth values, a `Tensor` whose shape matches
      `predictions`.
    predictions: The predicted values, a `Tensor` of any shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `accuracy` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
    Returns:
    accuracy: A `Tensor` representing the accuracy, the value of `total` divided
      by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `accuracy`.
    Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
    """
    if context.executing_eagerly():
        raise RuntimeError('top_5_accuracy is not supported when eager '
                           'execution is enabled.')

    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=predictions, labels=labels, weights=weights
    )
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    if labels.dtype != predictions.dtype:
        predictions = math_ops.cast(predictions, labels.dtype)
    is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
    # Keep a 1.0 only where every element along the second-to-last axis matched,
    # then count a hit if any element along the last axis is still 1.0.
    is_correct = math_ops.reduce_prod(is_correct, axis=-2)
    is_correct = math_ops.reduce_max(is_correct, axis=-1)
    return mean(is_correct, weights, metrics_collections,
                updates_collections, name or 'accuracy')
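For reference, here is a minimal TF 1.x graph-mode sketch (the placeholder tensors and feed values are illustrative, not taken from the source) of how a streaming metric pair like the one returned above is typically driven: initialize the metric's local variables, run `update_op` once per batch, then read the final value.

import tensorflow as tf

labels = tf.placeholder(tf.int64, shape=[None])
predictions = tf.placeholder(tf.int64, shape=[None])
accuracy, update_op = tf.metrics.accuracy(labels=labels, predictions=predictions)

with tf.Session() as sess:
    # Streaming metrics accumulate into local variables (`total` and `count`).
    sess.run(tf.local_variables_initializer())
    for batch_labels, batch_predictions in [([1, 2], [1, 0]), ([3], [3])]:
        sess.run(update_op, feed_dict={labels: batch_labels,
                                       predictions: batch_predictions})
    print(sess.run(accuracy))  # 2 correct out of 3 examples -> ~0.6667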
Example No. 4
def mean_absolute_percentage_error(
        labels,
        predictions,
        weights=None,
        metrics_collections=None,
        updates_collections=None,
        name=None):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=predictions, labels=labels, weights=weights)

    absolute_percentage_errors = math_ops.abs(
        math_ops.div(predictions - labels, labels))
    return mean(absolute_percentage_errors, weights, metrics_collections,
                updates_collections, name or 'mean_absolute_percentage_error')
Example No. 5
def rmspe(labels, predictions, weights=None):
    if context.executing_eagerly():
        raise RuntimeError('rmspe is not supported '
                           'when eager execution is enabled.')

    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(
        predictions=predictions, labels=labels, weights=weights)
    # The targets were log1p-transformed, so apply expm1 to recover the
    # original scale before computing the percentage error.
    labels, predictions = math_ops.expm1(labels), math_ops.expm1(predictions)
    mspe, update_op = metrics_impl.mean(
        math_ops.square((labels - predictions) / labels), weights)
    rmspe = math_ops.sqrt(mspe)
    rmspe_update_op = math_ops.sqrt(update_op)
    return rmspe, rmspe_update_op
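As context for the comment above, a small NumPy sketch (values made up for illustration) showing that `expm1` exactly inverts the `log1p` transform, and what the resulting RMSPE looks like in the original scale:

import numpy as np

# Hypothetical targets in the original scale and their log1p-transformed version.
targets = np.array([10.0, 100.0, 1000.0])
transformed = np.log1p(targets)

# expm1 is the exact inverse of log1p, so the round trip recovers the targets.
assert np.allclose(np.expm1(transformed), targets)

# RMSPE in the original scale, mirroring the rmspe() helper above.
predictions = np.array([11.0, 90.0, 1050.0])
print(np.sqrt(np.mean(np.square((targets - predictions) / targets))))  # ~0.087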
Example No. 6
 def my_model_fn(features, labels, mode):
     loss = features + labels
     # Make different graphs for train and eval
     if mode == model_fn.ModeKeys.TRAIN:
         train_op = array_ops.identity(loss)
         return model_fn.EstimatorSpec(mode=mode,
                                       loss=loss,
                                       train_op=train_op)
     elif mode == model_fn.ModeKeys.EVAL:
         eval_metric_ops = {
             "metric": metrics_impl.mean(features * labels)
         }
         return model_fn.EstimatorSpec(mode=mode,
                                       loss=loss,
                                       eval_metric_ops=eval_metric_ops)
     else:
         raise NotImplementedError(mode)
Example No. 7
def mean_absolute_percentage_error(labels,
                                   predictions,
                                   weights=None,
                                   metrics_collections=None,
                                   updates_collections=None,
                                   name=None):
    """Computes the mean absolute percentage error between the labels and predictions.

  The `mean_absolute_percentage_error` function creates two local variables,
  `total` and `count` that are used to compute the mean absolute percentage error.
  This average is weighted by `weights`, and it is ultimately returned as
  `mean_absolute_percentage_error`: an idempotent operation that simply divides `total`
  by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_absolute_percentage_error`. Internally, an `absolute_percentage_errors` operation
  computes the absolute value of the percentage differences between `predictions` and `labels`.
  Then `update_op` increments `total` with the reduced sum of the product of
  `weights` and `absolute_percentage_errors`, and it increments `count` with the reduced
  sum of `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_absolute_percentage_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    mean_absolute_percentage_error: A `Tensor` representing the current mean, the value
    of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_absolute_percentage_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
    if context.executing_eagerly():
        raise RuntimeError(
            'tf.metrics.mean_absolute_percentage_error is not supported '
            'when eager execution is enabled.')

    # Promote labels and predictions to a common floating-point dtype.
    if predictions.dtype in (dtypes.float16, dtypes.float32, dtypes.float64) \
        and labels.dtype != predictions.dtype:
        labels = math_ops.cast(labels, predictions.dtype)
    elif labels.dtype in (dtypes.float16, dtypes.float32, dtypes.float64) \
        and labels.dtype != predictions.dtype:
        predictions = math_ops.cast(predictions, labels.dtype)
    else:
        labels = math_ops.cast(labels, dtypes.float32)
        predictions = math_ops.cast(predictions, dtypes.float32)

    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(
        predictions=predictions, labels=labels, weights=weights)
    # EPSILON is assumed to be a small positive module-level constant that keeps
    # the denominator away from zero when a label is (near) zero.
    min_value = constant_op.constant(EPSILON, dtype=dtypes.float32)
    max_value = constant_op.constant(float('Inf'), dtype=dtypes.float32)
    percentage_absolute_errors = 100 * math_ops.abs(
        (predictions - labels) / math_ops.abs(
            clip_ops.clip_by_value(math_ops.abs(labels), min_value,
                                   max_value)))
    return metrics_impl.mean(percentage_absolute_errors, weights,
                             metrics_collections, updates_collections, name
                             or 'mape')
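As a quick sanity check of the `percentage_absolute_errors` expression above, a minimal NumPy sketch (values invented for illustration) of the per-element percentage errors and their unweighted mean:

import numpy as np

labels = np.array([2.0, 4.0, 5.0])
predictions = np.array([3.0, 3.0, 5.5])

# 100 * |pred - label| / |label|, mirroring percentage_absolute_errors above.
errors = 100 * np.abs((predictions - labels) / np.abs(labels))
print(errors)         # [50. 25. 10.]
print(errors.mean())  # 28.33... -> the unweighted MAPE for this batch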
Example No. 8
 def eval_metrics_fn(global_step, loss):
   return {
       "global_step_observed": metrics_impl.mean(global_step),
       "loss": loss,
   }
Example No. 9
 def eval_metrics_fn(partial, squared):
   return {
       "mean": metrics_impl.mean(partial),
       "loss": squared,
   }
Example No. 10
 def eval_metrics_fn(prediction):
   return {"prediction": metrics_impl.mean(prediction)}
Example No. 11
def _streaming_mean_absolute_percentage_error(predictions, labels):
    absolute_errors = math_ops.abs((predictions - labels) / labels)
    mean_t, update_op = metrics_impl.mean(absolute_errors, None, None, None,
                                          'mean_absolute_percentage_error')
    # Scale both the value and the update op from a fraction to a percentage.
    return math_ops.multiply(mean_t, 100.), math_ops.multiply(update_op, 100.)