Example 1
    def _get_eval_ops(self, features, targets, metrics):
        """Method that builds model graph and returns evaluation ops.

        Expected to be overridden by sub-classes that require custom support.
        This implementation uses the `model_fn` passed to the constructor to
        build the model.

        Args:
          features: `Tensor` or `dict` of `Tensor` objects.
          targets: `Tensor` or `dict` of `Tensor` objects.
          metrics: Dict of metric ops to run. If None, the default metric
            functions are used; if {}, no metrics are used. If the model has
            one output (i.e., returns a single prediction), keys are `str`,
            e.g. `'accuracy'` - just the name of the metric that will show up
            in the logs / summaries. Otherwise, keys are a tuple of two `str`,
            e.g. `('accuracy', 'classes')` - the name of the metric and the
            name of the `Tensor` in the predictions to run this metric on.
            Metric ops should support streaming, i.e., return update_op and
            value tensors. See more details in
            ../../../../metrics/python/metrics/ops/streaming_metrics.py.

        Returns:
          metrics: `dict` of `Tensor` objects.

        Raises:
          ValueError: if `metrics` don't match `targets`.
        """
        predictions, loss, _ = self._call_model_fn(features, targets, ModeKeys.EVAL)
        result = {"loss": metrics_lib.streaming_mean(loss)}

        weights = self._get_weight_tensor(features)
        metrics = metrics or {}
        if isinstance(targets, dict) and len(targets) == 1:
            # Unpack single target into just tensor.
            targets = targets[list(targets.keys())[0]]
        for name, metric in six.iteritems(metrics):
            if isinstance(name, tuple):
                # Multi-head metrics.
                if not isinstance(predictions, dict):
                    raise ValueError(
                        "Metrics passed provide (name, prediction), "
                        "but predictions are not dict. "
                        "Metrics: %s, Predictions: %s." % (metrics, predictions)
                    )
                # Here are two options: targets are single Tensor or a dict.
                if isinstance(targets, dict) and name[1] in targets:
                    # If targets are dict and the prediction name is in it, apply metric.
                    result[name[0]] = metrics_lib.run_metric(metric, predictions[name[1]], targets[name[1]], weights)
                else:
                    # Otherwise pass the targets to the metric.
                    result[name[0]] = metrics_lib.run_metric(metric, predictions[name[1]], targets, weights)
            else:
                # Single head metrics.
                if isinstance(predictions, dict):
                    raise ValueError(
                        "Metrics passed provide only name, no prediction, "
                        "but predictions are dict. "
                        "Metrics: %s, Targets: %s." % (metrics, targets)
                    )
                result[name] = metrics_lib.run_metric(metric, predictions, targets, weights)
        return result
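
The docstring above describes two key formats for the `metrics` argument; the sketch below shows what a caller-side dict might look like under that contract. The metric function (`streaming_accuracy`), the prediction key `'classes'`, and the contrib import are illustrative assumptions, not part of the example above; the import is only a guess consistent with the `metrics_lib.streaming_mean` call in the code.

# Illustrative only: assumes `metrics_lib` is tf.contrib.metrics.
from tensorflow.contrib import metrics as metrics_lib

# Single-head model: plain string keys; each metric is run on the single
# predictions tensor returned by model_fn.
single_head_metrics = {
    'accuracy': metrics_lib.streaming_accuracy,
}

# Multi-head model: (metric_name, prediction_key) tuple keys; each metric
# is run on predictions[prediction_key], and on targets[prediction_key]
# when the targets dict contains that key.
multi_head_metrics = {
    ('accuracy', 'classes'): metrics_lib.streaming_accuracy,
}

# Either dict would then be passed through the `metrics` parameter of the
# estimator's evaluate call, which hands it to _get_eval_ops.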
Example 2
def _run_metrics(predictions, targets, metrics, weights):
  """Runs every metric in `metrics` on `predictions` vs. `targets`."""
  result = {}
  # Align dtypes so metrics can compare predictions and targets directly.
  targets = math_ops.cast(targets, predictions.dtype)
  for name, metric in six.iteritems(metrics or {}):
    result[name] = metrics_lib.run_metric(
        metric, predictions, targets, weights=weights)

  return result
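
A hedged usage sketch for the helper above, with small constant tensors standing in for real model outputs. The `streaming_accuracy` metric and the contrib import are assumptions for illustration, and the exact return values depend on `metrics_lib.run_metric`, which the snippet relies on but does not define.

import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib  # assumed source of streaming metrics

# Toy predictions/targets; _run_metrics casts targets to predictions.dtype.
predictions = tf.constant([1.0, 0.0, 1.0])
targets = tf.constant([1, 0, 0])
eval_metrics = {'accuracy': metrics_lib.streaming_accuracy}

result = _run_metrics(predictions, targets, eval_metrics, weights=None)
# Each entry of `result` should hold the streaming ops produced for that
# metric, e.g. a (value, update_op) pair for streaming_accuracy.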
Example 3
  def _get_eval_ops(self, features, targets, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses the `model_fn` passed to the constructor to
    build the model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metric ops to run. If None, the default metric
        functions are used; if {}, no metrics are used. If the model has one
        output (i.e., returns a single prediction), keys are `str`, e.g.
        `'accuracy'` - just the name of the metric that will show up in the
        logs / summaries. Otherwise, keys are a tuple of two `str`, e.g.
        `('accuracy', 'classes')` - the name of the metric and the name of
        the `Tensor` in the predictions to run this metric on. Metric ops
        should support streaming, i.e., return update_op and value tensors.
        See more details in
        ../../../../metrics/python/metrics/ops/streaming_metrics.py.

    Returns:
      metrics: `dict` of `Tensor` objects.

    Raises:
      ValueError: if `metrics` don't match `targets`.
    """
    predictions, loss, _ = self._call_model_fn(features, targets, ModeKeys.EVAL)
    result = {'loss': metrics_lib.streaming_mean(loss)}

    weights = self._get_weight_tensor(features)
    metrics = metrics or {}
    if isinstance(targets, dict) and len(targets) == 1:
      # Unpack single target into just tensor.
      targets = targets[list(targets.keys())[0]]
    for name, metric in six.iteritems(metrics):
      if isinstance(name, tuple):
        # Multi-head metrics.
        if not isinstance(predictions, dict):
          raise ValueError(
              'Metrics passed provide (name, prediction), '
              'but predictions are not dict. '
              'Metrics: %s, Predictions: %s.' % (metrics, predictions))
        # Here are two options: targets are single Tensor or a dict.
        if isinstance(targets, dict) and name[1] in targets:
          # If targets are dict and the prediction name is in it, apply metric.
          result[name[0]] = metrics_lib.run_metric(
              metric, predictions[name[1]], targets[name[1]], weights)
        else:
          # Otherwise pass the targets to the metric.
          result[name[0]] = metrics_lib.run_metric(
              metric, predictions[name[1]], targets, weights)
      else:
        # Single head metrics.
        if isinstance(predictions, dict):
          raise ValueError(
              'Metrics passed provide only name, no prediction, '
              'but predictions are dict. '
              'Metrics: %s, Targets: %s.' % (metrics, targets))
        result[name] = metrics_lib.run_metric(
            metric, predictions, targets, weights)
    return result