Example #1
  def _create_modalities(self, problem_hparams, hparams):
    """Construct modalities in problem_hparams."""

    input_modality_overrides = {}
    for override_str in hparams.input_modalities.split(";"):
      if override_str != "default":
        parts = override_str.split(":")
        feature_name = parts[0]
        modality_name = ":".join(parts[1:])
        input_modality_overrides[feature_name] = modality_name

    target_modality_name = None
    if hparams.target_modality and hparams.target_modality != "default":
      target_modality_name = hparams.target_modality

    input_modality = {}
    for f, modality_spec in six.iteritems(problem_hparams.input_modality):
      if f in input_modality_overrides:
        _warn_changed_modality_type(input_modality_overrides[f],
                                    modality_spec[0], f)
        modality_spec = (input_modality_overrides[f], modality_spec[1])
      input_modality[f] = registry.create_modality(modality_spec, hparams)
    problem_hparams.input_modality = input_modality

    target_modality_spec = problem_hparams.target_modality
    if target_modality_name:
      _warn_changed_modality_type(target_modality_name, target_modality_spec[0],
                                  "target")
      target_modality_spec = (target_modality_name, target_modality_spec[1])
    target_modality = registry.create_modality(target_modality_spec, hparams)
    problem_hparams.target_modality = target_modality
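
The override strings parsed above use a simple feature:modality format, with multiple overrides separated by ";" and the literal "default" skipped. A minimal standalone sketch of that parsing; the feature and modality names below are hypothetical placeholders, not taken from these examples:

# Hedged sketch of the hparams.input_modalities layout assumed by _create_modalities.
# "inputs" and "symbol_modality_8000_512" are hypothetical placeholder names.
overrides = "default;inputs:symbol_modality_8000_512"
parsed = {}
for override_str in overrides.split(";"):
  if override_str != "default":
    parts = override_str.split(":")
    parsed[parts[0]] = ":".join(parts[1:])
print(parsed)  # {'inputs': 'symbol_modality_8000_512'}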
Example #2
def _create_tpu_eval_metrics_fn(problem, hparams):
  """Create the metrics_fn that TPUEstimatorSpec expects."""

  tm = problem.get_hparams().target_modality
  if isinstance(tm, tuple):
    tm = registry.create_modality(tm, hparams)
  weights_fn = tm.targets_weights_fn

  def make_metric_fn(metric_fn):

    def wrapped_metric_fn(logits, labels):
      num, den = metric_fn(logits, labels, weights_fn=weights_fn)
      return tf.metrics.mean(num, den)

    return wrapped_metric_fn

  metric_fns = []
  eval_metrics = problem.eval_metrics()

  for metric in eval_metrics:
    if metric in TPU_METRIC_BLACKLIST:
      tf.logging.warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
      continue
    name = "metrics-%s/%s" % (problem.name, metric)
    metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))

  def all_metrics_fn(logits, labels):
    metrics_dict = {}

    for name, fn in metric_fns:
      metrics_dict[name] = fn(logits, labels)

    return metrics_dict

  return all_metrics_fn
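
The returned all_metrics_fn is designed to be handed to TPUEstimatorSpec together with the tensors it consumes. A rough wiring sketch, assuming the TF 1.x tf.contrib.tpu API; loss, logits and labels stand in for tensors computed elsewhere in the model_fn:

# Hedged sketch (TF 1.x / tf.contrib.tpu assumed; `loss`, `logits` and `labels`
# are hypothetical tensors produced earlier in the model_fn).
metrics_fn = _create_tpu_eval_metrics_fn(problem, hparams)
spec = tf.contrib.tpu.TPUEstimatorSpec(
    mode=tf.estimator.ModeKeys.EVAL,
    loss=loss,
    eval_metrics=(metrics_fn, [logits, labels]))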
Example #3
def create_eager_metrics_for_problem(problem, model_hparams=None):
  """See create_eager_metrics."""
  metric_names = problem.eval_metrics()
  tm = problem.get_hparams().target_modality
  if isinstance(tm, tuple):
    assert model_hparams is not None
    tm = registry.create_modality(tm, model_hparams)
  return create_eager_metrics(metric_names, weights_fn=tm.targets_weights_fn)
Example #4
    def _create_modalities(self, problem_hparams, hparams):
        """Construct modalities in problem_hparams."""

        input_modality_overrides = {}
        for override_str in hparams.input_modalities.split(";"):
            parts = override_str.split(":")
            feature_name = parts[0]
            modality_name = ":".join(parts[1:])
            input_modality_overrides[feature_name] = modality_name

        target_modality_name = None
        if hparams.target_modality:
            target_modality_name = hparams.target_modality

        input_modality = {}
        for f, modality_spec in six.iteritems(problem_hparams.input_modality):
            if isinstance(modality_spec, modality.Modality):
                # This function has already been run (e.g. for training) and is
                # now being called again (e.g. for eval), so the modalities have
                # already been constructed. Return.
                return
            if f in input_modality_overrides:
                _warn_changed_modality_type(input_modality_overrides[f],
                                            modality_spec[0], f)
                modality_spec = (input_modality_overrides[f], modality_spec[1])
            input_modality[f] = registry.create_modality(
                modality_spec, hparams)
        problem_hparams.input_modality = input_modality

        target_modality_spec = problem_hparams.target_modality
        if isinstance(target_modality_spec, modality.Modality):
            return
        if target_modality_name:
            _warn_changed_modality_type(target_modality_name,
                                        target_modality_spec[0], "target")
            target_modality_spec = (target_modality_name,
                                    target_modality_spec[1])
        target_modality = registry.create_modality(target_modality_spec,
                                                   hparams)
        problem_hparams.target_modality = target_modality
Example #5
  def _create_modalities(self, problem_hparams, hparams):
    """Construct modalities in problem_hparams."""

    input_modality_overrides = {}
    for override_str in hparams.input_modalities.split(";"):
      if override_str != "default":
        parts = override_str.split(":")
        feature_name = parts[0]
        modality_name = ":".join(parts[1:])
        input_modality_overrides[feature_name] = modality_name

    target_modality_name = None
    if hparams.target_modality and hparams.target_modality != "default":
      target_modality_name = hparams.target_modality

    input_modality = {}
    for f, modality_spec in six.iteritems(problem_hparams.input_modality):
      if f in input_modality_overrides:
        _warn_changed_modality_type(input_modality_overrides[f],
                                    modality_spec[0], f)
        modality_spec = (input_modality_overrides[f], modality_spec[1])
      input_modality[f] = registry.create_modality(modality_spec, hparams)
    problem_hparams.input_modality = input_modality

    if isinstance(problem_hparams.target_modality, dict):
      target_modality = {}
      for f, modality_spec in six.iteritems(problem_hparams.target_modality):
        if target_modality_name:
          _warn_changed_modality_type(target_modality_name, modality_spec[0],
                                      "target_modality/%s" % f)
          modality_spec = (target_modality_name, modality_spec[1])
        target_modality[f] = registry.create_modality(modality_spec, hparams)
    else:
      target_modality_spec = problem_hparams.target_modality
      if target_modality_name:
        _warn_changed_modality_type(target_modality_name,
                                    target_modality_spec[0], "target")
        target_modality_spec = (target_modality_name, target_modality_spec[1])
      target_modality = registry.create_modality(target_modality_spec, hparams)
    problem_hparams.target_modality = target_modality
Example #6
def _create_tpu_eval_metrics_fn(problem, hparams):
  """Create the metrics_fn that TPUEstimatorSpec expects."""

  tm = problem.get_hparams().target_modality
  if isinstance(tm, tuple):
    tm = registry.create_modality(tm, hparams)
  weights_fn = tm.targets_weights_fn

  def make_metric_fn(metric_fn):

    def wrapped_metric_fn(logits, labels):
      num, den = metric_fn(logits, labels, weights_fn=weights_fn)
      return tf.metrics.mean(num, den)

    return wrapped_metric_fn

  metric_fns = []
  eval_metrics = problem.eval_metrics()

  for metric in eval_metrics:
    if metric in TPU_METRIC_BLACKLIST:
      log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
      continue
    name = "metrics-%s/%s" % (problem.name, metric)
    metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))

  def all_metrics_fn(logits=None, labels=None, **kwargs):
    """Construct metrics dictionary."""
    metrics_dict = {}

    if logits is None:
      logits = kwargs

    for name, fn in metric_fns:
      if isinstance(logits, dict):
        for k, v in six.iteritems(logits):
          metrics_dict["%s/%s" % (name, k)] = fn(v, labels)
      else:
        metrics_dict[name] = fn(logits, labels)

    return metrics_dict

  return all_metrics_fn
Example #7
def create_evaluation_metrics(problems, model_hparams):
    """Creates the evaluation metrics for the model.

  Args:
    problems: List of Problem instances.
    model_hparams: a set of hparams.

  Returns:
    dict<metric name, metric function>. The metric functions have signature
    (Tensor predictions, features) -> (metric Tensor, update op), where features
    is a dict with keys {targets}.

  Raises:
    ValueError: if the metrics specified by a problem are not recognized (i.e.
      are not defined in the Metrics enum).
  """
    def reduce_dimensions(predictions, labels):
        """Reduce dimensions for high-dimensional predictions and labels."""
        # We will treat the first dimensions as batch. One example is video frames.
        if len(predictions.get_shape()) > 5:
            predictions_shape = common_layers.shape_list(predictions)
            predictions = tf.reshape(predictions, [
                predictions_shape[0], predictions_shape[1], -1,
                predictions_shape[-1]
            ])
            labels_shape = common_layers.shape_list(labels)
            labels = tf.reshape(labels, [labels_shape[0], labels_shape[1], -1])
        return predictions, labels

    def make_problem_specific_metric_fn(metric_fn, weights_fn):
        """Create a metric fn."""
        def problem_metric_fn(predictions, features, labels):
            """Metric fn."""
            # Send along the entire features dict if the metric fn has the kwarg
            # "features".
            kwargs = {}
            args, _, keywords, _ = inspect.getargspec(metric_fn)
            if ("features" in args) or keywords:
                kwargs["features"] = features

            predictions, labels = reduce_dimensions(predictions, labels)

            scores, weights = metric_fn(predictions,
                                        labels,
                                        weights_fn=weights_fn,
                                        **kwargs)
            return tf.metrics.mean(scores, weights)

        return problem_metric_fn

    def make_image_wrapped_metric_fn(metric_fn):
        """Metric fn without tf.metrics.mean."""
        def image_wrapped_metric_fn(predictions,
                                    features,
                                    labels,
                                    weights_fn=common_layers.weights_all):
            del weights_fn
            del features
            predictions, labels = reduce_dimensions(predictions, labels)
            return metric_fn(predictions, labels, model_hparams)

        return image_wrapped_metric_fn

    def weights_fn_for_mp(problem_task_id):
        return lambda x: common_layers.weights_multi_problem(
            x, problem_task_id)

    eval_metrics = dict()
    for problem_instance in problems:
        problem_name = problem_instance.name
        metrics = problem_instance.eval_metrics()
        if hasattr(model_hparams.problem, "task_list"):
            metrics = model_hparams.problem.eval_metrics()
        if not all([m in METRICS_FNS for m in metrics]):
            error_str = ("Unrecognized metric. Problem %s specified metrics "
                         "%s. Recognized metrics are %s.")
            raise ValueError(error_str %
                             (problem_name, metrics, list(METRICS_FNS.keys())))

        tm = problem_instance.get_hparams().target_modality
        if not isinstance(tm, dict):
            tm = {"targets": tm}

        for target_name, modality in six.iteritems(tm):
            if isinstance(modality, tuple):
                modality = registry.create_modality(modality, model_hparams)
            weights_fn = modality.targets_weights_fn
            if hasattr(model_hparams.problem, "task_list"):
                ptid = problem_instance.task_id  # pylint: disable=cell-var-from-loop
                weights_fn = weights_fn_for_mp(ptid)

            for metric in metrics:
                metric_fn = METRICS_FNS[metric]
                metric_name = "metrics-%s/%s/%s" % (problem_name, target_name,
                                                    metric)
                if metric == Metrics.IMAGE_SUMMARY:
                    eval_metrics[metric_name] = make_image_wrapped_metric_fn(
                        metric_fn)
                else:
                    eval_metrics[
                        metric_name] = make_problem_specific_metric_fn(
                            metric_fn, weights_fn)

    return eval_metrics
Example #8
def create_evaluation_metrics(problems, model_hparams):
  """Creates the evaluation metrics for the model.

  Args:
    problems: List of Problem instances.
    model_hparams: a set of hparams.

  Returns:
    dict<metric name, metric function>. The metric functions have signature
    (Tensor predictions, features) -> (metric Tensor, update op), where features
    is a dict with keys {targets, problem_choice}.

  Raises:
    ValueError: if the metrics specified by a problem are not recognized (i.e.
      are not defined in the Metrics enum).
  """

  def make_problem_specific_metric_fn(metric_fn, problem_idx, weights_fn):
    """Create a metric fn conditioned on problem_idx."""

    def problem_metric_fn(predictions, features):
      """Metric fn."""
      labels = features.get("targets", None)
      problem_choice = features.get("problem_choice", 0)

      # Send along the entire features dict if the metric fn has the kwarg
      # "features".
      kwargs = {}
      args, _, keywords, _ = inspect.getargspec(metric_fn)
      if ("features" in args) or keywords:
        kwargs["features"] = features

      def wrapped_metric_fn():
        return metric_fn(predictions, labels, weights_fn=weights_fn, **kwargs)

      (scores, weights) = tf.cond(
          tf.equal(problem_idx, problem_choice), wrapped_metric_fn,
          lambda: (tf.constant(0.0), tf.constant(0.0)))
      # The tf.metrics.mean function ensures correct aggregation.
      return tf.metrics.mean(scores, weights)

    return problem_metric_fn

  eval_metrics = dict()
  for problem_idx, problem_instance in enumerate(problems):
    problem_name = problem_instance.name
    metrics = problem_instance.eval_metrics()
    if not all([m in METRICS_FNS for m in metrics]):
      error_str = ("Unrecognized metric. Problem %s specified metrics "
                   "%s. Recognized metrics are %s.")
      raise ValueError(error_str % (problem_name,
                                    metrics,
                                    list(METRICS_FNS.keys())))

    def image_wrapped_metric_fn(predictions,
                                labels,
                                weights_fn=common_layers.weights_nonzero):
      _, _ = labels, weights_fn
      return metric_fn(predictions, model_hparams)

    tm = problem_instance.get_hparams().target_modality
    if isinstance(tm, tuple):
      tm = registry.create_modality(tm, model_hparams)
    weights_fn = tm.targets_weights_fn

    for metric in metrics:
      metric_fn = METRICS_FNS[metric]
      metric_name = "metrics-%s/%s" % (problem_name, metric)
      if metric == Metrics.IMAGE_SUMMARY:
        eval_metrics[metric_name] = image_wrapped_metric_fn
      else:
        problem_metric_fn = make_problem_specific_metric_fn(
            metric_fn, problem_idx, weights_fn)
        eval_metrics[metric_name] = problem_metric_fn

  return eval_metrics
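
Per the docstring, each returned metric function maps (predictions, features) to a (metric Tensor, update op) pair, which is the shape tf.estimator expects for eval_metric_ops. A minimal hookup sketch; predictions, features and loss are hypothetical values built elsewhere in the model_fn:

# Hedged sketch (TF 1.x estimator API; `predictions`, `features` and `loss`
# are hypothetical objects produced by the model_fn).
eval_metrics = create_evaluation_metrics([problem_instance], model_hparams)
eval_metric_ops = {
    name: fn(predictions, features) for name, fn in eval_metrics.items()
}
spec = tf.estimator.EstimatorSpec(
    mode=tf.estimator.ModeKeys.EVAL,
    loss=loss,
    eval_metric_ops=eval_metric_ops)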
Example #9
def create_evaluation_metrics(problems, model_hparams):
    """Creates the evaluation metrics for the model.

  Args:
    problems: List of Problem instances.
    model_hparams: a set of hparams.

  Returns:
    dict<metric name, metric function>. The metric functions have signature
    (Tensor predictions, features) -> (metric Tensor, update op), where features
    is a dict with keys {targets}.

  Raises:
    ValueError: if the metrics specified by a problem are not recognized (i.e.
      are not defined in the Metrics enum).
  """
    def reduce_dimensions(predictions, labels):
        """Reduce dimensions for high-dimensional predictions and labels."""
        # We will treat the first dimensions as batch. One example is video frames.
        if len(predictions.get_shape()) > 5:
            predictions = tf.reshape(
                predictions, [-1] + common_layers.shape_list(predictions)[-4:])
        if len(labels.get_shape()) > 4:
            labels = tf.reshape(labels,
                                [-1] + common_layers.shape_list(labels)[-3:])
        return predictions, labels

    def make_problem_specific_metric_fn(metric_fn, weights_fn):
        """Create a metric fn."""
        def problem_metric_fn(predictions, features, labels):
            """Metric fn."""
            # Send along the entire features dict if the metric fn has the kwarg
            # "features".
            kwargs = {}
            args, _, keywords, _ = inspect.getargspec(metric_fn)
            if ("features" in args) or keywords:
                kwargs["features"] = features

            predictions, labels = reduce_dimensions(predictions, labels)

            scores, weights = metric_fn(predictions,
                                        labels,
                                        weights_fn=weights_fn,
                                        **kwargs)
            return tf.metrics.mean(scores, weights)

        return problem_metric_fn

    eval_metrics = dict()
    for problem_instance in problems:
        problem_name = problem_instance.name
        metrics = problem_instance.eval_metrics()
        if not all([m in METRICS_FNS for m in metrics]):
            error_str = ("Unrecognized metric. Problem %s specified metrics "
                         "%s. Recognized metrics are %s.")
            raise ValueError(error_str %
                             (problem_name, metrics, list(METRICS_FNS.keys())))

        def image_wrapped_metric_fn(predictions,
                                    features,
                                    labels,
                                    weights_fn=common_layers.weights_all):
            del weights_fn
            del features
            predictions, labels = reduce_dimensions(predictions, labels)
            return metric_fn(predictions, labels, model_hparams)

        tm = problem_instance.get_hparams().target_modality
        if isinstance(tm, dict):
            for k, v in six.iteritems(tm):
                if isinstance(v, tuple):
                    v = registry.create_modality(v, model_hparams)
                weights_fn = v.targets_weights_fn

                for metric in metrics:
                    metric_fn = METRICS_FNS[metric]
                    metric_name = "metrics-%s/%s/%s" % (problem_name, k,
                                                        metric)
                    if metric == Metrics.IMAGE_SUMMARY:
                        eval_metrics[metric_name] = image_wrapped_metric_fn
                    else:
                        problem_metric_fn = make_problem_specific_metric_fn(
                            metric_fn, weights_fn)
                        eval_metrics[metric_name] = problem_metric_fn
        else:
            if isinstance(tm, tuple):
                tm = registry.create_modality(tm, model_hparams)
            weights_fn = tm.targets_weights_fn

            for metric in metrics:
                metric_fn = METRICS_FNS[metric]
                metric_name = "metrics-%s/%s" % (problem_name, metric)
                if metric == Metrics.IMAGE_SUMMARY:
                    eval_metrics[metric_name] = image_wrapped_metric_fn
                else:
                    problem_metric_fn = make_problem_specific_metric_fn(
                        metric_fn, weights_fn)
                    eval_metrics[metric_name] = problem_metric_fn

    return eval_metrics
Example #10
def _create_tpu_eval_metrics_fn(problem, hparams):
  """Create the metrics_fn that TPUEstimatorSpec expects."""

  metric_fns = []
  eval_metrics = problem.eval_metrics()

  tm = problem.get_hparams().target_modality
  if isinstance(tm, dict):
    for k, v in six.iteritems(tm):
      if isinstance(v, tuple):
        v = registry.create_modality(v, hparams)
      weights_fn = v.targets_weights_fn

      def make_metric_fn(metric_fn):

        def wrapped_metric_fn(logits, labels, weights_fn=weights_fn):
          num, den = metric_fn(logits, labels, weights_fn=weights_fn)
          return tf.metrics.mean(num, den)

        return wrapped_metric_fn

      for metric in eval_metrics:
        if metric in TPU_METRIC_BLACKLIST:
          log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
          continue
        name = "%s/metrics-%s/%s" % (k, problem.name, metric)
        metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))
  else:
    if isinstance(tm, tuple):
      tm = registry.create_modality(tm, hparams)
    weights_fn = tm.targets_weights_fn

    def make_metric_fn(metric_fn):

      def wrapped_metric_fn(logits, labels):
        num, den = metric_fn(logits, labels, weights_fn=weights_fn)
        return tf.metrics.mean(num, den)

      return wrapped_metric_fn

    for metric in eval_metrics:
      if metric in TPU_METRIC_BLACKLIST:
        log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
        continue
      name = "metrics-%s/%s" % (problem.name, metric)
      metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))

  def all_metrics_fn(logits=None, labels=None, **kwargs):
    """Construct metrics dictionary."""
    metrics_dict = {}

    if logits is None:
      logits = kwargs

    for name, fn in metric_fns:
      if isinstance(logits, dict):
        for k, v in six.iteritems(logits):
          if isinstance(labels, dict):
            metrics_dict["%s/%s" % (name, k)] = fn(v, labels[k])
          else:
            metrics_dict["%s/%s" % (name, k)] = fn(v, labels)
      else:
        metrics_dict[name] = fn(logits, labels)

    return metrics_dict

  return all_metrics_fn
Example #11
def create_evaluation_metrics(problems, model_hparams):
    """Creates the evaluation metrics for the model.

  Args:
    problems: List of Problem instances.
    model_hparams: a set of hparams.

  Returns:
    dict<metric name, metric function>. The metric functions have signature
    (Tensor predictions, features) -> (metric Tensor, update op), where features
    is a dict with keys {targets, problem_choice}.

  Raises:
    ValueError: if the metrics specified by a problem are not recognized (i.e.
      are not defined in the Metrics enum).
  """
    def make_problem_specific_metric_fn(metric_fn, problem_idx, weights_fn):
        """Create a metric fn conditioned on problem_idx."""
        def problem_metric_fn(predictions, features):
            """Metric fn."""
            labels = features.get("targets", None)
            problem_choice = features.get("problem_choice", 0)

            # Send along the entire features dict if the metric fn has the kwarg
            # "features".
            kwargs = {}
            args, _, keywords, _ = inspect.getargspec(metric_fn)
            if ("features" in args) or keywords:
                kwargs["features"] = features

            def wrapped_metric_fn():
                return metric_fn(predictions,
                                 labels,
                                 weights_fn=weights_fn,
                                 **kwargs)

            (scores,
             weights) = tf.cond(tf.equal(problem_idx,
                                         problem_choice), wrapped_metric_fn,
                                lambda: (tf.constant(0.0), tf.constant(0.0)))
            # The tf.metrics.mean function ensures correct aggregation.
            return tf.metrics.mean(scores, weights)

        return problem_metric_fn

    eval_metrics = dict()
    for problem_idx, problem_instance in enumerate(problems):
        problem_name = problem_instance.name
        metrics = problem_instance.eval_metrics()
        if not all([m in METRICS_FNS for m in metrics]):
            error_str = ("Unrecognized metric. Problem %s specified metrics "
                         "%s. Recognized metrics are %s.")
            raise ValueError(error_str %
                             (problem_name, metrics, list(METRICS_FNS.keys())))

        def image_wrapped_metric_fn(predictions,
                                    labels,
                                    weights_fn=common_layers.weights_nonzero):
            _, _ = labels, weights_fn
            return metric_fn(predictions, model_hparams)

        tm = problem_instance.get_hparams().target_modality
        if isinstance(tm, tuple):
            tm = registry.create_modality(tm, model_hparams)
        weights_fn = tm.targets_weights_fn

        for metric in metrics:
            metric_fn = METRICS_FNS[metric]
            metric_name = "metrics-%s/%s" % (problem_name, metric)
            if metric == Metrics.IMAGE_SUMMARY:
                eval_metrics[metric_name] = image_wrapped_metric_fn
            else:
                problem_metric_fn = make_problem_specific_metric_fn(
                    metric_fn, problem_idx, weights_fn)
                eval_metrics[metric_name] = problem_metric_fn

    return eval_metrics