Example #1
    def test_subnetwork_metrics(self, use_tpu):
        spec = self._estimator_spec
        if not use_tpu:
            spec = spec.as_estimator_spec()
        metrics = _SubnetworkMetrics()
        metrics.create_eval_metrics(self._features, self._labels, spec,
                                    self._metric_fn)

        with self.test_session() as sess:
            actual = _run_metrics(sess, metrics.eval_metrics_tuple())

        expected = {"loss": 2., "metric_1": 1., "metric_2": 2.}
        self.assertEqual(actual, expected)
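
The `_run_metrics` helper used by this and later examples is not shown in the listing. A plausible graph-mode implementation, assuming `eval_metrics_tuple()` returns a `(metric_fn, tensors)` pair in the TPU `eval_metrics` convention and that the metric function yields `(value_tensor, update_op)` pairs, might look like this:

import tensorflow as tf


def _run_metrics(sess, metrics_tuple):
    # Materialize the metric ops: calling the function on its tensor arguments
    # yields a dict of name -> (value_tensor, update_op) pairs.
    metric_fn, args = metrics_tuple
    metric_ops = metric_fn(**args) if isinstance(args, dict) else metric_fn(*args)
    # Metric accumulators live in local (and sometimes global) variables.
    sess.run(tf.compat.v1.global_variables_initializer())
    sess.run(tf.compat.v1.local_variables_initializer())
    # Run every update op once, then read back the resulting metric values.
    sess.run({name: update_op for name, (_, update_op) in metric_ops.items()})
    return {name: sess.run(value) for name, (value, _) in metric_ops.items()}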
Example #2
    def test_subnetwork_metrics(self, use_tpu):
        with context.graph_mode():
            self.setup_graph()
            spec = self._estimator_spec
            if not use_tpu:
                spec = spec.as_estimator_spec()
            metrics = _SubnetworkMetrics()
            metrics.create_eval_metrics(self._features, self._labels, spec,
                                        self._metric_fn)

            actual = self._run_metrics(metrics.eval_metrics_tuple())

            expected = {"loss": 2., "metric_1": 1., "metric_2": 2.}
            self.assertEqual(actual, expected)
Example #3
  def test_subnetwork_metrics_user_metric_fn_overrides_metrics(self):

    overridden_value = 100.

    def _overriding_metric_fn():
      return {"metric_1": tf.metrics.mean(tf.constant(overridden_value))}

    metrics = _SubnetworkMetrics()
    metrics.create_eval_metrics(self._features, self._labels,
                                self._estimator_spec, _overriding_metric_fn)

    with self.test_session() as sess:
      actual = _run_metrics(sess, metrics.eval_metrics_tuple())

    expected = {"loss": 2., "metric_1": overridden_value}
    self.assertEqual(actual, expected)
Example #4
def create_subnetwork_metrics(metric_fn,
                              use_tpu=False,
                              features=None,
                              labels=None,
                              estimator_spec=None):
    """Creates an instance of the _SubnetworkMetrics class.

  Args:
    metric_fn: A function which should obey the following signature:
      - Args: can only have the following three arguments in any order:
        * predictions: Predictions `Tensor` or dict of `Tensor` created by the
          given `Head`.
        * features: Input `dict` of `Tensor` objects created by `input_fn`,
          which is given to `estimator.evaluate` as an argument.
        * labels: Labels `Tensor` or dict of `Tensor` (for multi-head) created
          by `input_fn`, which is given to `estimator.evaluate` as an argument.
      - Returns: Dict of metric results keyed by name. The final metrics are a
        union of this dict and the `estimator`'s existing metrics. If there is a
        name conflict, the value from this dict overrides the existing one. The
        values of the dict are the results of calling a metric function, namely
        a `(metric_tensor, update_op)` tuple.
    use_tpu: Whether to use TPU-specific variable sharing logic.
    features: Input `dict` of `Tensor` objects.
    labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
      (for multi-head).
    estimator_spec: The `EstimatorSpec` created by a `Head` instance.

  Returns:
    An instance of _SubnetworkMetrics.
  """

    if not estimator_spec:
        estimator_spec = tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=tf.constant(2.),
            predictions=None,
            eval_metrics=None)
        if not use_tpu:
            estimator_spec = estimator_spec.as_estimator_spec()

    metrics = _SubnetworkMetrics(use_tpu=use_tpu)
    metrics.create_eval_metrics(features, labels, estimator_spec, metric_fn)

    return metrics
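
As a concrete illustration of the contract documented above, here is a hypothetical `metric_fn` (the function name and the toy feature/label tensors are illustrative only). It requests only `labels`, returns a name-to-`(metric_tensor, update_op)` dict, and is wired through `create_subnetwork_metrics`; `tf.compat.v1.metrics.mean` is the same op as the `tf_compat.v1.metrics.mean` used elsewhere in this listing:

import tensorflow as tf


def _mean_label_metric_fn(labels):
    # Any subset of (predictions, features, labels) may be requested by name.
    return {"mean_label": tf.compat.v1.metrics.mean(labels)}


metrics = create_subnetwork_metrics(
    metric_fn=_mean_label_metric_fn,
    features={"x": tf.constant([[1.], [2.]])},
    labels=tf.constant([1., 0.]))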
Example #5
    def test_subnetwork_metrics_user_metric_fn_overrides_metrics(self):
        with context.graph_mode():
            self.setup_graph()
            overridden_value = 100.

            def _overriding_metric_fn():
                value = tf.constant(overridden_value)

                return {"metric_1": tf_compat.v1.metrics.mean(value)}

            metrics = _SubnetworkMetrics()
            metrics.create_eval_metrics(self._features, self._labels,
                                        self._estimator_spec,
                                        _overriding_metric_fn)

            actual = self._run_metrics(metrics.eval_metrics_tuple())

            expected = {"loss": 2., "metric_1": overridden_value}
            self.assertEqual(actual, expected)
Example #6
  def test_subnetwork_metrics_user_metric_fn_overrides_metrics(self):

    overridden_value = 100.

    def _overriding_metric_fn():
      value = tf.constant(overridden_value)
      if tf.executing_eagerly():
        metric = tf.metrics.Mean()
        metric.update_state(value)
        return {"metric_1": metric}
      return {"metric_1": tf_compat.v1.metrics.mean(value)}

    metrics = _SubnetworkMetrics()
    metrics.create_eval_metrics(self._features, self._labels,
                                self._estimator_spec, _overriding_metric_fn)

    with self.test_session() as sess:
      actual = _run_metrics(sess, metrics.eval_metrics_tuple())

    expected = {"loss": 2., "metric_1": overridden_value}
    self.assertEqual(actual, expected)
Example #7
  def build_subnetwork_spec(self,
                            name,
                            subnetwork_builder,
                            iteration_step,
                            summary,
                            features,
                            mode,
                            labels=None,
                            previous_ensemble=None):
    """Builds a `_SubnetworkSpec` from the given `adanet.subnetwork.Builder`.

    Args:
      name: String name of the subnetwork.
      subnetwork_builder: An `adanet.Builder` instance which defines how to train
        the subnetwork and ensemble mixture weights.
      iteration_step: Integer `Tensor` representing the step since the beginning
        of the current iteration, as opposed to the global step.
      summary: A `_ScopedSummary` instance for recording ensemble summaries.
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head). Can be `None`.
      previous_ensemble: The previous `Ensemble` from iteration t-1. Used for
        creating the subnetwork train_op.

    Returns:
      A new `_SubnetworkSpec` instance.
    """

    before_var_list = tf.trainable_variables()
    with tf.variable_scope("subnetwork_{}".format(name)):
      build_subnetwork = functools.partial(
          subnetwork_builder.build_subnetwork,
          features=features,
          logits_dimension=self._head.logits_dimension,
          training=mode == tf.estimator.ModeKeys.TRAIN,
          iteration_step=iteration_step,
          summary=summary,
          previous_ensemble=previous_ensemble)
      # Check which args are in the implemented build_subnetwork method
      # signature for backwards compatibility.
      defined_args = inspect.getargspec(
          subnetwork_builder.build_subnetwork).args
      if "labels" in defined_args:
        build_subnetwork = functools.partial(build_subnetwork, labels=labels)
      subnetwork_scope = tf.get_variable_scope()
      with summary.current_scope(), _monkey_patch_context(
          iteration_step_scope=subnetwork_scope,
          scoped_summary=summary,
          trainable_vars=[]):
        subnetwork = build_subnetwork()
      subnetwork_var_list = _new_trainable_variables(before_var_list)

      estimator_spec = _create_estimator_spec(
          self._head, features, labels, mode, subnetwork.logits, self._use_tpu)

      subnetwork_metrics = _SubnetworkMetrics()
      if mode == tf.estimator.ModeKeys.EVAL:
        subnetwork_metrics.create_eval_metrics(
            features=features,
            labels=labels,
            estimator_spec=estimator_spec,
            metric_fn=self._metric_fn)

      if mode == tf.estimator.ModeKeys.TRAIN:
        with summary.current_scope():
          summary.scalar("loss", estimator_spec.loss)

      # Create train ops for training subnetworks and ensembles.
      train_op = None
      if mode == tf.estimator.ModeKeys.TRAIN and subnetwork_builder:
        with summary.current_scope(), _monkey_patch_context(
            iteration_step_scope=subnetwork_scope,
            scoped_summary=summary,
            trainable_vars=subnetwork_var_list):
          train_op = _to_train_op_spec(
              subnetwork_builder.build_subnetwork_train_op(
                  subnetwork=subnetwork,
                  loss=estimator_spec.loss,
                  var_list=subnetwork_var_list,
                  labels=labels,
                  iteration_step=iteration_step,
                  summary=summary,
                  previous_ensemble=previous_ensemble))
    return _SubnetworkSpec(
        name=name,
        subnetwork=subnetwork,
        builder=subnetwork_builder,
        predictions=estimator_spec.predictions,
        loss=estimator_spec.loss,
        train_op=train_op,
        eval_metrics=subnetwork_metrics.eval_metrics_tuple())
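
For reference, the `subnetwork_builder` argument above is expected to implement `build_subnetwork` with the keyword arguments supplied by the `functools.partial` call (and, optionally, `labels`, which the signature check then forwards). A minimal, hypothetical sketch follows; the class name and layer sizes are illustrative, and depending on the AdaNet version further abstract methods (for example `build_mixture_weights_train_op`) may also be required:

import adanet
import tensorflow as tf


class _SimpleDNNBuilder(adanet.subnetwork.Builder):
  """Hypothetical builder matching the call signature used above."""

  @property
  def name(self):
    return "simple_dnn"

  def build_subnetwork(self, features, logits_dimension, training,
                       iteration_step, summary, previous_ensemble,
                       labels=None):
    # Assumes a single dense feature named "x". Because `labels` appears in
    # the signature, the backwards-compatibility check passes it in as well.
    hidden = tf.compat.v1.layers.dense(features["x"], units=16,
                                       activation=tf.nn.relu)
    logits = tf.compat.v1.layers.dense(hidden, units=logits_dimension)
    return adanet.Subnetwork(
        last_layer=hidden, logits=logits, complexity=tf.constant(1.))

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)
    return optimizer.minimize(loss, var_list=var_list)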
Example #8
    def build_subnetwork_spec(self,
                              name,
                              subnetwork_builder,
                              summary,
                              features,
                              mode,
                              labels=None,
                              previous_ensemble=None,
                              config=None):
        """Builds a `_SubnetworkSpec` from the given `adanet.subnetwork.Builder`.

    Args:
      name: String name of the subnetwork.
      subnetwork_builder: An `adanet.Builder` instance which defines how to train
        the subnetwork and ensemble mixture weights.
      summary: A `_ScopedSummary` instance for recording ensemble summaries.
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head). Can be `None`.
      previous_ensemble: The previous `Ensemble` from iteration t-1. Used for
        creating the subnetwork train_op.
      config: The `tf.estimator.RunConfig` to use this iteration.

    Returns:
      A new `_SubnetworkSpec` instance.
    """

        old_vars = _get_current_vars()

        with tf_compat.v1.variable_scope("subnetwork_{}".format(name)):
            step = tf_compat.v1.get_variable(
                "step",
                shape=[],
                initializer=tf_compat.v1.zeros_initializer(),
                trainable=False,
                dtype=tf.int64)

            # Convert to tensor so that users cannot mutate it.
            step_tensor = tf.convert_to_tensor(value=step)
            with summary.current_scope():
                summary.scalar("iteration_step/adanet/iteration_step",
                               step_tensor)
            if config:
                subnetwork_config = config.replace(
                    model_dir=os.path.join(config.model_dir, "assets", name))
            else:
                subnetwork_config = tf.estimator.RunConfig(
                    session_config=tf.compat.v1.ConfigProto(
                        gpu_options=tf.compat.v1.GPUOptions(
                            allow_growth=True)))

            build_subnetwork = functools.partial(
                subnetwork_builder.build_subnetwork,
                features=features,
                logits_dimension=self._head.logits_dimension,
                training=mode == tf.estimator.ModeKeys.TRAIN,
                iteration_step=step_tensor,
                summary=summary,
                previous_ensemble=previous_ensemble)
            # Check which args are in the implemented build_subnetwork method
            # signature for backwards compatibility.
            # Calling low level getargs for py_2_and_3 compatibility.
            defined_args = inspect.getargs(
                subnetwork_builder.build_subnetwork.__code__).args
            if "labels" in defined_args:
                build_subnetwork = functools.partial(build_subnetwork,
                                                     labels=labels)
            if "config" in defined_args:
                build_subnetwork = functools.partial(build_subnetwork,
                                                     config=subnetwork_config)
            subnetwork_scope = tf_compat.v1.get_variable_scope()
            with summary.current_scope(), _monkey_patch_context(
                    iteration_step_scope=subnetwork_scope,
                    scoped_summary=summary,
                    trainable_vars=[]):
                subnetwork = build_subnetwork()

            subnetwork_var_list = _get_current_vars(
                diffbase=old_vars)["trainable"]

            estimator_spec = _create_estimator_spec(self._head, features,
                                                    labels, mode,
                                                    subnetwork.logits,
                                                    self._use_tpu)

            subnetwork_metrics = _SubnetworkMetrics(self._use_tpu)
            if mode == tf.estimator.ModeKeys.EVAL:
                subnetwork_metrics.create_eval_metrics(
                    features=features,
                    labels=labels,
                    estimator_spec=estimator_spec,
                    metric_fn=self._metric_fn)

            if mode == tf.estimator.ModeKeys.TRAIN:
                with summary.current_scope():
                    summary.scalar("loss", estimator_spec.loss)

            # Create train ops for training subnetworks and ensembles.
            train_op = None
            if mode == tf.estimator.ModeKeys.TRAIN and subnetwork_builder:
                with summary.current_scope(), _monkey_patch_context(
                        iteration_step_scope=subnetwork_scope,
                        scoped_summary=summary,
                        trainable_vars=subnetwork_var_list):
                    train_op = _to_train_op_spec(
                        subnetwork_builder.build_subnetwork_train_op(
                            subnetwork=subnetwork,
                            loss=estimator_spec.loss,
                            var_list=subnetwork_var_list,
                            labels=labels,
                            iteration_step=step_tensor,
                            summary=summary,
                            previous_ensemble=previous_ensemble))

            new_vars = _get_current_vars(diffbase=old_vars)
            # Sort our dictionary by key to remove non-determinism of variable order.
            new_vars = collections.OrderedDict(sorted(new_vars.items()))
            # Combine all trainable, global and savable variables into a single list.
            subnetwork_variables = sum(new_vars.values(), []) + [step]

        return _SubnetworkSpec(name=name,
                               subnetwork=subnetwork,
                               builder=subnetwork_builder,
                               predictions=estimator_spec.predictions,
                               variables=subnetwork_variables,
                               loss=estimator_spec.loss,
                               step=step,
                               train_op=train_op,
                               eval_metrics=subnetwork_metrics,
                               asset_dir=subnetwork_config.model_dir)
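
The `_get_current_vars` helper is not shown in this listing. Judging from its use above (a `diffbase=` snapshot, a "trainable" key, and the comment about combining trainable, global and savable variables), a plausible sketch is the following; the collection keys and the diffing strategy are assumptions, not the library's actual implementation:

import tensorflow as tf


def _get_current_vars(diffbase=None):
    # Snapshot the graph's variable collections, keyed by collection name.
    current = {
        "trainable": tf.compat.v1.trainable_variables(),
        "global": tf.compat.v1.global_variables(),
        "savable": tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.SAVEABLE_OBJECTS),
    }
    if diffbase is None:
        return current
    # With a `diffbase` snapshot, return only objects created since then.
    base_ids = {key: {id(v) for v in variables}
                for key, variables in diffbase.items()}
    return {key: [v for v in variables if id(v) not in base_ids.get(key, set())]
            for key, variables in current.items()}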
Example #9
    def build_subnetwork_spec(self,
                              name,
                              subnetwork_builder,
                              summary,
                              features,
                              mode,
                              labels=None,
                              previous_ensemble=None):
        """Builds a `_SubnetworkSpec` from the given `adanet.subnetwork.Builder`.

    Args:
      name: String name of the subnetwork.
      subnetwork_builder: An `adanet.Builder` instance which defines how to train
        the subnetwork and ensemble mixture weights.
      summary: A `_ScopedSummary` instance for recording ensemble summaries.
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head). Can be `None`.
      previous_ensemble: The previous `Ensemble` from iteration t-1. Used for
        creating the subnetwork train_op.

    Returns:
      A new `_SubnetworkSpec` instance.
    """

        before_var_list = tf_compat.v1.trainable_variables()
        with tf_compat.v1.variable_scope("subnetwork_{}".format(name)):
            step = tf_compat.v1.get_variable(
                "step",
                shape=[],
                initializer=tf_compat.v1.zeros_initializer(),
                trainable=False,
                dtype=tf.int64)

            # Convert to tensor so that users cannot mutate it.
            step_tensor = tf.convert_to_tensor(value=step)
            with summary.current_scope():
                summary.scalar("iteration_step/adanet/iteration_step",
                               step_tensor)
            build_subnetwork = functools.partial(
                subnetwork_builder.build_subnetwork,
                features=features,
                logits_dimension=self._head.logits_dimension,
                training=mode == tf.estimator.ModeKeys.TRAIN,
                iteration_step=step_tensor,
                summary=summary,
                previous_ensemble=previous_ensemble)
            # Check which args are in the implemented build_subnetwork method
            # signature for backwards compatibility.
            defined_args = inspect.getargspec(
                subnetwork_builder.build_subnetwork).args
            if "labels" in defined_args:
                build_subnetwork = functools.partial(build_subnetwork,
                                                     labels=labels)
            subnetwork_scope = tf_compat.v1.get_variable_scope()
            with summary.current_scope(), _monkey_patch_context(
                    iteration_step_scope=subnetwork_scope,
                    scoped_summary=summary,
                    trainable_vars=[]):
                subnetwork = build_subnetwork()
            subnetwork_var_list = _new_trainable_variables(before_var_list)

            estimator_spec = _create_estimator_spec(self._head, features,
                                                    labels, mode,
                                                    subnetwork.logits,
                                                    self._use_tpu)

            subnetwork_metrics = _SubnetworkMetrics(self._use_tpu)
            if mode == tf.estimator.ModeKeys.EVAL:
                subnetwork_metrics.create_eval_metrics(
                    features=features,
                    labels=labels,
                    estimator_spec=estimator_spec,
                    metric_fn=self._metric_fn)

            if mode == tf.estimator.ModeKeys.TRAIN:
                with summary.current_scope():
                    summary.scalar("loss", estimator_spec.loss)

            if self._max_steps is not None:
                # Train this candidate for `max_steps` steps.
                # NOTE: During training, the iteration step gets incremented at the very
                # end of the computation graph, so we need to account for that here.
                is_training = tf.less(
                    step_tensor +
                    (1 if mode == tf.estimator.ModeKeys.TRAIN else 0),
                    self._max_steps,
                    name="is_training")
            else:
                # Train this candidate forever.
                is_training = tf.constant(True, name="is_training")

            # Create train ops for training subnetworks and ensembles.
            train_op = None
            if mode == tf.estimator.ModeKeys.TRAIN and subnetwork_builder:
                with summary.current_scope(), _monkey_patch_context(
                        iteration_step_scope=subnetwork_scope,
                        scoped_summary=summary,
                        trainable_vars=subnetwork_var_list):
                    train_op = _to_train_op_spec(
                        subnetwork_builder.build_subnetwork_train_op(
                            subnetwork=subnetwork,
                            loss=estimator_spec.loss,
                            var_list=subnetwork_var_list,
                            labels=labels,
                            iteration_step=step_tensor,
                            summary=summary,
                            previous_ensemble=previous_ensemble))
        return _SubnetworkSpec(
            name=name,
            subnetwork=subnetwork,
            builder=subnetwork_builder,
            predictions=estimator_spec.predictions,
            loss=estimator_spec.loss,
            step=step,
            is_training=is_training,
            train_op=train_op,
            eval_metrics=subnetwork_metrics.eval_metrics_tuple())
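
A quick sanity check of the off-by-one handling in the `is_training` computation above, assuming `max_steps = 100`:

max_steps = 100

# TRAIN mode, 100th and final run: step_tensor reads 99 and the pending
# end-of-graph increment will bring it to 100, so `is_training` already
# evaluates to False for this run.
assert not (99 + 1 < max_steps)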