Example #1
    def test_predict_outputs_valid(self):
        """Tests that no errors are raised when provided outputs are valid."""
        outputs = {
            "output0": constant_op.constant([0]),
            u"output1": constant_op.constant(["foo"]),
        }
        export_output_lib.PredictOutput(outputs)

        # Single Tensor is OK too
        export_output_lib.PredictOutput(constant_op.constant([0]))
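
A note on the single-`Tensor` form: judging by the expected signature in Example #5 (`{"output": output_1}`), a lone `Tensor` is wrapped in a dict under a default output name. A minimal sketch of that behavior; the exact key is an assumption drawn from Example #5:

    output = constant_op.constant([0])
    predict_output = export_output_lib.PredictOutput(output)
    # Presumably equivalent to passing {"output": output} explicitly.
    print(predict_output.outputs)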
Example #2
    def test_predict_outputs_invalid(self):
        with self.assertRaisesRegexp(ValueError,
                                     "Prediction output key must be a string"):
            export_output_lib.PredictOutput({1: constant_op.constant([0])})

        with self.assertRaisesRegexp(
                ValueError, "Prediction output value must be a Tensor"):
            export_output_lib.PredictOutput({
                "prediction1":
                sparse_tensor.SparseTensor(indices=[[0, 0]],
                                           values=[1],
                                           dense_shape=[1, 1]),
            })
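
Since `PredictOutput` values must be dense `Tensor`s, a `SparseTensor` prediction has to be densified first. A minimal sketch, assuming `sparse_ops` from `tensorflow.python.ops` is imported alongside `sparse_tensor`:

    sp = sparse_tensor.SparseTensor(
        indices=[[0, 0]], values=[1], dense_shape=[1, 1])
    # Convert to a dense Tensor (missing entries filled with 0) first.
    dense = sparse_ops.sparse_tensor_to_dense(sp)
    export_output_lib.PredictOutput({"prediction1": dense})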
Example #3
  def _merge_predict_export_outputs(self, all_estimator_spec):
    """Merges list of `EstimatorSpec` export_outputs for PREDICT.

    For each individual head, its DEFAULT_SERVING_KEY and PREDICT_SERVING_KEY
    are extracted and merged for `export_outputs` in PREDICT mode of
    `EstimatorSpec`. By default, the first head is served.

    Args:
      all_estimator_spec: list of `EstimatorSpec` for the individual heads.

    Returns:
      A dict of merged export_outputs from all heads for PREDICT.
    """
    # The first head is used for serving by default.
    export_outputs = {
        base_head.DEFAULT_SERVING_KEY: _default_export_output(
            all_estimator_spec[0].export_outputs,
            self._heads[0].name),
    }
    merged_predict_outputs = {}
    for head, spec in zip(self._heads, all_estimator_spec):
      for k, v in six.iteritems(spec.export_outputs):
        # Collect default serving key for export_outputs
        key = (head.name if k == base_head.DEFAULT_SERVING_KEY
               else '{}/{}'.format(head.name, k))
        export_outputs[key] = v
        # Collect predict serving key for merged_predict_outputs
        if (k == base_head.PREDICT_SERVING_KEY and
            isinstance(v, export_output.PredictOutput)):
          for kp, vp in six.iteritems(v.outputs):
            merged_predict_outputs['{}/{}'.format(head.name, kp)] = vp
    export_outputs[base_head.PREDICT_SERVING_KEY] = (
        export_output.PredictOutput(merged_predict_outputs))
    return export_outputs
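
The resulting key scheme is easiest to see without any TensorFlow objects: each head's DEFAULT_SERVING_KEY entry is re-keyed to the head's name, every other entry to 'head_name/key', and the per-head PREDICT_SERVING_KEY tensors are pooled into one merged PredictOutput. A toy sketch of just the naming logic; the head names and keys below are made up:

    def merged_keys(head_names, per_head_keys, default_key='serving_default'):
        # Mirror the re-keying loop above with plain strings.
        keys = []
        for name in head_names:
            for k in per_head_keys:
                keys.append(name if k == default_key
                            else '{}/{}'.format(name, k))
        return keys

    print(merged_keys(['head1', 'head2'], ['serving_default', 'predict']))
    # ['head1', 'head1/predict', 'head2', 'head2/predict']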
Example #4
def _get_export_outputs(export_outputs, predictions):
    """Validate export_outputs or create default export_outputs.

  Args:
    export_outputs: Describes the output signatures to be exported to
      `SavedModel` and used during serving. Should be a dict or None.
    predictions:  Predictions `Tensor` or dict of `Tensor`.

  Returns:
    Valid export_outputs dict

  Raises:
    TypeError: if export_outputs is not a dict or its values are not
      ExportOutput instances.
  """
    if export_outputs is None:
        default_output = export_output_lib.PredictOutput(predictions)
        export_outputs = {
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            default_output
        }

    if not isinstance(export_outputs, dict):
        raise TypeError(
            'export_outputs must be dict, given: {}'.format(export_outputs))
    for v in six.itervalues(export_outputs):
        if not isinstance(v, export_output_lib.ExportOutput):
            raise TypeError(
                'Values in export_outputs must be ExportOutput objects. '
                'Given: {}'.format(export_outputs))

    _maybe_add_default_serving_output(export_outputs)

    return export_outputs
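
When `export_outputs` is None, the default produced above is a single-entry dict keyed by the default serving signature key. A sketch of building the same dict by hand, reusing module names that appear elsewhere in these examples:

    predictions = {"scores": constant_op.constant([0.5])}
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output_lib.PredictOutput(predictions)
    }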
Example #5
  def test_build_all_signature_defs_serving_only(self):
    receiver_tensor = {"input": array_ops.placeholder(dtypes.string)}
    output_1 = constant_op.constant([1.])
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.PredictOutput(outputs=output_1),
        "train": export_output.TrainOutput(loss=output_1),
    }

    signature_defs = export.build_all_signature_defs(
        receiver_tensor, export_outputs)

    expected_signature_defs = {
        "serving_default": signature_def_utils.predict_signature_def(
            receiver_tensor, {"output": output_1})
    }

    self.assertDictEqual(expected_signature_defs, signature_defs)

    signature_defs = export.build_all_signature_defs(
        receiver_tensor, export_outputs, serving_only=False)

    expected_signature_defs.update({
        "train": signature_def_utils.supervised_train_signature_def(
            receiver_tensor, loss={"loss": output_1})
    })

    self.assertDictEqual(expected_signature_defs, signature_defs)
Example #6
  def test_build_all_signature_defs_without_receiver_alternatives(self):
    receiver_tensor = array_ops.placeholder(dtypes.string)
    output_1 = constant_op.constant([1.])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.RegressionOutput(value=output_1),
        "head-2": export_output.ClassificationOutput(classes=output_2),
        "head-3": export_output.PredictOutput(outputs={
            "some_output_3": output_3
        }),
    }

    signature_defs = export.build_all_signature_defs(
        receiver_tensor, export_outputs)

    expected_signature_defs = {
        "serving_default":
            signature_def_utils.regression_signature_def(receiver_tensor,
                                                         output_1),
        "head-2":
            signature_def_utils.classification_signature_def(receiver_tensor,
                                                             output_2, None),
        "head-3":
            signature_def_utils.predict_signature_def({
                "input": receiver_tensor
            }, {"some_output_3": output_3})
    }

    self.assertDictEqual(expected_signature_defs, signature_defs)
Example #7
    def _assertDefaultExportOutputForPredictions(self, predictions):
        spec = model_fn.EstimatorSpec(mode=ModeKeys.PREDICT,
                                      predictions=predictions)

        expected = export_output.PredictOutput(predictions).outputs
        serving_output = spec.export_outputs[
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        self.assertEqual(serving_output.outputs, expected)
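
For context, the helper above asserts that building an `EstimatorSpec` in PREDICT mode without explicit `export_outputs` auto-populates a default `PredictOutput` from `predictions`. A hedged illustration using the same module names:

    spec = model_fn.EstimatorSpec(
        mode=ModeKeys.PREDICT,
        predictions={"scores": constant_op.constant([0.5])})
    # Expected: a PredictOutput wrapping the predictions dict.
    serving_output = spec.export_outputs[
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]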
Example #8
  def test_build_all_signature_defs_with_single_alternatives(self):
    receiver_tensor = array_ops.placeholder(dtypes.string)
    receiver_tensors_alternative_1 = array_ops.placeholder(dtypes.int64)
    receiver_tensors_alternative_2 = array_ops.sparse_placeholder(
        dtypes.float32)
    # Note we are passing single Tensors as values of
    # receiver_tensors_alternatives, where normally that is a dict.
    # In this case a dict will be created using the default receiver tensor
    # name "input".
    receiver_tensors_alternatives = {"other1": receiver_tensors_alternative_1,
                                     "other2": receiver_tensors_alternative_2}
    output_1 = constant_op.constant([1.])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.RegressionOutput(value=output_1),
        "head-2": export_output.ClassificationOutput(classes=output_2),
        "head-3": export_output.PredictOutput(outputs={
            "some_output_3": output_3
        }),
    }

    signature_defs = export.build_all_signature_defs(
        receiver_tensor, export_outputs, receiver_tensors_alternatives)

    expected_signature_defs = {
        "serving_default":
            signature_def_utils.regression_signature_def(
                receiver_tensor,
                output_1),
        "head-2":
            signature_def_utils.classification_signature_def(
                receiver_tensor,
                output_2, None),
        "head-3":
            signature_def_utils.predict_signature_def(
                {"input": receiver_tensor},
                {"some_output_3": output_3}),
        "other1:head-3":
            signature_def_utils.predict_signature_def(
                {"input": receiver_tensors_alternative_1},
                {"some_output_3": output_3}),
        "other2:head-3":
            signature_def_utils.predict_signature_def(
                {"input": receiver_tensors_alternative_2},
                {"some_output_3": output_3})

        # Note that alternatives such as 'other1:serving_default' and
        # 'other1:head-2' are invalid, because regression and classification
        # signatures must take a single string input. Here we verify that
        # these invalid signatures are not included in the export.
    }

    self.assertDictEqual(expected_signature_defs, signature_defs)
Example #9
def _predict_spec(tower_specs, aggregation_device):
    """Populate replicated EstimatorSpec for `GraphKeys.PREDICT`."""
    estimator_spec = _asdict(tower_specs[0])
    estimator_spec['mode'] = model_fn_lib.ModeKeys.PREDICT

    with ops_lib.device(aggregation_device):
        estimator_spec['predictions'] = _concat_tensor_dicts(
            *[tower_spec.predictions for tower_spec in tower_specs])

        export_outputs_dict = _dict_concat(
            *[tower_spec.export_outputs for tower_spec in tower_specs])

        export_outputs = {}
        for name, export_output_list in six.iteritems(export_outputs_dict):
            if isinstance(export_output_list[0],
                          export_output_lib.PredictOutput):
                export_outputs[name] = export_output_lib.PredictOutput(
                    outputs=_concat_tensor_dicts(*[
                        export_output.outputs
                        for export_output in export_output_list
                    ]))
            elif isinstance(export_output_list[0],
                            export_output_lib.RegressionOutput):
                export_outputs[name] = export_output_lib.RegressionOutput(
                    value=array_ops.concat([
                        export_output.value
                        for export_output in export_output_list
                    ],
                                           axis=0))
            elif isinstance(export_output_list[0],
                            export_output_lib.ClassificationOutput):
                scores = None
                if export_output_list[0].scores is not None:
                    scores = array_ops.concat([
                        export_output.scores
                        for export_output in export_output_list
                    ],
                                              axis=0)

                classes = None
                if export_output_list[0].classes is not None:
                    classes = array_ops.stack([
                        export_output.classes
                        for export_output in export_output_list
                    ],
                                              axis=0)

                export_outputs[name] = export_output_lib.ClassificationOutput(
                    scores=scores, classes=classes)

    estimator_spec['export_outputs'] = export_outputs
    return model_fn_lib.EstimatorSpec(**estimator_spec)
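
The tower-merging helpers `_concat_tensor_dicts` and `_dict_concat` are not shown here. A minimal sketch of what the former plausibly does, concatenating same-keyed tensors from all towers along the batch axis; this is an assumption, not the library's implementation:

    def concat_tensor_dicts(*tensor_dicts):
        # Concatenate each key's tensors across towers along axis 0.
        return {
            key: array_ops.concat([d[key] for d in tensor_dicts], axis=0)
            for key in tensor_dicts[0]
        }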
Example #10
def model_fn_global_step_incrementer(features, labels, mode, params):
    del params
    loss = None
    train_op = None
    predictions = dense_computation(features)
    if mode != _PREDICT:
        loss = losses.mean_squared_error(labels, predictions)
        optimizer = tf.tpu.CrossShardOptimizer(
            training.GradientDescentOptimizer(learning_rate=0.5))
        train_op = optimizer.minimize(loss, training.get_global_step())
    return tpu_estimator.TPUEstimatorSpec(
        mode,
        loss=loss,
        train_op=train_op,
        predictions={'predictions': predictions},
        export_outputs={
            'test': export_output.PredictOutput({'prediction': predictions})
        })
Example #11
 def testExportOutputsMultiheadWithDefault(self):
   with ops.Graph().as_default(), self.cached_session():
     predictions = {'loss': constant_op.constant(1.)}
     output_1 = constant_op.constant([1.])
     output_2 = constant_op.constant(['2'])
     output_3 = constant_op.constant(['3'])
     export_outputs = {
         signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
         export_output.RegressionOutput(value=output_1),
         'head-2': export_output.ClassificationOutput(classes=output_2),
         'head-3': export_output.PredictOutput(outputs={
             'some_output_3': output_3
         })}
     estimator_spec = model_fn.EstimatorSpec(
         mode=model_fn.ModeKeys.PREDICT,
         predictions=predictions,
         export_outputs=export_outputs)
     self.assertEqual(export_outputs, estimator_spec.export_outputs)
Example #12
    def model_fn(features, labels, mode, params):
        del params
        loss = None
        train_op = None
        predictions = dense_computation(features)
        export_outputs = None
        if mode != _PREDICT:
            loss = tf.compat.v1.losses.mean_squared_error(labels, predictions)
            optimizer = tf.compat.v1.tpu.CrossShardOptimizer(
                tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.5))
            train_op = optimizer.minimize(loss,
                                          tf.compat.v1.train.get_global_step())
        else:
            if export_tpu_tensor:
                key = tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
                export_outputs = {
                    key:
                    export_output_lib.PredictOutput(
                        {'prediction': predictions})
                }
            else:
                export_outputs = {}

            if export_cpu_tensor:

                def host_call(predictions):
                    classes = tf.as_string(predictions, name='classes')
                    classification_output = export_output_lib.ClassificationOutput(
                        classes=classes)
                    export_outputs['classification'] = classification_output

                tf.compat.v1.tpu.outside_compilation(host_call, predictions)

        if tpu_estimator_spec:
            spec_type = tpu_estimator.TPUEstimatorSpec
        else:
            spec_type = model_fn_lib.EstimatorSpec

        return spec_type(mode,
                         loss=loss,
                         train_op=train_op,
                         predictions={'predictions': predictions},
                         export_outputs=export_outputs)
Example #13
 def testExportOutputsMultiheadMissingDefault(self):
   with ops.Graph().as_default(), self.cached_session():
     predictions = {'loss': constant_op.constant(1.)}
     output_1 = constant_op.constant([1.])
     output_2 = constant_op.constant(['2'])
     output_3 = constant_op.constant(['3'])
     export_outputs = {
         'head-1': export_output.RegressionOutput(value=output_1),
         'head-2': export_output.ClassificationOutput(classes=output_2),
         'head-3': export_output.PredictOutput(outputs={
             'some_output_3': output_3
         })}
     with self.assertRaisesRegexp(
         ValueError,
         'Multiple export_outputs were provided, but none of them is '
         'specified as the default.  Do this by naming one of them with '
         'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.'):
       model_fn.EstimatorSpec(
           mode=model_fn.ModeKeys.PREDICT,
           predictions=predictions,
           export_outputs=export_outputs)
Example #14
 def testExportOutputsMultiheadMissingDefault(self):
     with tf.Graph().as_default(), self.cached_session():
         predictions = {'loss': tf.constant(1.)}
         output_1 = tf.constant([1.])
         output_2 = tf.constant(['2'])
         output_3 = tf.constant(['3'])
         export_outputs = {
             'head-1':
             export_output.RegressionOutput(value=output_1),
             'head-2':
             export_output.ClassificationOutput(classes=output_2),
             'head-3':
             export_output.PredictOutput(
                 outputs={'some_output_3': output_3})
         }
         with self.assertRaisesRegexp(
                 ValueError,
                 'Multiple [`]*export_outputs[`]* were provided'):
             model_fn.EstimatorSpec(mode=ModeKeys.PREDICT,
                                    predictions=predictions,
                                    export_outputs=export_outputs)
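
Examples #13 and #14 use `assertRaisesRegexp`, the Python 2-era alias; on current Python the same check is spelled `assertRaisesRegex`. A self-contained sketch of the modern spelling:

    import unittest

    class RegexAliasDemo(unittest.TestCase):

        def test_error_message(self):
            # assertRaisesRegex replaces the deprecated assertRaisesRegexp.
            with self.assertRaisesRegex(ValueError,
                                        'Multiple .*export_outputs.*'):
                raise ValueError('Multiple `export_outputs` were provided, '
                                 'but none of them is specified as the '
                                 'default.')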
Example #15
  def _merge_predict(self, all_estimator_spec):
    """Merges list of `EstimatorSpec` for prediction.

    Args:
      all_estimator_spec: list of `EstimatorSpec` for the individual heads.

    Returns:
      `EstimatorSpec` that merges all heads for PREDICT.
    """
    predictions = {}
    export_outputs = {
        _DEFAULT_SERVING_KEY: _default_export_output(
            all_estimator_spec[0].export_outputs,
            self._heads[0].name),
    }
    merged_predict_outputs = {}
    for head, spec in zip(self._heads, all_estimator_spec):
      head_name = head.name
      for k, v in six.iteritems(spec.export_outputs):
        if k == _DEFAULT_SERVING_KEY:
          key = head_name
        else:
          key = '%s/%s' % (head_name, k)
        export_outputs[key] = v
        if (k == head_lib._PREDICT_SERVING_KEY and  # pylint:disable=protected-access
            isinstance(v, export_output_lib.PredictOutput)):
          for kp, vp in six.iteritems(v.outputs):
            key = '%s/%s' % (head_name, kp)
            merged_predict_outputs[key] = vp
      for k, v in six.iteritems(spec.predictions):
        predictions[(head_name, k)] = v
    export_outputs[head_lib._PREDICT_SERVING_KEY] = (  # pylint:disable=protected-access
        export_output_lib.PredictOutput(merged_predict_outputs))

    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.PREDICT,
        predictions=predictions,
        export_outputs=export_outputs)
Example #16
    def _create_tpu_estimator_spec(self,
                                   features,
                                   mode,
                                   logits,
                                   labels=None,
                                   optimizer=None,
                                   trainable_variables=None,
                                   train_op_fn=None,
                                   update_ops=None,
                                   regularization_losses=None):
        """Returns an `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
        For many applications, the shape is `[batch_size, n_classes]`.
      labels: Labels with shape matching `logits`. Can be multi-hot `Tensor`
        with shape `[D0, D1, ... DN, n_classes]` or `SparseTensor` with
        `dense_shape` `[D0, D1, ... DN, ?]`. `labels` is a required argument
        when `mode` equals `TRAIN` or `EVAL`.
      optimizer: A `tf.keras.optimizers.Optimizer` instance to optimize the
        loss in TRAIN mode. Namely, sets
        `train_op = optimizer.get_updates(loss, trainable_variables)`,
        which updates variables to minimize `loss`.
      trainable_variables: A list or tuple of `Variable` objects to update to
        minimize `loss`. In Tensorflow 1.x, by default these are the list of
        variables collected in the graph under the key
        `GraphKeys.TRAINABLE_VARIABLES`. As Tensorflow 2.x doesn't have
        collections and GraphKeys, trainable_variables need to be passed
        explicitly here.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      update_ops: A list or tuple of update ops to be run at training time. For
        example, layers such as BatchNormalization create mean and variance
        update ops that need to be run at training time. In Tensorflow 1.x,
        these are thrown into an UPDATE_OPS collection. As Tensorflow 2.x
        doesn't have collections, update_ops need to be passed explicitly here.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` when creating the head to
        avoid scaling errors.
    Returns:
      `model_fn._TPUEstimatorSpec`.
    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
        with ops.name_scope(self._name, 'head'):
            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            predictions = self.predictions(logits)
            if mode == ModeKeys.PREDICT:
                probabilities = predictions[pred_keys.PROBABILITIES]
                classifier_output = base_head.classification_output(
                    scores=probabilities,
                    n_classes=self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        base_head.DEFAULT_SERVING_KEY: classifier_output,
                        base_head.CLASSIFY_SERVING_KEY: classifier_output,
                        base_head.PREDICT_SERVING_KEY: (
                            export_output.PredictOutput(predictions))
                    })

            regularized_training_loss = self.loss(
                logits=logits,
                labels=labels,
                features=features,
                mode=mode,
                regularization_losses=regularization_losses)
            # Eval.
            if mode == ModeKeys.EVAL:
                eval_metrics = self.metrics(
                    regularization_losses=regularization_losses)
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metrics=base_head.create_eval_metrics_tuple(
                        self.update_metrics, {
                            'eval_metrics': eval_metrics,
                            'features': features,
                            'logits': logits,
                            'labels': labels,
                            'regularization_losses': regularization_losses
                        }))
            # Train.
            train_op = base_head.create_estimator_spec_train_op(
                head_name=self._name,
                optimizer=optimizer,
                train_op_fn=train_op_fn,
                update_ops=update_ops,
                trainable_variables=trainable_variables,
                regularized_training_loss=regularized_training_loss,
                loss_reduction=self._loss_reduction)
        # Create summary.
        base_head.create_estimator_spec_summary(
            regularized_training_loss=regularized_training_loss,
            regularization_losses=regularization_losses,
            summary_key_fn=self._summary_key)
        return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
            mode=ModeKeys.TRAIN,
            predictions=predictions,
            loss=regularized_training_loss,
            train_op=train_op)
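
The head above exports the same `classifier_output` under both the default and classify serving keys, plus a `PredictOutput` under the predict key. Assuming the conventional key values ('serving_default', 'classification', 'predict'), the signatures of the resulting SavedModel could be inspected like this; `export_dir` is an assumption:

    import tensorflow as tf

    loaded = tf.saved_model.load(export_dir)
    # Expected, under the assumed key constants:
    # ['serving_default', 'classification', 'predict']
    print(list(loaded.signatures.keys()))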
Example #17
  def _create_tpu_estimator_spec(self,
                                 features,
                                 mode,
                                 logits,
                                 labels=None,
                                 optimizer=None,
                                 trainable_variables=None,
                                 train_op_fn=None,
                                 update_ops=None,
                                 regularization_losses=None):
    """Returns an `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: for a single, independent event, a tensor of shape
        [batch_size, 1]; for correlated events, a dict keyed by event_name
        with values as tensors of shape [batch_size, 1].
      labels: dict keyed by 'event_name' and 'event_name.time_of_event' with
        value as tensors of shape [batch_size] or [batch_size, 1]. For
        correlated events, labels for all events are provided. Otherwise, only
        the event associated with this head is provided.
        Here is one example label:
        {u'respiration_failure.time_to_event':
         <tf.Tensor 'Cast:0' shape=(32,) dtype=float32>,
         u'respiration_failure':
        <tf.Tensor 'Batch/batch:110' shape=(32,) dtype=int64>}. `labels` is a
          required argument when `mode` equals `TRAIN` or `EVAL`.
      optimizer: A `tf.keras.optimizers.Optimizer` instance to optimize the
        loss in TRAIN mode. Namely, sets `train_op = optimizer.get_updates(loss,
        trainable_variables)`, which updates variables to minimize `loss`.
      trainable_variables: A list or tuple of `Variable` objects to update to
        minimize `loss`. In Tensorflow 1.x, by default these are the list of
        variables collected in the graph under the key
        `GraphKeys.TRAINABLE_VARIABLES`. As Tensorflow 2.x doesn't have
        collections and GraphKeys, trainable_variables need to be passed
        explicitly here.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      update_ops: A list or tuple of update ops to be run at training time. For
        example, layers such as BatchNormalization create mean and variance
        update ops that need to be run at training time. In Tensorflow 1.x,
        these are thrown into an UPDATE_OPS collection. As Tensorflow 2.x
        doesn't have collections, update_ops need to be passed explicitly here.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` or
        `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
        avoid scaling errors.

    Returns:
      `model_fn._TPUEstimatorSpec`.
    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
    tf.logging.info(mode)

    with ops.name_scope(self._name, 'survival_head'):
      # Predict.
      predictions = self.predictions(logits)
      # hazard_rates = self.hazard_rates(logits)

      if mode == model_fn.ModeKeys.PREDICT:
        # survival_output = SurvivalOutput(value=hazard_rates)

        return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                base_head.DEFAULT_SERVING_KEY: (
                    export_output.PredictOutput(predictions)),
                base_head.PREDICT_SERVING_KEY: (
                    export_output.PredictOutput(predictions))
            })

      regularized_training_loss = self.loss(
          logits=logits,
          labels=labels,
          features=features,
          mode=mode,
          regularization_losses=regularization_losses)
      # Eval.
      if mode == model_fn.ModeKeys.EVAL:
        eval_metrics = self.metrics(regularization_losses=regularization_losses)
        return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=regularized_training_loss,
            eval_metrics=base_head.create_eval_metrics_tuple(
                self.update_metrics, {
                    'eval_metrics': eval_metrics,
                    'features': features,
                    'logits': logits,
                    'labels': labels,
                    'regularization_losses': regularization_losses
                }))
      # Train.
      train_op = base_head.create_estimator_spec_train_op(
          self._name,
          optimizer=optimizer,
          trainable_variables=trainable_variables,
          train_op_fn=train_op_fn,
          update_ops=update_ops,
          regularized_training_loss=regularized_training_loss)
    # Create summary.
    base_head.create_estimator_spec_summary(
        regularized_training_loss, regularization_losses, self._summary_key)
    return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=regularized_training_loss,
        train_op=train_op)
Example #18
    def _create_tpu_estimator_spec(self,
                                   features,
                                   mode,
                                   logits,
                                   labels=None,
                                   optimizer=None,
                                   trainable_variables=None,
                                   train_op_fn=None,
                                   update_ops=None,
                                   regularization_losses=None):
        """See superclass for description."""

        with tf.compat.v1.name_scope(self._name, 'head'):
            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            predictions = self.predictions(logits)
            if mode == ModeKeys.PREDICT:
                probabilities = predictions[pred_keys.PROBABILITIES]
                logistic = predictions[pred_keys.LOGISTIC]
                classifier_output = base_head.classification_output(
                    scores=probabilities,
                    n_classes=2,
                    label_vocabulary=self._label_vocabulary)
                return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
                    mode=ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        base_head.DEFAULT_SERVING_KEY: classifier_output,
                        base_head.CLASSIFY_SERVING_KEY: classifier_output,
                        base_head.REGRESS_SERVING_KEY:
                            export_output.RegressionOutput(value=logistic),
                        base_head.PREDICT_SERVING_KEY:
                            export_output.PredictOutput(predictions)
                    })
            regularized_training_loss = self.loss(
                logits=logits,
                labels=labels,
                features=features,
                mode=mode,
                regularization_losses=regularization_losses)
            scalar_loss = tf.reduce_mean(regularized_training_loss)
            # Eval.
            if mode == ModeKeys.EVAL:
                eval_metrics = self.metrics(
                    regularization_losses=regularization_losses)
                return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
                    mode=ModeKeys.EVAL,
                    predictions=predictions,
                    loss=scalar_loss,
                    eval_metrics=base_head.create_eval_metrics_tuple(
                        self.update_metrics, {
                            'eval_metrics': eval_metrics,
                            'features': features,
                            'logits': logits,
                            'labels': labels,
                            'regularization_losses': regularization_losses
                        }))
            # Train.
            train_op = base_head.create_estimator_spec_train_op(
                head_name=self._name,
                optimizer=optimizer,
                train_op_fn=train_op_fn,
                update_ops=update_ops,
                trainable_variables=trainable_variables,
                regularized_training_loss=regularized_training_loss,
                loss_reduction=self._loss_reduction)
        # Create summary.
        base_head.create_estimator_spec_summary(
            regularized_training_loss=scalar_loss,
            regularization_losses=regularization_losses,
            summary_key_fn=self._summary_key)
        return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
            mode=ModeKeys.TRAIN,
            predictions=predictions,
            loss=scalar_loss,
            train_op=train_op)
Example #19
    def _create_tpu_estimator_spec(self,
                                   features,
                                   mode,
                                   logits,
                                   labels=None,
                                   optimizer=None,
                                   train_op_fn=None,
                                   regularization_losses=None):
        """Returns an `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
        For many applications, the shape is `[batch_size, n_classes]`.
      labels: Labels with shape matching `logits`. Can be multi-hot `Tensor`
        with shape `[D0, D1, ... DN, n_classes]` or `SparseTensor` with
        `dense_shape` `[D0, D1, ... DN, ?]`. `labels` is a required argument
        when `mode` equals `TRAIN` or `EVAL`.
      optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
        Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
        updates variables and increments `global_step`.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` or
        `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
        avoid scaling errors.
    Returns:
      `model_fn._TPUEstimatorSpec`.
    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
        with ops.name_scope(self._name, 'head'):
            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            predictions = self.predictions(logits)
            if mode == model_fn.ModeKeys.PREDICT:
                probabilities = predictions[pred_keys.PROBABILITIES]
                classifier_output = base_head.classification_output(
                    scores=probabilities,
                    n_classes=self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        base_head.DEFAULT_SERVING_KEY: classifier_output,
                        base_head.CLASSIFY_SERVING_KEY: classifier_output,
                        base_head.PREDICT_SERVING_KEY: (
                            export_output.PredictOutput(predictions))
                    })

            regularized_training_loss = self.loss(
                logits=logits,
                labels=labels,
                features=features,
                mode=mode,
                regularization_losses=regularization_losses)
            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                eval_metrics = self.metrics(
                    regularization_losses=regularization_losses)
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metrics=base_head.create_eval_metrics_tuple(
                        self.update_metrics, {
                            'eval_metrics': eval_metrics,
                            'features': features,
                            'logits': logits,
                            'labels': labels,
                            'regularization_losses': regularization_losses
                        }))
            # Train.
            train_op = base_head.create_estimator_spec_train_op(
                self._name, optimizer, train_op_fn, regularized_training_loss)
        # Create summary.
        base_head.create_estimator_spec_summary(regularized_training_loss,
                                                regularization_losses,
                                                self._summary_key)
        return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
            mode=model_fn.ModeKeys.TRAIN,
            predictions=predictions,
            loss=regularized_training_loss,
            train_op=train_op)
Example #20
  def model_fn(self, features, mode, config):
    """Model function for the estimator.

    Note that this does not take a `labels` arg. This works, but `input_fn` must
    return either `features` or, equivalently, `(features, None)`.

    Args:
      features: The input points. See `tf.estimator.Estimator`.
      mode: See `tf.estimator.Estimator`.
      config: See `tf.estimator.Estimator`.

    Returns:
      A `tf.estimator.EstimatorSpec` (see `tf.estimator.Estimator`) specifying
      this behavior:
        * `train_op`: Execute one mini-batch or full-batch run of Lloyd's
             algorithm.
        * `loss`: The sum of the squared distances from each input point to its
             closest center.
        * `eval_metric_ops`: Maps `SCORE` to `loss`.
        * `predictions`: Maps `ALL_DISTANCES` to the distance from each input
             point to each cluster center; maps `CLUSTER_INDEX` to the index of
             the closest cluster center for each input point.
    """
    # input_points is a single Tensor. Therefore, the sharding functionality
    # in clustering_ops is unused, and some of the values below are lists of a
    # single item.
    input_points = _parse_features_if_necessary(features, self._feature_columns)

    # Let N = the number of input_points.
    # all_distances: A list of one matrix of shape (N, num_clusters). Each value
    #   is the distance from an input point to a cluster center.
    # model_predictions: A list of one vector of shape (N). Each value is the
    #   cluster id of an input point.
    # losses: Similar to cluster_idx but provides the distance to the cluster
    #   center.
    # is_initialized: scalar indicating whether the initial cluster centers
    #   have been chosen; see init_op.
    # init_op: an op to choose the initial cluster centers. A single worker
    #   repeatedly executes init_op until is_initialized becomes True.
    # training_op: an op that runs an iteration of training, either an entire
    #   Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
    #   may execute this op, but only after is_initialized becomes True.
    (all_distances, model_predictions, losses, is_initialized, init_op,
     training_op) = clustering_ops.KMeans(
         inputs=input_points,
         num_clusters=self._num_clusters,
         initial_clusters=self._initial_clusters,
         distance_metric=self._distance_metric,
         use_mini_batch=self._use_mini_batch,
         mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
         random_seed=self._random_seed,
         kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
     ).training_graph()

    loss = math_ops.reduce_sum(losses)
    summary.scalar('loss/raw', loss)

    incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
    training_op = control_flow_ops.with_dependencies([training_op, incr_step],
                                                     loss)

    training_hooks = [
        _InitializeClustersHook(init_op, is_initialized, config.is_chief)
    ]
    if self._relative_tolerance is not None:
      training_hooks.append(
          _LossRelativeChangeHook(loss, self._relative_tolerance))

    export_outputs = {
        KMeansClustering.ALL_DISTANCES:
            export_output.PredictOutput(all_distances[0]),
        KMeansClustering.CLUSTER_INDEX:
            export_output.PredictOutput(model_predictions[0]),
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.PredictOutput(model_predictions[0])
    }

    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions={
            KMeansClustering.ALL_DISTANCES: all_distances[0],
            KMeansClustering.CLUSTER_INDEX: model_predictions[0],
        },
        loss=loss,
        train_op=training_op,
        eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
        training_hooks=training_hooks,
        export_outputs=export_outputs)
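
Note that each export output here wraps a single `Tensor`, so (per the behavior shown in Examples #1 and #5) each signature exposes it under the default output name. A hedged sketch of exporting such an estimator; the feature name 'input', the export path, and the `kmeans` instance are all assumptions:

    import tensorflow as tf

    def serving_input_receiver_fn():
        features = {'input': tf.compat.v1.placeholder(tf.float32, [None, 2])}
        return tf.estimator.export.ServingInputReceiver(features, features)

    kmeans.export_saved_model('/tmp/kmeans_export', serving_input_receiver_fn)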
Example #21
    def _create_tpu_estimator_spec(self,
                                   features,
                                   mode,
                                   logits,
                                   labels=None,
                                   optimizer=None,
                                   train_op_fn=None,
                                   regularization_losses=None):
        """Returns a `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, logits_dimension]`.
        For many applications, the shape is `[batch_size, logits_dimension]`.
      labels: Labels integer or string `Tensor` with shape matching `logits`,
        namely `[D0, D1, ... DN, 1]` or `[D0, D1, ... DN]`. `labels` is a
        required argument when `mode` equals `TRAIN` or `EVAL`.
      optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
        Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
        updates variables and increments `global_step`.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` when creating the head to avoid
        scaling errors.

    Returns:
      A `model_fn._TPUEstimatorSpec` instance.
    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
        with tf.compat.v1.name_scope(self._name, 'head'):
            logits = _check_logits_final_dim(logits, self.logits_dimension)

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with tf.compat.v1.name_scope(None, 'predictions', (logits, )):
                all_class_ids = _all_class_ids(logits, self._n_classes)
                all_classes = _all_classes(
                    logits,
                    self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                # class_ids's shape is [D0, D1, ... DN].
                class_ids = tf.compat.v1.math.argmax(logits,
                                                     axis=-1,
                                                     name=pred_keys.CLASS_IDS)
                class_ids = tf.compat.v1.expand_dims(class_ids, axis=-1)
                if self._label_vocabulary:
                    table = lookup_ops.index_to_string_table_from_tensor(
                        vocabulary_list=self._label_vocabulary,
                        name='class_string_lookup')
                    classes = table.lookup(class_ids)
                else:
                    classes = tf.strings.as_string(class_ids,
                                                   name='str_classes')

                probabilities = tf.compat.v1.nn.softmax(
                    logits, name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.PROBABILITIES: probabilities,
                    # Expand to [batch_size, 1]
                    pred_keys.CLASS_IDS: class_ids,
                    pred_keys.CLASSES: classes,
                    pred_keys.ALL_CLASS_IDS: all_class_ids,
                    pred_keys.ALL_CLASSES: all_classes,
                }
            if mode == ModeKeys.PREDICT:
                classifier_output = _classification_output(
                    scores=probabilities,
                    n_classes=self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
                    mode=ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY: classifier_output,
                        _CLASSIFY_SERVING_KEY: classifier_output,
                        _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
                    })

            training_loss, unreduced_loss, weights, label_ids = self.create_loss(
                features=features, mode=mode, logits=logits, labels=labels)
            if regularization_losses:
                regularization_loss = tf.math.add_n(regularization_losses)
                regularized_training_loss = tf.math.add_n(
                    [training_loss, regularization_loss])
            else:
                regularization_loss = None
                regularized_training_loss = training_loss

            if self._loss_reduction == tf.compat.v1.losses.Reduction.NONE:
                scalar_loss = tf.reduce_mean(regularized_training_loss)
            else:
                scalar_loss = regularized_training_loss

            # Eval.
            if mode == ModeKeys.EVAL:
                return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
                    mode=ModeKeys.EVAL,
                    predictions=predictions,
                    loss=scalar_loss,
                    eval_metrics=_create_eval_metrics_tuple(
                        self._eval_metric_ops, {
                            'labels': label_ids,
                            'class_ids': class_ids,
                            'weights': weights,
                            'unreduced_loss': unreduced_loss,
                            'regularization_loss': regularization_loss
                        }))

            # Train.
            if optimizer is not None:
                if train_op_fn is not None:
                    raise ValueError(
                        'train_op_fn and optimizer cannot both be set.')
                train_op = optimizer.minimize(
                    regularized_training_loss,
                    global_step=tf.compat.v1.train.get_global_step())
            elif train_op_fn is not None:
                train_op = train_op_fn(regularized_training_loss)
            else:
                raise ValueError(
                    'train_op_fn and optimizer cannot both be None.')
            train_op = _append_update_ops(train_op)
            # Only summarize mean_loss for SUM reduction to preserve backwards
            # compatibility. Otherwise skip it to avoid unnecessary computation.
            if self._loss_reduction == tf.compat.v1.losses.Reduction.SUM:
                example_weight_sum = tf.math.reduce_sum(
                    weights * tf.compat.v1.ones_like(unreduced_loss))
                mean_loss = training_loss / example_weight_sum
            else:
                mean_loss = None
        with tf.compat.v1.name_scope(''):
            keys = metric_keys.MetricKeys
            tf.compat.v1.summary.scalar(_summary_key(self._name, keys.LOSS),
                                        scalar_loss)
            if mean_loss is not None:
                tf.compat.v1.summary.scalar(
                    _summary_key(self._name, keys.LOSS_MEAN), mean_loss)
            if regularization_loss is not None:
                tf.compat.v1.summary.scalar(
                    _summary_key(self._name, keys.LOSS_REGULARIZATION),
                    regularization_loss)
        return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
            mode=ModeKeys.TRAIN,
            predictions=predictions,
            loss=scalar_loss,
            train_op=train_op)
Example #22
    def _create_tpu_estimator_spec(self,
                                   features,
                                   mode,
                                   logits,
                                   labels=None,
                                   optimizer=None,
                                   trainable_variables=None,
                                   train_op_fn=None,
                                   update_ops=None,
                                   regularization_losses=None):
        """Returns an `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: estimated obs. value, [batch, time_len, num_obs] tensor.
      labels: ground truth observation, feature dict with obs. and interv. codes
        as keys, values tensor with shape [batch_size, context_window_size].
      optimizer: A `tf.keras.optimizers.Optimizer` instance to optimize the
        loss in TRAIN mode. Namely, sets `train_op = optimizer.get_updates(loss,
        trainable_variables)`, which updates variables to minimize `loss`.
      trainable_variables: A list or tuple of `Variable` objects to update to
        minimize `loss`. In Tensorflow 1.x, by default these are the list of
        variables collected in the graph under the key
        `GraphKeys.TRAINABLE_VARIABLES`. As Tensorflow 2.x doesn't have
        collections and GraphKeys, trainable_variables need to be passed
        explicitly here.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      update_ops: A list or tuple of update ops to be run at training time. For
        example, layers such as BatchNormalization create mean and variance
        update ops that need to be run at training time. In Tensorflow 1.x,
        these are thrown into an UPDATE_OPS collection. As Tensorflow 2.x
        doesn't have collections, update_ops need to be passed explicitly here.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` or
        `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
        avoid scaling errors.

    Returns:
      `model_fn._TPUEstimatorSpec`.
    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
        with ops.name_scope(self._name, 'sequence_head'):
            # Predict.
            predictions = self.predictions(logits)
            if mode == model_fn.ModeKeys.PREDICT:
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        base_head.DEFAULT_SERVING_KEY:
                            export_output.PredictOutput(predictions),
                        base_head.PREDICT_SERVING_KEY: (
                            export_output.PredictOutput(predictions))
                    })

            regularized_training_loss = self.loss(
                logits=logits,
                labels=labels,
                features=features,
                mode=mode,
                regularization_losses=regularization_losses)
            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                eval_metrics = self.metrics(
                    regularization_losses=regularization_losses)
                return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metrics=base_head.create_eval_metrics_tuple(
                        self.update_metrics, {
                            'eval_metrics': eval_metrics,
                            'features': features,
                            'logits': logits,
                            'labels': labels,
                            'regularization_losses': regularization_losses
                        }))
            # Train.
            train_op = base_head.create_estimator_spec_train_op(
                self._name,
                optimizer=optimizer,
                trainable_variables=trainable_variables,
                train_op_fn=train_op_fn,
                update_ops=update_ops,
                regularized_training_loss=regularized_training_loss)
        # Create summary.
        base_head.create_estimator_spec_summary(regularized_training_loss,
                                                regularization_losses,
                                                self._summary_key)
        return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
            mode=model_fn.ModeKeys.TRAIN,
            predictions=predictions,
            loss=regularized_training_loss,
            train_op=train_op)
Example #23
    def _create_tpu_estimator_spec(self,
                                   features,
                                   mode,
                                   logits,
                                   labels=None,
                                   optimizer=None,
                                   train_op_fn=None,
                                   regularization_losses=None):
        """Returns an `EstimatorSpec`.

    Args:
      features: Input `dict` mapping string feature names to `Tensor` or
        `SparseTensor` objects containing the values for that feature in a
        minibatch. Often to be used to fetch example-weight tensor.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, logits_dimension]`.
        For many applications, the shape is `[batch_size, logits_dimension]`.
      labels: Labels `Tensor` with shape matching `logits`, namely
        `[D0, D1, ... DN, logits_dimension]`. When `logits_dimension=1`, shape
        `[D0, D1, ... DN]` is also supported. `labels` is a required argument
        when `mode` equals `TRAIN` or `EVAL`.
      optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
        Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
        updates variables and increments `global_step`.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` when creating the head to
        avoid scaling errors.

    Returns:
      A `model_fn._TPUEstimatorSpec` instance.

    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
        with ops.name_scope(self._name, 'head'):
            # Predict.
            predictions = self.predictions(logits)
            if mode == model_fn.ModeKeys.PREDICT:
                keys = prediction_keys.PredictionKeys
                regression_output = export_output.RegressionOutput(
                    value=predictions[keys.PREDICTIONS])
                return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        base_head.DEFAULT_SERVING_KEY: regression_output,
                        base_head.REGRESS_SERVING_KEY: regression_output,
                        base_head.PREDICT_SERVING_KEY: export_output.PredictOutput(
                            predictions)
                    })
            regularized_training_loss = self.loss(
                logits=logits,
                labels=labels,
                features=features,
                mode=mode,
                regularization_losses=regularization_losses)
            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                eval_metrics = self.metrics(
                    regularization_losses=regularization_losses)
                return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metrics=base_head.create_eval_metrics_tuple(
                        self.update_metrics, {
                            'eval_metrics': eval_metrics,
                            'features': features,
                            'logits': logits,
                            'labels': labels,
                            'regularization_losses': regularization_losses
                        }))
            # Train.
            train_op = base_head.create_estimator_spec_train_op(
                head_name=self._name,
                optimizer=optimizer,
                train_op_fn=train_op_fn,
                update_ops=self._update_ops,
                regularized_training_loss=regularized_training_loss)
        # Create summary.
        base_head.create_estimator_spec_summary(
            regularized_training_loss=regularized_training_loss,
            regularization_losses=regularization_losses,
            summary_key_fn=self._summary_key)
        return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
            mode=model_fn.ModeKeys.TRAIN,
            predictions=predictions,
            loss=regularized_training_loss,
            train_op=train_op)
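In the PREDICT branch above, one `RegressionOutput` is registered under both the default and the regress serving keys, while a `PredictOutput` exposes the full predictions dict. A standalone sketch of that export layout using the public `tf.estimator.export` classes; the dummy tensors and the literal key strings 'regression' and 'predict' (assumed to match the serving-key constants) are illustrative.

import tensorflow.compat.v1 as tf

# Stand-in for `self.predictions(logits)`.
predictions = {'predictions': tf.constant([[1.0], [2.0]])}
regression_output = tf.estimator.export.RegressionOutput(
    value=predictions['predictions'])
export_outputs = {
    # Signature served when the client does not name one explicitly.
    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
        regression_output,
    'regression': regression_output,  # assumed REGRESS_SERVING_KEY value
    'predict': tf.estimator.export.PredictOutput(predictions),  # assumed PREDICT_SERVING_KEY value
}

Registering the same `RegressionOutput` twice is deliberate: clients that ask for no signature and clients that ask for the regress signature both get the single-value regression API, while the predict signature preserves every tensor.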
Example #24
    def _create_tpu_estimator_spec(self,
                                   features,
                                   mode,
                                   logits,
                                   labels=None,
                                   optimizer=None,
                                   train_op_fn=None,
                                   regularization_losses=None):
        """Returns an `model_fn._TPUEstimatorSpec`.

    Args:
      features: Input `dict` of `Tensor` or `SparseTensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
        For many applications, the shape is `[batch_size, n_classes]`.
      labels: Labels with shape matching `logits`. Can be a multi-hot `Tensor`
        with shape `[D0, D1, ... DN, n_classes]` or a `SparseTensor` with
        `dense_shape` `[D0, D1, ... DN, ?]`. `labels` is a required argument
        when `mode` equals `TRAIN` or `EVAL`.
      optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
        Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
        updates variables and increments `global_step`.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns
        `train_op`. Used if `optimizer` is `None`.
      regularization_losses: A list of additional scalar losses to be added to
        the training loss, such as regularization losses. These losses are
        usually expressed as a batch average, so for best results users need to
        set `loss_reduction=SUM_OVER_BATCH_SIZE` or
        `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
        avoid scaling errors.

    Returns:
      `model_fn._TPUEstimatorSpec`.

    Raises:
      ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
        mode, or if both are set.
    """
        with ops.name_scope(self._name, 'head'):
            logits = head_lib._check_logits_final_dim(logits,
                                                      self.logits_dimension)  # pylint:disable=protected-access

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits, )):
                probabilities = math_ops.sigmoid(logits,
                                                 name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS: logits,
                    pred_keys.PROBABILITIES: probabilities,
                }
            if mode == model_fn.ModeKeys.PREDICT:
                classifier_output = head_lib._classification_output(  # pylint:disable=protected-access
                    scores=probabilities,
                    n_classes=self._n_classes,
                    label_vocabulary=self._label_vocabulary)
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY: classifier_output,
                        head_lib._CLASSIFY_SERVING_KEY: classifier_output,  # pylint:disable=protected-access
                        head_lib._PREDICT_SERVING_KEY: (  # pylint:disable=protected-access
                            export_output.PredictOutput(predictions))
                    })

            (training_loss, unreduced_loss, weights,
             processed_labels) = self.create_loss(features=features,
                                                  mode=mode,
                                                  logits=logits,
                                                  labels=labels)
            if regularization_losses:
                regularization_loss = math_ops.add_n(regularization_losses)
                regularized_training_loss = math_ops.add_n(
                    [training_loss, regularization_loss])
            else:
                regularization_loss = None
                regularized_training_loss = training_loss

            # Eval.
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=regularized_training_loss,
                    eval_metrics=head_lib._create_eval_metrics_tuple(  # pylint:disable=protected-access
                        self._eval_metric_ops, {
                            'labels': processed_labels,
                            'probabilities': probabilities,
                            'weights': weights,
                            'unreduced_loss': unreduced_loss,
                            'regularization_loss': regularization_loss,
                        }))

            # Train.
            if optimizer is not None:
                if train_op_fn is not None:
                    raise ValueError(
                        'train_op_fn and optimizer cannot both be set.')
                train_op = optimizer.minimize(
                    regularized_training_loss,
                    global_step=training_util.get_global_step())
            elif train_op_fn is not None:
                train_op = train_op_fn(regularized_training_loss)
            else:
                raise ValueError(
                    'train_op_fn and optimizer cannot both be None.')
            train_op = head_lib._append_update_ops(train_op)  # pylint:disable=protected-access
            # Only summarize mean_loss for SUM reduction to preserve backwards
            # compatibility. Otherwise skip it to avoid unnecessary computation.
            if self._loss_reduction == losses.Reduction.SUM:
                example_weight_sum = math_ops.reduce_sum(
                    weights * array_ops.ones_like(unreduced_loss))
                mean_loss = training_loss / example_weight_sum
            else:
                mean_loss = None
        with ops.name_scope(''):
            keys = metric_keys.MetricKeys
            summary.scalar(
                head_lib._summary_key(self._name, keys.LOSS),  # pylint:disable=protected-access
                regularized_training_loss)
            if mean_loss is not None:
                summary.scalar(
                    head_lib._summary_key(self._name, keys.LOSS_MEAN),  # pylint:disable=protected-access
                    mean_loss)
            if regularization_loss is not None:
                summary.scalar(
                    head_lib._summary_key(self._name,
                                          keys.LOSS_REGULARIZATION),  # pylint:disable=protected-access
                    regularization_loss)
        return model_fn._TPUEstimatorSpec(  # pylint:disable=protected-access
            mode=model_fn.ModeKeys.TRAIN,
            predictions=predictions,
            loss=regularized_training_loss,
            train_op=train_op)
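The TRAIN branch above treats `optimizer` and `train_op_fn` as strictly mutually exclusive. Factored out, the selection reduces to a few lines; `_select_train_op` is a made-up name for illustration.

import tensorflow.compat.v1 as tf

def _select_train_op(loss, optimizer=None, train_op_fn=None):
    """Mirrors the optimizer/train_op_fn branching in the head above."""
    if optimizer is not None:
        if train_op_fn is not None:
            raise ValueError('train_op_fn and optimizer cannot both be set.')
        # `minimize` both applies gradients and increments the global step.
        return optimizer.minimize(
            loss, global_step=tf.train.get_global_step())
    if train_op_fn is not None:
        return train_op_fn(loss)
    raise ValueError('train_op_fn and optimizer cannot both be None.')

Note that the head then appends the graph's `UPDATE_OPS` to the returned op (via `_append_update_ops`) and, for `SUM` loss reduction only, also logs the backwards-compatible mean loss `training_loss / example_weight_sum`.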