Example 1
 def _serving_ops(self, features):
     """Add ops for serving to the graph."""
     with tf.compat.v1.variable_scope("model", use_resource=True):
         prediction_outputs = self.model.predict(features=features)
     with tf.compat.v1.variable_scope("model", reuse=True):
         filtering_outputs = self.create_loss(features,
                                              estimator_lib.ModeKeys.EVAL)
     with tf.compat.v1.variable_scope("model", reuse=True):
         no_state_features = {
             k: v
             for k, v in features.items()
             if not k.startswith(feature_keys.State.STATE_PREFIX)
         }
         # Ignore any state management when cold-starting. The model's default
         # start state is replicated across the batch.
         cold_filtering_outputs = self.model.define_loss(
             features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
     return estimator_lib.EstimatorSpec(
         mode=estimator_lib.ModeKeys.PREDICT,
         export_outputs={
             feature_keys.SavedModelLabels.PREDICT:
             export_lib.PredictOutput(prediction_outputs),
             feature_keys.SavedModelLabels.FILTER:
             export_lib.PredictOutput(
                 state_to_dictionary(filtering_outputs.end_state)),
             feature_keys.SavedModelLabels.COLD_START_FILTER:
             _NoStatePredictOutput(
                 state_to_dictionary(cold_filtering_outputs.end_state))
         },
         # Likely unused, but it is necessary to return `predictions` to satisfy
         # the Estimator's error checking.
         predictions={})
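
The three export labels above become signature names in the resulting SavedModel. A minimal sketch of inspecting them at serving time, assuming a TF1-style export directory (the path below is a placeholder, not from the source):

    import tensorflow.compat.v1 as tf

    export_dir = "/tmp/exported_time_series_model"  # placeholder path
    with tf.Session(graph=tf.Graph()) as session:
        meta_graph = tf.saved_model.loader.load(
            session, [tf.saved_model.tag_constants.SERVING], export_dir)
        # One signature per feature_keys.SavedModelLabels entry, as wired up
        # in _serving_ops above.
        for name, signature in meta_graph.signature_def.items():
            print(name, sorted(signature.inputs), sorted(signature.outputs))
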
Example 2
    def model_fn(features, labels, mode):
        """model_fn for keras Estimator."""
        # Raise an error when a DistributionStrategy is used with a native
        # Keras optimizer. Currently only native TensorFlow optimizers are
        # supported.
        if (distribution_strategy_context.has_strategy() and
                not isinstance(keras_model.optimizer,
                               (tf_optimizer_module.Optimizer,
                                optimizers.TFOptimizer))):
            raise ValueError(
                'Only TensorFlow native optimizers are supported with '
                'DistributionStrategy.')

        model = _clone_and_build_model(mode, keras_model, custom_objects,
                                       features, labels)
        model_output_names = []
        # We need to make sure that the output names of the last layer in the
        # model are the same for each of the cloned models. This is required
        # for mirrored strategy when we call regroup.
        if distribution_strategy_context.has_strategy():
            for name in model.output_names:
                name = re.compile(r'_\d$').sub('', name)
                model_output_names.append(name)
        else:
            model_output_names = model.output_names

        # Get inputs to EstimatorSpec
        predictions = dict(zip(model_output_names, model.outputs))

        loss = None
        train_op = None
        eval_metric_ops = None

        # Set loss and metric only during train and evaluate.
        if mode is not ModeKeys.PREDICT:
            if mode is ModeKeys.TRAIN:
                model._make_train_function()  # pylint: disable=protected-access
            else:
                model._make_test_function()  # pylint: disable=protected-access
            loss = model.total_loss

            eval_metric_ops = _convert_keras_metrics_to_estimator(model)

        # Set train_op only during train.
        if mode is ModeKeys.TRAIN:
            train_op = model.train_function.updates_op

        if not model._is_graph_network:
            # Reset the model to its original state to avoid `model_fn` being
            # destructive for the initial model argument.
            models.in_place_subclassed_model_state_restoration(keras_model)
        return model_fn_lib.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            export_outputs={
                _DEFAULT_SERVING_KEY: export_lib.PredictOutput(predictions)
            })
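
This model_fn is what the Keras-to-Estimator bridge builds internally; a minimal sketch of the public entry point that reaches it, with an illustrative model (layer names and shapes are assumptions, not from the source):

    import tensorflow.compat.v1 as tf

    keras_model = tf.keras.Sequential(
        [tf.keras.layers.Dense(1, input_shape=(4,), name='score')])
    # A native TensorFlow optimizer keeps the DistributionStrategy check in
    # model_fn above happy; a plain Keras optimizer would be rejected there
    # when a strategy is active.
    keras_model.compile(
        optimizer=tf.train.GradientDescentOptimizer(0.1), loss='mse')
    estimator = tf.keras.estimator.model_to_estimator(keras_model=keras_model)
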
Example 3
    def model_fn(features, labels, mode, params):
        loss = None
        train_op = None
        export_outputs = None

        # This could be some pre-processing on CPU, such as calls to an input
        # layer with embedding columns.
        x2 = features['x'] * 2

        def computation(input_tensor):
            return layers.dense(
                input_tensor,
                1,
                kernel_initializer=init_ops.zeros_initializer())

        if mode != _PREDICT:
            predictions = computation(x2)
            loss = losses.mean_squared_error(labels, predictions)
            optimizer = tf.tpu.CrossShardOptimizer(
                training.GradientDescentOptimizer(learning_rate=0.5))
            train_op = optimizer.minimize(loss, training.get_global_step())
        else:
            inputs = [x2]
            if params['use_tpu']:
                predictions = array_ops.identity(
                    tpu_estimator.inference_on_tpu(computation,
                                                   inputs,
                                                   num_batch_threads=1,
                                                   max_batch_size=2,
                                                   batch_timeout_micros=100),
                    name='predictions')
            else:
                predictions = array_ops.identity(computation(*inputs),
                                                 name='predictions')
            key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            export_outputs = {
                key: export_lib.PredictOutput({'prediction': predictions})
            }

            classes = string_ops.as_string(predictions, name='classes')
            classification_output = export_lib.ClassificationOutput(
                classes=classes)
            export_outputs['classification'] = classification_output

        return tpu_estimator.TPUEstimatorSpec(
            mode,
            loss=loss,
            train_op=train_op,
            predictions={'predictions': predictions},
            export_outputs=export_outputs)
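
A sketch of exercising the CPU branch of the PREDICT path directly, assuming the surrounding test module's imports are in scope and that _PREDICT aliases ModeKeys.PREDICT; the feature values are illustrative:

    import tensorflow.compat.v1 as tf

    with tf.Graph().as_default():
        features = {'x': tf.constant([[1.0], [2.0]])}
        spec = model_fn(features, labels=None,
                        mode=tf.estimator.ModeKeys.PREDICT,
                        params={'use_tpu': False})
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            # The kernel is zero-initialized, so every prediction is 0.0 here.
            print(session.run(spec.predictions['predictions']))
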
Example 4
  def model_fn(features, labels, mode, params):
    del params
    loss = None
    train_op = None
    predictions = dense_computation(features)
    export_outputs = None
    if mode != _PREDICT:
      loss = losses.mean_squared_error(labels, predictions)
      optimizer = tf.tpu.CrossShardOptimizer(
          training.GradientDescentOptimizer(learning_rate=0.5))
      train_op = optimizer.minimize(loss, training.get_global_step())
    else:
      if export_tpu_tensor:
        key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        export_outputs = {
            key: export_lib.PredictOutput({
                'prediction': predictions
            })
        }
      else:
        export_outputs = {}

      if export_cpu_tensor:

        def host_call(predictions):
          return string_ops.as_string(predictions, name='classes')

        classes = tf.tpu.outside_compilation(host_call, predictions)
        classification_output = export_lib.ClassificationOutput(
            classes=classes)
        export_outputs['classification'] = classification_output

    if tpu_estimator_spec:
      spec_type = tpu_estimator.TPUEstimatorSpec
    else:
      spec_type = model_fn_lib.EstimatorSpec

    return spec_type(
        mode,
        loss=loss,
        train_op=train_op,
        predictions={'predictions': predictions},
        export_outputs=export_outputs)
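
When use_tpu=False, TPUEstimator degrades a spec like this to a plain EstimatorSpec via TPUEstimatorSpec.as_estimator_spec(). A hypothetical direct check of that conversion, assuming the enclosing test's closure names (dense_computation, export_tpu_tensor, export_cpu_tensor, tpu_estimator_spec) are bound:

    import tensorflow.compat.v1 as tf

    with tf.Graph().as_default():
        tf.train.get_or_create_global_step()
        features = {'x': tf.constant([[1.0], [2.0]])}
        labels = tf.constant([[3.0], [5.0]])
        spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,
                        params=None)
        if isinstance(spec, tpu_estimator.TPUEstimatorSpec):
            # Repackage the TPU spec as the EstimatorSpec used off-TPU.
            spec = spec.as_estimator_spec()
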
Example 5
    def test_predict_output_tensors_roundtrip(self):
        value1 = array_ops.placeholder(dtypes.float32, 1, name='value1')
        value2 = array_ops.placeholder(dtypes.float32, 1, name='value2')
        predict_output = export_lib.PredictOutput({
            'value1': value1,
            'value2': value2
        })
        export_output_tensors = tpu_estimator._export_output_to_tensors(
            predict_output)
        self.assertSameElements([value1, value2], export_output_tensors)
        self.assertLen(export_output_tensors, 2)

        tensors_new = [
            array_ops.identity(t, name=t.name.split(':')[0] + '_new')
            for t in export_output_tensors
        ]
        predict_output_new = tpu_estimator._clone_export_output_with_tensors(
            predict_output, tensors_new)
        outputs = predict_output_new.outputs
        self.assertLen(outputs, 2)
        self.assertEqual(outputs['value1'].name, 'value1_new:0')
        self.assertEqual(outputs['value2'].name, 'value2_new:0')
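
These two private helpers are presumably what let TPUEstimator swap the tensors inside an export output for their dequeued counterparts at export time. A condensed sketch of the same roundtrip, reusing the test module's imports (names are illustrative):

    value = array_ops.placeholder(dtypes.float32, 1, name='value')
    predict_output = export_lib.PredictOutput({'value': value})
    # Flatten the output dict into a tensor list ...
    flat = tpu_estimator._export_output_to_tensors(predict_output)
    # ... then rebuild the same structure around replacement tensors.
    rebuilt = tpu_estimator._clone_export_output_with_tensors(
        predict_output, [array_ops.identity(t) for t in flat])
    assert set(rebuilt.outputs) == {'value'}
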
Example 6
    def model_fn(features, labels, mode):
        """model_fn for keras Estimator."""
        model = _clone_and_build_model(mode=mode,
                                       keras_model=keras_model,
                                       custom_objects=custom_objects,
                                       features=features,
                                       labels=labels,
                                       optimizer_config=optimizer_config)
        model_output_names = []
        # We need to make sure that the output names of the last layer in the
        # model are the same for each of the cloned models. This is required
        # for mirrored strategy when we call regroup.
        if tf.distribute.has_strategy():
            for name in model.output_names:
                name = re.compile(r'_\d$').sub('', name)
                model_output_names.append(name)
        else:
            model_output_names = model.output_names

        # Get inputs to EstimatorSpec
        predictions = dict(zip(model_output_names, model.outputs))

        loss = None
        train_op = None
        eval_metric_ops = None

        # Set loss and metric only during train and evaluate.
        if mode is not ModeKeys.PREDICT:
            if mode is ModeKeys.TRAIN:
                model._make_train_function()  # pylint: disable=protected-access
            else:
                model._make_test_function()  # pylint: disable=protected-access
            loss = model.total_loss

            eval_metric_ops = _convert_keras_metrics_to_estimator(model)

        # Set train_op only during train.
        if mode is ModeKeys.TRAIN:
            train_op = model.train_function.updates_op

        if (not model._is_graph_network
                and hasattr(keras_model, '_original_attributes_cache')
                and keras_model._original_attributes_cache is not None):
            # To avoid `model_fn` being destructive for the initial model argument.
            models.in_place_subclassed_model_state_restoration(keras_model)

        scaffold = None
        if save_object_ckpt:
            model._track_trackable(tf.compat.v1.train.get_global_step(),
                                   'estimator_global_step')
            # Create saver that maps variable names to object-checkpoint keys.
            object_graph = graph_view.ObjectGraphView(model)
            var_list = object_graph.frozen_saveable_objects()
            saver = tf.compat.v1.train.Saver(var_list=var_list, sharded=True)
            saver._object_restore_saver = trackable_util.frozen_saver(model)
            scaffold = tf.compat.v1.train.Scaffold(saver=saver)

        return model_fn_lib.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            export_outputs={
                _DEFAULT_SERVING_KEY: export_lib.PredictOutput(predictions)
            },
            scaffold=scaffold)
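
A minimal sketch of the public call that reaches this model_fn; mapping checkpoint_format='checkpoint' to the save_object_ckpt branch above is an assumption, and the model itself is illustrative:

    import tensorflow as tf

    keras_model = tf.keras.Sequential(
        [tf.keras.layers.Dense(1, input_shape=(4,), name='score')])
    keras_model.compile(optimizer='sgd', loss='mse')
    # 'checkpoint' requests object-based checkpoints (the custom Saver and
    # Scaffold built above); 'saver' keeps name-based TF1 checkpoints.
    estimator = tf.keras.estimator.model_to_estimator(
        keras_model=keras_model, checkpoint_format='checkpoint')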