def _serving_ops(self, features):
  """Build the serving graph: prediction, filtering, and cold-start filtering."""
  with variable_scope.variable_scope("model", use_resource=True):
    predictions = self.model.predict(features=features)
  with variable_scope.variable_scope("model", reuse=True):
    filter_results = self.create_loss(features, estimator_lib.ModeKeys.EVAL)
  with variable_scope.variable_scope("model", reuse=True):
    # Drop any state inputs before cold-start filtering.
    stateless_features = {
        key: value
        for key, value in features.items()
        if not key.startswith(feature_keys.State.STATE_PREFIX)
    }
    # Ignore any state management when cold-starting. The model's default
    # start state is replicated across the batch.
    cold_start_results = self.model.define_loss(
        features=stateless_features, mode=estimator_lib.ModeKeys.EVAL)
  return estimator_lib.EstimatorSpec(
      mode=estimator_lib.ModeKeys.PREDICT,
      export_outputs={
          feature_keys.SavedModelLabels.PREDICT:
              export_lib.PredictOutput(predictions),
          feature_keys.SavedModelLabels.FILTER:
              export_lib.PredictOutput(
                  state_to_dictionary(filter_results.end_state)),
          feature_keys.SavedModelLabels.COLD_START_FILTER:
              _NoStatePredictOutput(
                  state_to_dictionary(cold_start_results.end_state))
      },
      # Likely unused, but it is necessary to return `predictions` to satisfy
      # the Estimator's error checking.
      predictions={})
def _serving(features):
  """Model function for serving: exports predict and filter signatures."""
  with variable_scope.variable_scope("model"):
    predictions = model.predict(features=features)
  with variable_scope.variable_scope("model", reuse=True):
    filter_results = state_manager.define_loss(
        model, features, estimator_lib.ModeKeys.EVAL)
  export_outputs = {
      feature_keys.SavedModelLabels.PREDICT:
          export_lib.PredictOutput(predictions),
      feature_keys.SavedModelLabels.FILTER:
          export_lib.PredictOutput(
              state_to_dictionary(filter_results.end_state))
  }
  return estimator_lib.EstimatorSpec(
      mode=estimator_lib.ModeKeys.PREDICT,
      export_outputs=export_outputs,
      # Likely unused, but it is necessary to return `predictions` to satisfy
      # the Estimator's error checking.
      predictions={})
def _serving_ops(self, features):
  """Add ops for serving to the graph."""
  # First entry creates the model variables (resource variables for serving).
  with variable_scope.variable_scope("model", use_resource=True):
    predictions = self.model.predict(features=features)
  # Re-enter the same scope so filtering reuses the variables created above.
  with variable_scope.variable_scope("model", reuse=True):
    filter_results = self.create_loss(features, estimator_lib.ModeKeys.EVAL)
  export_outputs = {
      feature_keys.SavedModelLabels.PREDICT:
          export_lib.PredictOutput(predictions),
      feature_keys.SavedModelLabels.FILTER:
          export_lib.PredictOutput(
              state_to_dictionary(filter_results.end_state))
  }
  return estimator_lib.EstimatorSpec(
      mode=estimator_lib.ModeKeys.PREDICT,
      export_outputs=export_outputs,
      # Likely unused, but it is necessary to return `predictions` to satisfy
      # the Estimator's error checking.
      predictions={})
def create_estimator_spec(self,
                          features,
                          mode,
                          logits,
                          labels=None,
                          regularization_losses=None):
  """Returns an `EstimatorSpec`.

  Args:
    features: Input `dict` of `Tensor` or `SparseTensor` objects.
    mode: Estimator's `ModeKeys`.
    logits: A `Tensor` with shape [batch_size, D]. Each value is the ranking
      score of the corresponding item. `D` is usually the `list_size`. It
      might be changed when `mode` is `PREDICT`.
    labels: A `Tensor` of the same shape as `logits` representing relevance.
      `labels` is required argument when `mode` equals `TRAIN` or `EVAL`.
    regularization_losses: A list of additional scalar losses to be added to
      the training loss, such as regularization losses. These losses are
      usually expressed as a batch average, so for best results users need to
      set `loss_reduction=SUM_OVER_BATCH_SIZE` or
      `loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
      avoid scaling errors.

  Returns:
    `EstimatorSpec`.

  Raises:
    ValueError: If, in TRAIN mode, both `train_op_fn` and `optimizer`
      specified in the init function are `None` or if both are set.
  """
  logits = ops.convert_to_tensor(logits)
  with ops.name_scope(self._name, 'head'):
    # Predict.
    if mode == model_fn.ModeKeys.PREDICT:
      return model_fn.EstimatorSpec(
          mode=mode,
          predictions=logits,
          export_outputs={
              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                  export_lib.PredictOutput(logits)
          })

    training_loss, _, _, _ = self.create_loss(
        features=features, mode=mode, logits=logits, labels=labels)
    # Add optional regularization terms on top of the base training loss.
    if regularization_losses:
      regularization_loss = math_ops.add_n(regularization_losses)
      regularized_training_loss = math_ops.add(training_loss,
                                               regularization_loss)
    else:
      regularized_training_loss = training_loss

    # Eval.
    if mode == model_fn.ModeKeys.EVAL:
      eval_metric_ops = {
          name: metric_fn(labels=labels, predictions=logits, features=features)
          for name, metric_fn in six.iteritems(self._eval_metric_fns)
      }
      eval_metric_ops.update(self._labels_and_logits_metrics(labels, logits))
      return model_fn.EstimatorSpec(
          mode=mode,
          predictions=logits,
          loss=regularized_training_loss,
          eval_metric_ops=eval_metric_ops)

    # Train. Exactly one of `optimizer` / `train_op_fn` must be provided.
    assert mode == model_fn.ModeKeys.TRAIN
    if self._optimizer is not None:
      if self._train_op_fn is not None:
        raise ValueError('train_op_fn and optimizer cannot both be set.')
      train_op = self._optimizer.minimize(
          regularized_training_loss,
          global_step=training_util.get_global_step())
    elif self._train_op_fn is not None:
      train_op = self._train_op_fn(regularized_training_loss)
    else:
      raise ValueError('train_op_fn and optimizer cannot both be None.')
    return model_fn.EstimatorSpec(
        mode=mode,
        predictions=logits,
        loss=regularized_training_loss,
        train_op=train_op)