def estimator_spec_predict(self, features, use_tpu=False):
  """Construct EstimatorSpec for PREDICT mode."""
  # Assumes tensorflow (as tf) and tensor2tensor's t2t_model are imported at
  # module scope; this is a method on a T2TModel-style class.
  decode_hparams = self._decode_hparams
  infer_out = self.infer(
      features,
      beam_size=decode_hparams.beam_size,
      top_beams=(decode_hparams.beam_size
                 if decode_hparams.return_beams else 1),
      alpha=decode_hparams.alpha,
      decode_length=decode_hparams.extra_length,
      use_tpu=use_tpu)
  if isinstance(infer_out, dict):
    outputs = infer_out["outputs"]
    scores = infer_out["scores"]
    encoder_outputs = infer_out["encoder_outputs"]
  else:
    outputs = infer_out
    scores = None
    encoder_outputs = None

  inputs = features.get("inputs")
  if inputs is None:
    inputs = features["targets"]

  # Modified: added encoder outputs to the prediction dictionary.
  predictions = {
      "outputs": outputs,
      "scores": scores,
      "encoder_outputs": encoder_outputs,
      "inputs": inputs,
      "targets": features.get("infer_targets"),
      "batch_prediction_key": features.get("batch_prediction_key"),
  }
  # Drop None-valued entries so only populated tensors are returned.
  t2t_model._del_dict_nones(predictions)

  export_out = {"outputs": predictions["outputs"]}
  if "scores" in predictions:
    export_out["scores"] = predictions["scores"]
  if "batch_prediction_key" in predictions:
    export_out["batch_prediction_key"] = predictions["batch_prediction_key"]

  t2t_model._remove_summaries()

  export_outputs = {
      tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
          tf.estimator.export.PredictOutput(export_out)
  }
  if use_tpu:
    return tf.contrib.tpu.TPUEstimatorSpec(
        tf.estimator.ModeKeys.PREDICT,
        predictions=predictions,
        export_outputs=export_outputs)
  else:
    return tf.estimator.EstimatorSpec(
        tf.estimator.ModeKeys.PREDICT,
        predictions=predictions,
        export_outputs=export_outputs)
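
# Hedged usage sketch (an assumption, not part of the original source): the
# modified spec above adds "encoder_outputs" to the predictions dict, so a
# caller can pull decoded ids and encoder states together. `model_fn` and
# `input_fn` are hypothetical placeholders for the usual Estimator wiring
# that routes PREDICT mode to estimator_spec_predict.
def collect_encoder_outputs(model_fn, input_fn, model_dir):
  """Runs prediction and gathers decoded ids plus encoder states."""
  estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
  results = []
  for prediction in estimator.predict(input_fn=input_fn):
    results.append({
        "outputs": prediction["outputs"],
        # Present when infer() returned a dict; _del_dict_nones strips the
        # key if its value was None.
        "encoder_outputs": prediction.get("encoder_outputs"),
    })
  return results
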
def estimator_spec_predict(self, features):
  """Construct EstimatorSpec for PREDICT mode."""
  decode_hparams = self._decode_hparams
  infer_out = self.infer(
      features,
      beam_size=decode_hparams.beam_size,
      top_beams=(decode_hparams.beam_size
                 if decode_hparams.return_beams else 1),
      alpha=decode_hparams.alpha,
      decode_length=decode_hparams.extra_length)
  if isinstance(infer_out, dict):
    outputs = infer_out["outputs"]
    scores = infer_out["scores"]
  else:
    outputs = infer_out
    scores = None

  # Tile the scalar problem_choice across the batch so every prediction row
  # carries it; the batch size is read off the snippets feature.
  batch_size = common_layers.shape_list(
      features[searchqa_problem.FeatureNames.SNIPPETS])[0]
  batched_problem_choice = (
      features["problem_choice"] * tf.ones((batch_size,), dtype=tf.int32))

  predictions = {
      "outputs": outputs,
      "scores": scores,
      searchqa_problem.FeatureNames.SNIPPETS:
          features.get(searchqa_problem.FeatureNames.SNIPPETS),
      searchqa_problem.FeatureNames.QUESTION:
          features.get(searchqa_problem.FeatureNames.QUESTION),
      "targets": features.get("infer_targets"),
      "problem_choice": batched_problem_choice,
  }
  t2t_model._del_dict_nones(predictions)

  export_out = {"outputs": predictions["outputs"]}
  if "scores" in predictions:
    export_out["scores"] = predictions["scores"]

  return tf.estimator.EstimatorSpec(
      tf.estimator.ModeKeys.PREDICT,
      predictions=predictions,
      export_outputs={
          "output": tf.estimator.export.PredictOutput(export_out)
      })
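
# Hedged serving sketch (an assumption, not in the original): this second
# spec exports its PredictOutput under the signature key "output", so a
# SavedModel written from it can be queried as below. `export_dir` is a
# hypothetical path, and the "examples" feed key assumes the export used
# tf.estimator.export.build_parsing_serving_input_receiver_fn, whose default
# receiver key is "examples".
def predict_from_export(export_dir, serialized_examples):
  """Loads the exported model and runs its "output" signature (TF 1.x)."""
  predict_fn = tf.contrib.predictor.from_saved_model(
      export_dir, signature_def_key="output")
  return predict_fn({"examples": serialized_examples})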