Example #1
        def serving_input_receiver_fn():
            """Create the ServingInputReceiver to export a saved model.

            Returns:
              An instance of ServingInputReceiver.
            """
            # We assume only one input, a string which contains the serialized
            # proto.
            receiver_tensors = {
                'input_example_tensor':
                tf.placeholder(dtype=tf.string,
                               shape=[None],
                               name='input_example_tensor')
            }
            # We have to filter our specs since only required tensors are
            # used at inference time.
            flat_feature_spec = tensorspec_utils.flatten_spec_structure(
                self._feature_spec)

            # We need to freeze the conditioning and inference shapes.
            for key, value in flat_feature_spec.condition.items():
                ref_shape = value.shape.as_list()
                shape = [self._num_condition_samples_per_task] + ref_shape[1:]
                flat_feature_spec.condition[key] = (
                    tensorspec_utils.ExtendedTensorSpec.from_spec(value,
                                                                  shape=shape))

            for key, value in flat_feature_spec.inference.items():
                ref_shape = value.shape.as_list()
                shape = [self._num_inference_samples_per_task] + ref_shape[1:]
                flat_feature_spec.inference[key] = (
                    tensorspec_utils.ExtendedTensorSpec.from_spec(value,
                                                                  shape=shape))

            required_feature_spec = (
                tensorspec_utils.filter_required_flat_tensor_spec(
                    flat_feature_spec))

            tensor_dict, tensor_spec_dict = (
                tensorspec_utils.tensorspec_to_feature_dict(
                    required_feature_spec))

            parse_tf_example_fn = tfdata.create_parse_tf_example_fn(
                tensor_dict=tensor_dict,
                tensor_spec_dict=tensor_spec_dict,
                feature_tspec=self._feature_spec)

            features = parse_tf_example_fn(
                receiver_tensors['input_example_tensor'])

            if self._preprocess_fn is not None:
                features, _ = self._preprocess_fn(
                    features=features,
                    labels=None,
                    mode=tf.estimator.ModeKeys.PREDICT)

            return tf.estimator.export.ServingInputReceiver(
                features, receiver_tensors)
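
A hedged usage sketch, not part of the original example: in TF1, a receiver function like this is handed to the Estimator export API. The estimator construction, `t2r_model_fn`, and the paths below are assumptions made only for illustration.

import tensorflow as tf  # TF 1.x, matching the examples above

# Hypothetical estimator built from the model_fn of the T2R model that owns
# serving_input_receiver_fn; both names are placeholders for this sketch.
estimator = tf.estimator.Estimator(model_fn=t2r_model_fn,
                                   model_dir='/tmp/t2r_model')

# export_saved_model writes a SavedModel whose serving signature is defined by
# the ServingInputReceiver returned above: a single string placeholder named
# 'input_example_tensor' that carries serialized tf.Example protos.
export_dir = estimator.export_saved_model(
    export_dir_base='/tmp/t2r_export',  # hypothetical export path
    serving_input_receiver_fn=serving_input_receiver_fn)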
Example #2
def _generate_assets(model, export_dir):
  in_feature_spec = model.get_feature_specification_for_packing(
      mode=tf.estimator.ModeKeys.PREDICT)
  in_label_spec = model.get_label_specification_for_packing(
      mode=tf.compat.v1.estimator.ModeKeys.PREDICT)

  in_feature_spec = tensorspec_utils.filter_required_flat_tensor_spec(
      in_feature_spec)
  in_label_spec = tensorspec_utils.filter_required_flat_tensor_spec(
      in_label_spec)

  t2r_assets = t2r_pb2.T2RAssets()
  t2r_assets.feature_spec.CopyFrom(in_feature_spec.to_proto())
  t2r_assets.label_spec.CopyFrom(in_label_spec.to_proto())
  t2r_assets_dir = os.path.join(export_dir,
                                tensorspec_utils.EXTRA_ASSETS_DIRECTORY)

  tf.io.gfile.makedirs(t2r_assets_dir)
  t2r_assets_filename = os.path.join(t2r_assets_dir,
                                     tensorspec_utils.T2R_ASSETS_FILENAME)
  tensorspec_utils.write_t2r_assets_to_file(t2r_assets, t2r_assets_filename)
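
A hedged sketch, not taken from the library, of reading the written assets back at inference time. Only standard protobuf and GFile calls are used; the import paths and the binary-vs-text serialization format are assumptions, so a text-format fallback is included.

import os

import tensorflow as tf  # TF 1.x
from google.protobuf import message
from google.protobuf import text_format

from tensor2robot.proto import t2r_pb2  # import path is an assumption
from tensor2robot.utils import tensorspec_utils  # import path is an assumption


def _read_assets(export_dir):
  """Loads the T2RAssets proto written by _generate_assets above."""
  t2r_assets_filename = os.path.join(export_dir,
                                     tensorspec_utils.EXTRA_ASSETS_DIRECTORY,
                                     tensorspec_utils.T2R_ASSETS_FILENAME)
  t2r_assets = t2r_pb2.T2RAssets()
  with tf.io.gfile.GFile(t2r_assets_filename, 'rb') as f:
    data = f.read()
  try:
    t2r_assets.ParseFromString(data)  # binary wire format
  except message.DecodeError:
    text_format.Parse(data.decode('utf-8'), t2r_assets)  # text-format fallback
  return t2r_assets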
Example #3

    def get_out_label_specification(self, mode):
        """The specification for the output labels after executing preprocess_fn.

        Note that we strip all optional specs to further reduce communication
        and computation overhead when feeding TPUs.

        Arguments:
          mode: mode key for this feature specification.

        Returns:
          A TensorSpecStruct describing the required and optional tensors.
        """
        return tensorspec_utils.replace_dtype(
            tensorspec_utils.filter_required_flat_tensor_spec(
                self._preprocessor.get_out_label_specification(mode)),
            from_dtype=tf.float32,
            to_dtype=tf.bfloat16)
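
For intuition only, not library code: the dtype replacement is conceptually equivalent to rebuilding every float32 entry of the spec structure with bfloat16, which halves the bytes per element fed to the TPU. A minimal stand-alone illustration with a plain tf.TensorSpec; the spec name and shape are assumptions.

import tensorflow as tf  # TF 1.x

float_spec = tf.TensorSpec(shape=[None, 64, 64, 3], dtype=tf.float32,
                           name='image')  # hypothetical spec

# What replace_dtype(..., from_dtype=tf.float32, to_dtype=tf.bfloat16) does,
# conceptually, for each matching entry of the filtered label spec.
bfloat16_spec = tf.TensorSpec(shape=float_spec.shape, dtype=tf.bfloat16,
                              name=float_spec.name)

assert bfloat16_spec.dtype == tf.bfloat16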
Example #4
        def serving_input_receiver_fn():
            """Create the ServingInputReceiver to export a saved model.

            Returns:
              An instance of ServingInputReceiver.
            """
            # We have to filter our specs since only required tensors are
            # used at inference time.
            flat_feature_spec = tensorspec_utils.flatten_spec_structure(
                self._feature_spec)
            # We need to freeze the conditioning and inference shapes.
            for key, value in flat_feature_spec.condition.items():
                ref_shape = value.shape.as_list()
                shape = [self._num_condition_samples_per_task] + ref_shape[1:]
                flat_feature_spec.condition[key] = (
                    tensorspec_utils.ExtendedTensorSpec.from_spec(value,
                                                                  shape=shape))

            for key, value in flat_feature_spec.inference.items():
                ref_shape = value.shape.as_list()
                shape = [self._num_inference_samples_per_task] + ref_shape[1:]
                flat_feature_spec.inference[key] = (
                    tensorspec_utils.ExtendedTensorSpec.from_spec(value,
                                                                  shape=shape))

            required_feature_spec = (
                tensorspec_utils.filter_required_flat_tensor_spec(
                    flat_feature_spec))
            receiver_tensors = tensorspec_utils.make_placeholders(
                required_feature_spec)

            # We want to ensure that our feature processing pipeline operates on a
            # copy of the features and does not alter the receiver_tensors.
            features = tensorspec_utils.flatten_spec_structure(
                copy.copy(receiver_tensors))

            if self._preprocess_fn is not None:
                features, _ = self._preprocess_fn(
                    features=features,
                    labels=None,
                    mode=tf.estimator.ModeKeys.PREDICT)

            return tf.estimator.export.ServingInputReceiver(
                features, receiver_tensors)
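
A hedged sketch of consuming an export produced with this raw-tensor receiver, using TF1's contrib predictor. The export path, the feature key 'state', and its shape are assumptions; the real keys and shapes come from the model's required feature spec.

import numpy as np
from tensorflow.contrib import predictor as contrib_predictor  # TF 1.x only

predict_fn = contrib_predictor.from_saved_model(
    '/tmp/t2r_export/1234567890')  # hypothetical SavedModel directory

# With make_placeholders-based receivers, the serving signature expects one
# entry per placeholder, keyed by the flattened feature name, instead of a
# single serialized tf.Example string.
outputs = predict_fn(
    {'state': np.zeros((1, 7), dtype=np.float32)})  # hypothetical key/shape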
Example #5

        def serving_input_receiver_fn():
            """Create the ServingInputReceiver to export a saved model.

            Returns:
              An instance of ServingInputReceiver.
            """
            # We assume one input (a string which contains the serialized
            # proto) per dataset_key.
            feature_spec = self._get_input_features_for_receiver_fn()
            # We have to filter our specs since only required tensors are
            # used at inference time.
            flat_feature_spec = tensorspec_utils.flatten_spec_structure(
                feature_spec)
            required_feature_spec = (
                tensorspec_utils.filter_required_flat_tensor_spec(
                    flat_feature_spec))
            dataset_keys = set(
                [t.dataset_key for t in required_feature_spec.values()])
            receiver_tensors = {}
            parse_tensors = {}
            for dataset_key in dataset_keys:
                receiver_name = 'input_example_' + six.ensure_str(
                    (dataset_key or 'tensor'))
                parse_tensors[dataset_key] = tf.placeholder(dtype=tf.string,
                                                            shape=[None],
                                                            name=receiver_name)
                receiver_tensors[receiver_name] = parse_tensors[dataset_key]
            parse_tf_example_fn = tfdata.create_parse_tf_example_fn(
                feature_tspec=required_feature_spec)
            features = parse_tf_example_fn(parse_tensors)

            if (not self._export_raw_receivers
                    and self._preprocess_fn is not None):
                features, _ = self._preprocess_fn(features=features,
                                                  labels=None)

            return tf.estimator.export.ServingInputReceiver(
                features, receiver_tensors)
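
A hedged sketch of building the serialized input such a receiver expects. The feature name 'pose' and its values are assumptions; the receiver key follows the 'input_example_' + dataset_key convention above, so an empty dataset key maps to 'input_example_tensor'.

import tensorflow as tf  # TF 1.x

example = tf.train.Example(features=tf.train.Features(feature={
    # Hypothetical float feature; real names and types come from the
    # required feature spec that the receiver was built from.
    'pose': tf.train.Feature(
        float_list=tf.train.FloatList(value=[0.1, 0.2, 0.3])),
}))
serialized = example.SerializeToString()

# One batched string tensor per receiver placeholder, e.g. for the default
# (empty) dataset key:
feed = {'input_example_tensor': [serialized]}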
Example #6
    def __init__(self,
                 t2r_model,
                 checkpoint_dir=None,
                 use_gpu=True,
                 timeout=600,
                 tf_intra_op_parallelism_threads=4,
                 tf_inter_op_parallelism_threads=4):
        """Load the model from registry and build the model in a new tf graph.

        Args:
          t2r_model: A T2RModel instance.
          checkpoint_dir: The directory to find the checkpoint. If set to `None`,
            no checkpoint will be loaded and, if init_with_random_variables is
            set to True, a random model is initialized. Note, either
            checkpoint_dir or init_with_random_variables has to be set, but not
            both.
          use_gpu: If True, will attempt to use the GPU for inference.
          timeout: (defaults to 600 seconds) If no checkpoint has been found
            after `timeout` seconds, restore fails.
          tf_intra_op_parallelism_threads: see `tf.ConfigProto`.
          tf_inter_op_parallelism_threads: see `tf.ConfigProto`.
        """
        self._checkpoint_dir = checkpoint_dir
        self._timeout = timeout

        # As done in model_inference.py, a separate graph is used to build the
        # target network.
        g = tf.Graph()
        mode = tf.estimator.ModeKeys.PREDICT
        with g.as_default():
            preprocessor = t2r_model.preprocessor
            feature_tspec = preprocessor.get_in_feature_specification(mode)
            # We perform inference, hence we only want the required tensors.
            self._feature_tspec = tensorspec_utils.filter_required_flat_tensor_spec(
                feature_tspec)
            label_tspec = preprocessor.get_in_label_specification(mode)
            self._label_tspec = tensorspec_utils.filter_required_flat_tensor_spec(
                label_tspec)

            self._features = tensorspec_utils.make_placeholders(
                self._feature_tspec, batch_size=None)

            preprocessed_features, _ = preprocessor.preprocess(
                features=self._features, labels=None, mode=mode)
            estimator_spec = t2r_model.model_fn(preprocessed_features, None,
                                                mode)
            self._predictions = estimator_spec.predictions
            config = tf.ConfigProto(
                device_count={'GPU': 1 if use_gpu else 0},
                intra_op_parallelism_threads=tf_intra_op_parallelism_threads,
                inter_op_parallelism_threads=tf_inter_op_parallelism_threads)
            self._sess = tf.Session(graph=g, config=config)
            self._t2r_model = t2r_model
            # The location of the last checkpoint loaded.
            self._current_checkpoint_path = None
            self._tf_global_step = tf.train.get_or_create_global_step()
            # The PREDICT graph is generated which contains only the model specific
            # variables and not training specific variables, e.g. Adam, Momentum.
            var_list = contrib_framework.get_variables()
            self._saver = tf.train.Saver(var_list=var_list)
            # Default init op in case init_randomly is called.
            self._global_init_op = tf.global_variables_initializer()

        self._model_was_restored = False
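
A hedged sketch, not the library's actual methods, of how restore and predict could be layered on top of this constructor using only the members built above; tf.train.latest_checkpoint and Saver.restore are standard TF1 calls.

    def restore(self):
        # Hypothetical method for illustration only. Finds the newest
        # checkpoint in the configured directory and restores the
        # PREDICT-graph variables captured by the saver above.
        checkpoint_path = tf.train.latest_checkpoint(self._checkpoint_dir)
        if checkpoint_path is None:
            return False
        self._saver.restore(self._sess, checkpoint_path)
        self._current_checkpoint_path = checkpoint_path
        self._model_was_restored = True
        return True

    def predict(self, feed_features):
        # Hypothetical method for illustration only. feed_features maps the
        # flattened feature names to numpy arrays and fills the placeholders
        # created by make_placeholders.
        feed_dict = {self._features[key]: value
                     for key, value in feed_features.items()}
        return self._sess.run(self._predictions, feed_dict=feed_dict)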