Example #1
def _colate_features_to_feeds_and_fetches(signature,
                                          features,
                                          graph,
                                          continue_from=None):
    """Uses a saved model signature to construct feed and fetch dictionaries."""
    if continue_from is None:
        state_values = {}
    elif _feature_keys.FilteringResults.STATE_TUPLE in continue_from:
        # We're continuing from an evaluation, so we need to unpack/flatten state.
        state_values = _head.state_to_dictionary(
            continue_from[_feature_keys.FilteringResults.STATE_TUPLE])
    else:
        state_values = continue_from
    input_feed_tensors_by_name = {
        input_key: graph.as_graph_element(input_value.name)
        for input_key, input_value in signature.inputs.items()
    }
    output_tensors_by_name = {
        output_key: graph.as_graph_element(output_value.name)
        for output_key, output_value in signature.outputs.items()
    }
    feed_dict = {}
    for state_key, state_value in state_values.items():
        feed_dict[input_feed_tensors_by_name[state_key]] = state_value
    for feature_key, feature_value in features.items():
        feed_dict[input_feed_tensors_by_name[feature_key]] = feature_value
    return output_tensors_by_name, feed_dict
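A minimal usage sketch (TF 1.x), assuming the SavedModel was produced by a time series estimator's export_savedmodel; `export_dir` and the "filter" signature key are placeholders for whatever the export actually produced.

import tensorflow as tf

with tf.Graph().as_default() as graph:
  with tf.Session() as session:
    # Load the exported model; the returned MetaGraphDef carries signatures.
    meta_graph = tf.saved_model.loader.load(
        session, [tf.saved_model.tag_constants.SERVING], export_dir)
    signature = meta_graph.signature_def["filter"]  # key depends on the export
    fetches, feed_dict = _colate_features_to_feeds_and_fetches(
        signature=signature,
        features={"times": [[0, 1, 2]],
                  "values": [[[0.], [1.], [2.]]]},
        graph=graph)
    results = session.run(fetches, feed_dict=feed_dict)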
Example #2
 def _model_start_state_placeholders(
     self, batch_size_tensor, static_batch_size=None):
   """Creates placeholders with zeroed start state for the current model."""
   gathered_state = {}
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     # Evaluate the initial state as same-dtype "zero" values. These zero
     # constants aren't used, but are necessary for feeding to
     # placeholder_with_default for the "cold start" case where state is not
     # fed to the model.
     def _zeros_like_constant(tensor):
       return tensor_util.constant_value(array_ops.zeros_like(tensor))
     start_state = nest.map_structure(
         _zeros_like_constant, self._model.get_start_state())
   for prefixed_state_name, state in ts_head_lib.state_to_dictionary(
       start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (static_batch_size,)).concatenate(state.shape)
     default_state_broadcast = array_ops.tile(
         state[None, ...],
         multiples=array_ops.concat(
             [batch_size_tensor[None],
              array_ops.ones(len(state.shape), dtype=dtypes.int32)],
             axis=0))
     gathered_state[prefixed_state_name] = array_ops.placeholder_with_default(
         input=default_state_broadcast,
         name=prefixed_state_name,
         shape=state_shape_with_batch)
   return gathered_state
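A standalone sketch (TF 1.x) of the cold-start pattern above: a per-example zero state is tiled to the runtime batch size and wrapped in placeholder_with_default, so callers may either feed state explicitly or omit it. All names and shapes here are illustrative, not from the library.

import numpy as np
import tensorflow as tf

times = tf.placeholder(tf.int64, shape=[None, 4], name="times")
batch_size_tensor = tf.shape(times)[0]
zero_state = np.zeros([3], dtype=np.float32)  # one example's start state
default_state = tf.tile(
    zero_state[None, ...],
    multiples=tf.concat([batch_size_tensor[None], [1]], axis=0))
state = tf.placeholder_with_default(
    input=default_state, shape=[None, 3], name="state")
with tf.Session() as session:
  # Cold start: nothing fed for `state`, so zeros are tiled to batch size 2.
  print(session.run(state, feed_dict={times: np.zeros([2, 4], np.int64)}))
  # Warm start: an explicitly fed state overrides the default.
  print(session.run(state, feed_dict={times: np.zeros([2, 4], np.int64),
                                      state: np.ones([2, 3], np.float32)}))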
Example #3
 def _serving_input_receiver_fn():
   """A receiver function to be passed to export_savedmodel."""
   placeholders = {}
   placeholders[feature_keys.TrainEvalFeatures.TIMES] = (
       array_ops.placeholder(
           name=feature_keys.TrainEvalFeatures.TIMES,
           dtype=dtypes.int64,
           shape=[default_batch_size, default_series_length]))
   # Values are only necessary when filtering. For prediction the default
   # value will be ignored.
   placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
       array_ops.placeholder_with_default(
           name=feature_keys.TrainEvalFeatures.VALUES,
           input=array_ops.zeros(
               shape=[
                   default_batch_size
                   if default_batch_size else 0, default_series_length
                   if default_series_length else 0, self._model.num_features
               ],
               dtype=self._model.dtype),
           shape=(default_batch_size, default_series_length,
                  self._model.num_features)))
   if self._model.exogenous_feature_columns:
     with ops.Graph().as_default():
       # Default placeholders have only an unknown batch dimension. Make them
       # in a separate graph, then splice in the series length to the shapes
       # and re-create them in the outer graph.
       parsed_features = (
           feature_column.make_parse_example_spec(
               self._model.exogenous_feature_columns))
       placeholder_features = parsing_ops.parse_example(
           serialized=array_ops.placeholder(
               shape=[None], dtype=dtypes.string),
           features=parsed_features)
       exogenous_feature_shapes = {
           key: (value.get_shape(), value.dtype) for key, value
           in placeholder_features.items()}
     for feature_key, (batch_only_feature_shape, value_dtype) in (
         exogenous_feature_shapes.items()):
       batch_only_feature_shape = (
           batch_only_feature_shape.with_rank_at_least(1).as_list())
       feature_shape = ([default_batch_size, default_series_length]
                        + batch_only_feature_shape[1:])
       placeholders[feature_key] = array_ops.placeholder(
           dtype=value_dtype, name=feature_key, shape=feature_shape)
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     model_start_state = self._model.get_start_state()
   for prefixed_state_name, state_tensor in ts_head_lib.state_to_dictionary(
       model_start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (default_batch_size,)).concatenate(state_tensor.get_shape())
     placeholders[prefixed_state_name] = array_ops.placeholder(
         name=prefixed_state_name,
         shape=state_shape_with_batch,
         dtype=state_tensor.dtype)
   return export_lib.ServingInputReceiver(placeholders, placeholders)
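A sketch of how such a receiver function is typically wired up (TF 1.x); `estimator` and `export_base_dir` are assumed to exist in the enclosing scope.

export_dir = estimator.export_savedmodel(
    export_dir_base=export_base_dir,
    serving_input_receiver_fn=_serving_input_receiver_fn)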
Example #4
def _colate_features_to_feeds_and_fetches(signature, features, graph,
                                          continue_from=None):
  """Uses a saved model signature to construct feed and fetch dictionaries."""
  if continue_from is None:
    state_values = {}
  elif _feature_keys.FilteringResults.STATE_TUPLE in continue_from:
    # We're continuing from an evaluation, so we need to unpack/flatten state.
    state_values = _head.state_to_dictionary(
        continue_from[_feature_keys.FilteringResults.STATE_TUPLE])
  else:
    state_values = continue_from
  input_feed_tensors_by_name = {
      input_key: graph.as_graph_element(input_value.name)
      for input_key, input_value in signature.inputs.items()
  }
  output_tensors_by_name = {
      output_key: graph.as_graph_element(output_value.name)
      for output_key, output_value in signature.outputs.items()
  }
  feed_dict = {}
  for state_key, state_value in state_values.items():
    feed_dict[input_feed_tensors_by_name[state_key]] = state_value
  for feature_key, feature_value in features.items():
    feed_dict[input_feed_tensors_by_name[feature_key]] = feature_value
  return output_tensors_by_name, feed_dict
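A hedged sketch of the `continue_from` branch above: results from a prior filtering run, keyed by `FilteringResults.STATE_TUPLE`, are flattened into named state feeds. `previous_results`, `signature`, `graph`, and `session` are assumed to come from an earlier filtering call like the one in Example #1.

fetches, feed_dict = _colate_features_to_feeds_and_fetches(
    signature=signature,
    features={"times": [[3, 4, 5]],
              "values": [[[3.], [4.], [5.]]]},
    graph=graph,
    continue_from=previous_results)  # dict holding STATE_TUPLE from filtering
continued = session.run(fetches, feed_dict=feed_dict)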
Example #5
 def _serving_input_receiver_fn():
   """A receiver function to be passed to export_savedmodel."""
   placeholders = {}
   placeholders[feature_keys.TrainEvalFeatures.TIMES] = (
       array_ops.placeholder(
           name=feature_keys.TrainEvalFeatures.TIMES,
           dtype=dtypes.int64,
           shape=[default_batch_size, default_series_length]))
   # Values are only necessary when filtering. For prediction the default
   # value will be ignored.
   placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
       array_ops.placeholder_with_default(
           name=feature_keys.TrainEvalFeatures.VALUES,
           input=array_ops.zeros(
               shape=[
                   default_batch_size
                   if default_batch_size else 0, default_series_length
                   if default_series_length else 0, self._model.num_features
               ],
               dtype=self._model.dtype),
           shape=(default_batch_size, default_series_length,
                  self._model.num_features)))
   with ops.Graph().as_default():
     # Default placeholders have only an unknown batch dimension. Make them
     # in a separate graph, then splice in the series length to the shapes
     # and re-create them in the outer graph.
     exogenous_feature_shapes = {
         key: (value.get_shape(), value.dtype) for key, value
         in feature_column.make_place_holder_tensors_for_base_features(
             self._model.exogenous_feature_columns).items()}
   for feature_key, (batch_only_feature_shape, value_dtype) in (
       exogenous_feature_shapes.items()):
     batch_only_feature_shape = batch_only_feature_shape.with_rank_at_least(
         1).as_list()
     feature_shape = ([default_batch_size, default_series_length]
                      + batch_only_feature_shape[1:])
     placeholders[feature_key] = array_ops.placeholder(
         dtype=value_dtype, name=feature_key, shape=feature_shape)
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     model_start_state = self._model.get_start_state()
   for prefixed_state_name, state_tensor in ts_head_lib.state_to_dictionary(
       model_start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (default_batch_size,)).concatenate(state_tensor.get_shape())
     placeholders[prefixed_state_name] = array_ops.placeholder(
         name=prefixed_state_name,
         shape=state_shape_with_batch,
         dtype=state_tensor.dtype)
   return export_lib.ServingInputReceiver(placeholders, placeholders)
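A minimal standalone sketch (TF 1.x) of the scratch-graph trick used above: ops are built in a throwaway Graph only so their static shapes and dtypes can be read off, and fresh placeholders are then created in the outer graph with the batch and series-length dimensions spliced in. The "price" feature spec is illustrative.

import tensorflow as tf

with tf.Graph().as_default():  # throwaway graph; none of these ops are kept
  scratch_features = tf.parse_example(
      serialized=tf.placeholder(shape=[None], dtype=tf.string),
      features={"price": tf.FixedLenFeature([1], tf.float32)})
  static_metadata = {key: (value.get_shape(), value.dtype)
                     for key, value in scratch_features.items()}
# Back in the outer (default) graph, only static metadata crosses over.
placeholders = {}
for key, (shape, dtype) in static_metadata.items():
  spliced = [None, None] + shape.with_rank_at_least(1).as_list()[1:]
  placeholders[key] = tf.placeholder(dtype=dtype, name=key, shape=spliced)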
Example #6
 def _serving_input_receiver_fn():
   """A receiver function to be passed to export_savedmodel."""
   placeholders = {}
   placeholders[feature_keys.TrainEvalFeatures.TIMES] = (
       array_ops.placeholder(
           name=feature_keys.TrainEvalFeatures.TIMES,
           dtype=dtypes.int64,
           shape=[default_batch_size, default_series_length]))
   # Values are only necessary when filtering. For prediction the default
   # value will be ignored.
   placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
       array_ops.placeholder_with_default(
           name=feature_keys.TrainEvalFeatures.VALUES,
           input=array_ops.zeros(
               shape=[
                   default_batch_size
                   if default_batch_size else 0, default_series_length
                   if default_series_length else 0, self._model.num_features
               ],
               dtype=self._model.dtype),
           shape=(default_batch_size, default_series_length,
                  self._model.num_features)))
   for feature_key, feature_value in exogenous_features.items():
     value_tensor = ops.convert_to_tensor(feature_value)
     value_tensor.get_shape().with_rank_at_least(2)
     feature_shape = value_tensor.get_shape().as_list()
     feature_shape[0] = default_batch_size
     feature_shape[1] = default_series_length
     placeholders[feature_key] = array_ops.placeholder(
         dtype=value_tensor.dtype, name=feature_key, shape=feature_shape)
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     model_start_state = self._model.get_start_state()
   for prefixed_state_name, state_tensor in ts_head_lib.state_to_dictionary(
       model_start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (default_batch_size,)).concatenate(state_tensor.get_shape())
     placeholders[prefixed_state_name] = array_ops.placeholder(
         name=prefixed_state_name,
         shape=state_shape_with_batch,
         dtype=state_tensor.dtype)
   return export_lib.ServingInputReceiver(placeholders, placeholders)
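A sketch with concrete values of the exogenous-feature loop above: a fixed example array determines the dtype and trailing dimensions, while the batch and series-length dimensions are relaxed (here to None, matching default_batch_size=None and default_series_length=None). The feature name is hypothetical.

import numpy as np
import tensorflow as tf

feature_value = np.zeros([2, 5, 3], dtype=np.float32)  # example data
value_tensor = tf.convert_to_tensor(feature_value)
value_tensor.get_shape().with_rank_at_least(2)  # raises if rank < 2
feature_shape = value_tensor.get_shape().as_list()  # [2, 5, 3]
feature_shape[0] = None  # relax the batch dimension
feature_shape[1] = None  # relax the series-length dimension
placeholder = tf.placeholder(
    dtype=value_tensor.dtype, name="my_exogenous_feature",
    shape=feature_shape)  # -> shape (?, ?, 3)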
Example #7
 def _serving_input_receiver_fn():
   """A receiver function to be passed to export_savedmodel."""
   placeholders = {}
   time_placeholder = array_ops.placeholder(
       name=feature_keys.TrainEvalFeatures.TIMES,
       dtype=dtypes.int64,
       shape=[default_batch_size, default_series_length])
   placeholders[feature_keys.TrainEvalFeatures.TIMES] = time_placeholder
   # Values are only necessary when filtering. For prediction the default
   # value will be ignored.
   placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
       array_ops.placeholder_with_default(
           name=feature_keys.TrainEvalFeatures.VALUES,
           input=array_ops.zeros(
               shape=[
                   default_batch_size
                   if default_batch_size else 0, default_series_length
                   if default_series_length else 0, self._model.num_features
               ],
               dtype=self._model.dtype),
           shape=(default_batch_size, default_series_length,
                  self._model.num_features)))
   if self._model.exogenous_feature_columns:
     with ops.Graph().as_default():
       # Default placeholders have only an unknown batch dimension. Make them
       # in a separate graph, then splice in the series length to the shapes
       # and re-create them in the outer graph.
       parsed_features = (
           feature_column.make_parse_example_spec(
               self._model.exogenous_feature_columns))
       placeholder_features = parsing_ops.parse_example(
           serialized=array_ops.placeholder(
               shape=[None], dtype=dtypes.string),
           features=parsed_features)
       exogenous_feature_shapes = {
           key: (value.get_shape(), value.dtype) for key, value
           in placeholder_features.items()}
     for feature_key, (batch_only_feature_shape, value_dtype) in (
         exogenous_feature_shapes.items()):
       batch_only_feature_shape = (
           batch_only_feature_shape.with_rank_at_least(1).as_list())
       feature_shape = ([default_batch_size, default_series_length]
                        + batch_only_feature_shape[1:])
       placeholders[feature_key] = array_ops.placeholder(
           dtype=value_dtype, name=feature_key, shape=feature_shape)
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     # Evaluate the initial state as same-dtype "zero" values. These zero
     # constants aren't used, but are necessary for feeding to
     # placeholder_with_default for the "cold start" case where state is not
     # fed to the model.
     def _zeros_like_constant(tensor):
       return tensor_util.constant_value(array_ops.zeros_like(tensor))
     start_state = nest.map_structure(
         _zeros_like_constant, self._model.get_start_state())
   batch_size_tensor = array_ops.shape(time_placeholder)[0]
   for prefixed_state_name, state in ts_head_lib.state_to_dictionary(
       start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (default_batch_size,)).concatenate(state.shape)
     default_state_broadcast = array_ops.tile(
         state[None, ...],
         multiples=array_ops.concat(
             [batch_size_tensor[None],
              array_ops.ones(len(state.shape), dtype=dtypes.int32)],
             axis=0))
     placeholders[prefixed_state_name] = array_ops.placeholder_with_default(
         input=default_state_broadcast,
         name=prefixed_state_name,
         shape=state_shape_with_batch)
   return export_lib.ServingInputReceiver(placeholders, placeholders)
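A small standalone sketch (TF 1.x) of `_zeros_like_constant` above: when the input's shape is fully defined, zeros_like yields a graph constant whose value constant_value can extract as a numpy array, so the scratch graph never needs a Session. The stand-in state tensor is illustrative.

import tensorflow as tf
from tensorflow.python.framework import tensor_util

with tf.Graph().as_default():
  start_state = tf.ones([3], dtype=tf.float64)  # stand-in for model state
  zeros = tensor_util.constant_value(tf.zeros_like(start_state))
# `zeros` is a plain numpy array, array([0., 0., 0.]), usable outside the graph.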