Example #1
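Builds raw placeholders for the times and values features. If the model has exogenous feature columns, their placeholder shapes are recovered from a `parse_example` call made in a scratch graph, then re-created in the outer graph with the batch and series-length dimensions spliced in; start-state placeholders come from the `_model_start_state_placeholders` helper.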
 def _serving_input_receiver_fn():
     """A receiver function to be passed to export_savedmodel."""
     placeholders = {}
     time_placeholder = array_ops.placeholder(
         name=feature_keys.TrainEvalFeatures.TIMES,
         dtype=dtypes.int64,
         shape=[default_batch_size, default_series_length])
     placeholders[feature_keys.TrainEvalFeatures.TIMES] = time_placeholder
     # Values are only necessary when filtering. For prediction the default
     # value will be ignored.
     placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
         array_ops.placeholder_with_default(
             name=feature_keys.TrainEvalFeatures.VALUES,
             input=array_ops.zeros(
                 shape=[
                     default_batch_size if default_batch_size else 0,
                     default_series_length if default_series_length else 0,
                     self._model.num_features
                 ],
                 dtype=self._model.dtype),
             shape=(default_batch_size, default_series_length,
                    self._model.num_features)))
     if self._model.exogenous_feature_columns:
         with ops.Graph().as_default():
             # Default placeholders have only an unknown batch dimension. Make them
             # in a separate graph, then splice in the series length to the shapes
             # and re-create them in the outer graph.
             parsed_features = (feature_column.make_parse_example_spec(
                 self._model.exogenous_feature_columns))
             placeholder_features = parsing_ops.parse_example(
                 serialized=array_ops.placeholder(shape=[None],
                                                  dtype=dtypes.string),
                 features=parsed_features)
             exogenous_feature_shapes = {
                 key: (value.get_shape(), value.dtype)
                 for key, value in placeholder_features.items()
             }
         for feature_key, (batch_only_feature_shape, value_dtype) in (
                 exogenous_feature_shapes.items()):
             batch_only_feature_shape = (
                 batch_only_feature_shape.with_rank_at_least(1).as_list())
             feature_shape = (
                 [default_batch_size, default_series_length] +
                 batch_only_feature_shape[1:])
             placeholders[feature_key] = array_ops.placeholder(
                 dtype=value_dtype,
                 name=feature_key,
                 shape=feature_shape)
     batch_size_tensor = array_ops.shape(time_placeholder)[0]
     placeholders.update(
         self._model_start_state_placeholders(
             batch_size_tensor, static_batch_size=default_batch_size))
     return export_lib.ServingInputReceiver(placeholders, placeholders)
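For context, a receiver function like the one above is what `Estimator.export_savedmodel` expects. A minimal usage sketch, assuming a `tf.contrib.timeseries` estimator; the variable names and the export path are illustrative:

    # Minimal usage sketch; `estimator` and the export path are assumptions.
    input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
    export_location = estimator.export_savedmodel(
        export_dir_base="/tmp/exported_model",
        serving_input_receiver_fn=input_receiver_fn)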
Example #2
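A close variant of Example #1: exogenous feature shapes come from `feature_column.make_place_holder_tensors_for_base_features`, and the start-state placeholders are built inline from `self._model.get_start_state()` evaluated in a scratch graph.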
 def _serving_input_receiver_fn():
   """A receiver function to be passed to export_savedmodel."""
   placeholders = {}
   placeholders[feature_keys.TrainEvalFeatures.TIMES] = (
       array_ops.placeholder(
           name=feature_keys.TrainEvalFeatures.TIMES,
           dtype=dtypes.int64,
           shape=[default_batch_size, default_series_length]))
   # Values are only necessary when filtering. For prediction the default
   # value will be ignored.
   placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
       array_ops.placeholder_with_default(
           name=feature_keys.TrainEvalFeatures.VALUES,
           input=array_ops.zeros(
               shape=[
                    default_batch_size if default_batch_size else 0,
                    default_series_length if default_series_length else 0,
                    self._model.num_features
               ],
               dtype=self._model.dtype),
           shape=(default_batch_size, default_series_length,
                  self._model.num_features)))
   with ops.Graph().as_default():
     # Default placeholders have only an unknown batch dimension. Make them
     # in a separate graph, then splice in the series length to the shapes
     # and re-create them in the outer graph.
     exogenous_feature_shapes = {
         key: (value.get_shape(), value.dtype) for key, value
         in feature_column.make_place_holder_tensors_for_base_features(
             self._model.exogenous_feature_columns).items()}
   for feature_key, (batch_only_feature_shape, value_dtype) in (
       exogenous_feature_shapes.items()):
     batch_only_feature_shape = batch_only_feature_shape.with_rank_at_least(
         1).as_list()
     feature_shape = ([default_batch_size, default_series_length]
                      + batch_only_feature_shape[1:])
     placeholders[feature_key] = array_ops.placeholder(
         dtype=value_dtype, name=feature_key, shape=feature_shape)
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     model_start_state = self._model.get_start_state()
   for prefixed_state_name, state_tensor in ts_head_lib.state_to_dictionary(
       model_start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (default_batch_size,)).concatenate(state_tensor.get_shape())
     placeholders[prefixed_state_name] = array_ops.placeholder(
         name=prefixed_state_name,
         shape=state_shape_with_batch,
         dtype=state_tensor.dtype)
   return export_lib.ServingInputReceiver(placeholders, placeholders)
Example #3
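A parsed-example receiver: the exported model accepts a batch of serialized `tf.Example` protos through a single string placeholder named "input", builds a parse spec from the model's exogenous feature columns plus times and values columns, and truncates the parsed values to the filtering window.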
 def _serving_input_receiver_fn():
     """A receiver function to be passed to export_savedmodel."""
     times_column = feature_column.numeric_column(
         key=feature_keys.TrainEvalFeatures.TIMES, dtype=dtypes.int64)
     values_column = feature_column.numeric_column(
         key=feature_keys.TrainEvalFeatures.VALUES,
         dtype=values_input_dtype,
         shape=(self._model.num_features, ))
     parsed_features_no_sequence = (
         feature_column.make_parse_example_spec(
             list(self._model.exogenous_feature_columns) +
             [times_column, values_column]))
     parsed_features = {}
     for key, feature_spec in parsed_features_no_sequence.items():
         if isinstance(feature_spec, parsing_ops.FixedLenFeature):
             if key == feature_keys.TrainEvalFeatures.VALUES:
                 parsed_features[key] = feature_spec._replace(
                     shape=((values_proto_length, ) +
                            feature_spec.shape))
             else:
                 parsed_features[key] = feature_spec._replace(
                     shape=((filtering_length + prediction_length, ) +
                            feature_spec.shape))
         elif feature_spec.dtype == dtypes.string:
             parsed_features[key] = parsing_ops.FixedLenFeature(
                 shape=(filtering_length + prediction_length, ),
                 dtype=dtypes.string)
         else:  # VarLenFeature
             raise ValueError(
                 "VarLenFeatures not supported, got %s for key %s" %
                 (feature_spec, key))
     tfexamples = array_ops.placeholder(shape=[default_batch_size],
                                        dtype=dtypes.string,
                                        name="input")
     features = parsing_ops.parse_example(serialized=tfexamples,
                                          features=parsed_features)
     features[feature_keys.TrainEvalFeatures.TIMES] = array_ops.squeeze(
         features[feature_keys.TrainEvalFeatures.TIMES], axis=-1)
     features[feature_keys.TrainEvalFeatures.VALUES] = math_ops.cast(
         features[feature_keys.TrainEvalFeatures.VALUES],
         dtype=self._model.dtype)[:, :filtering_length]
     features.update(
         self._model_start_state_placeholders(
             batch_size_tensor=array_ops.shape(
                 features[feature_keys.TrainEvalFeatures.TIMES])[0],
             static_batch_size=default_batch_size))
     return export_lib.ServingInputReceiver(features,
                                            {"examples": tfexamples})
Example #4
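Here the exogenous placeholder shapes are derived from concrete values in an `exogenous_features` dictionary rather than from feature columns; otherwise the structure matches Example #2.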
 def _serving_input_receiver_fn():
     """A receiver function to be passed to export_savedmodel."""
     placeholders = {}
     placeholders[feature_keys.TrainEvalFeatures.TIMES] = (
         array_ops.placeholder(
             name=feature_keys.TrainEvalFeatures.TIMES,
             dtype=dtypes.int64,
             shape=[default_batch_size, default_series_length]))
     # Values are only necessary when filtering. For prediction the default
     # value will be ignored.
     placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
         array_ops.placeholder_with_default(
             name=feature_keys.TrainEvalFeatures.VALUES,
              input=array_ops.zeros(
                  shape=[
                      default_batch_size if default_batch_size else 0,
                      default_series_length if default_series_length else 0,
                      self._model.num_features
                  ],
                  dtype=self._model.dtype),
             shape=(default_batch_size, default_series_length,
                    self._model.num_features)))
     for feature_key, feature_value in exogenous_features.items():
         value_tensor = ops.convert_to_tensor(feature_value)
         value_tensor.get_shape().with_rank_at_least(2)
         feature_shape = value_tensor.get_shape().as_list()
         feature_shape[0] = default_batch_size
         feature_shape[1] = default_series_length
         placeholders[feature_key] = array_ops.placeholder(
             dtype=value_tensor.dtype,
             name=feature_key,
             shape=feature_shape)
     # Models may not know the shape of their state without creating some
     # variables/ops. Avoid polluting the default graph by making a new one. We
     # use only static metadata from the returned Tensors.
     with ops.Graph().as_default():
         self._model.initialize_graph()
         model_start_state = self._model.get_start_state()
     for prefixed_state_name, state_tensor in model_utils.state_to_dictionary(
             model_start_state).items():
         state_shape_with_batch = tensor_shape.TensorShape(
             (default_batch_size, )).concatenate(
                 state_tensor.get_shape())
         placeholders[prefixed_state_name] = array_ops.placeholder(
             name=prefixed_state_name,
             shape=state_shape_with_batch,
             dtype=state_tensor.dtype)
     return export_lib.ServingInputReceiver(placeholders, placeholders)
Example #5
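Two related snippets: a small `_input_fn` that serves as either a training input function or a serving receiver for core and contrib estimators, followed by a cold-start variant of `_serving_input_receiver_fn` whose state placeholders default to zeros broadcast to the runtime batch size when no state is fed.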
 def _input_fn():
     with ops.name_scope('inputs'):
         x = array_ops.placeholder_with_default(0.0, shape=[], name='x')
         y = array_ops.placeholder_with_default(0.0, shape=[], name='y')
     label = constant_op.constant(0.0)
     features = {'x': x, 'y': y}
     if core:
         if train:
             return features, label
         return export_lib.ServingInputReceiver(features=features,
                                                receiver_tensors=features)
     else:
         if train:
             return features, label
         return input_fn_utils.InputFnOps(features=features,
                                          labels={},
                                          default_inputs=features)
 def _serving_input_receiver_fn():
   """A receiver function to be passed to export_savedmodel."""
   placeholders = {}
   time_placeholder = array_ops.placeholder(
       name=feature_keys.TrainEvalFeatures.TIMES,
       dtype=dtypes.int64,
       shape=[default_batch_size, default_series_length])
   placeholders[feature_keys.TrainEvalFeatures.TIMES] = time_placeholder
   # Values are only necessary when filtering. For prediction the default
   # value will be ignored.
   placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
       array_ops.placeholder_with_default(
           name=feature_keys.TrainEvalFeatures.VALUES,
           input=array_ops.zeros(
               shape=[
                    default_batch_size if default_batch_size else 0,
                    default_series_length if default_series_length else 0,
                    self._model.num_features
               ],
               dtype=self._model.dtype),
           shape=(default_batch_size, default_series_length,
                  self._model.num_features)))
   if self._model.exogenous_feature_columns:
     with ops.Graph().as_default():
       # Default placeholders have only an unknown batch dimension. Make them
       # in a separate graph, then splice in the series length to the shapes
       # and re-create them in the outer graph.
       parsed_features = (
           feature_column.make_parse_example_spec(
               self._model.exogenous_feature_columns))
       placeholder_features = parsing_ops.parse_example(
           serialized=array_ops.placeholder(
               shape=[None], dtype=dtypes.string),
           features=parsed_features)
       exogenous_feature_shapes = {
           key: (value.get_shape(), value.dtype) for key, value
           in placeholder_features.items()}
     for feature_key, (batch_only_feature_shape, value_dtype) in (
         exogenous_feature_shapes.items()):
       batch_only_feature_shape = (
           batch_only_feature_shape.with_rank_at_least(1).as_list())
       feature_shape = ([default_batch_size, default_series_length]
                        + batch_only_feature_shape[1:])
       placeholders[feature_key] = array_ops.placeholder(
           dtype=value_dtype, name=feature_key, shape=feature_shape)
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     # Evaluate the initial state as same-dtype "zero" values. These zero
     # constants aren't used, but are necessary for feeding to
     # placeholder_with_default for the "cold start" case where state is not
     # fed to the model.
     def _zeros_like_constant(tensor):
       return tensor_util.constant_value(array_ops.zeros_like(tensor))
     start_state = nest.map_structure(
         _zeros_like_constant, self._model.get_start_state())
   batch_size_tensor = array_ops.shape(time_placeholder)[0]
   for prefixed_state_name, state in ts_head_lib.state_to_dictionary(
       start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (default_batch_size,)).concatenate(state.shape)
     default_state_broadcast = array_ops.tile(
         state[None, ...],
         multiples=array_ops.concat(
             [batch_size_tensor[None],
              array_ops.ones(len(state.shape), dtype=dtypes.int32)],
             axis=0))
     placeholders[prefixed_state_name] = array_ops.placeholder_with_default(
         input=default_state_broadcast,
         name=prefixed_state_name,
         shape=state_shape_with_batch)
   return export_lib.ServingInputReceiver(placeholders, placeholders)