# These snippets assume `import tensorflow as tf` and that `export_lib` is an
# alias for the estimator export module (tf.estimator.export exposes
# ServingInputReceiver); `feature_keys` below refers to the timeseries
# feature-key constants.
def serving_input_fn():
  receiver_1 = tf.compat.v1.placeholder(tf.dtypes.string)
  receiver_2 = tf.compat.v1.placeholder(tf.dtypes.string)
  receiver_tensors = {
      'rec1': receiver_1,
      'rec2': receiver_2,
  }
  concat = tf.strings.join([receiver_1, receiver_2])
  concat2 = tf.identity(concat)
  features = {
      'feature0': tf.strings.join([concat, concat2], ':'),
      'feature1': tf.constant([1]),
  }
  alternate_tensors = {
      'alt_name_1': concat,
      'alt_name_2': {
          'tensor1': concat,
          'tensor2': concat2
      }
  }
  return export_lib.ServingInputReceiver(features, receiver_tensors,
                                         alternate_tensors)
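# Usage sketch (not from the original source): with the assumed aliases above,
# the returned ServingInputReceiver is a namedtuple, and the named alternates
# land in its receiver_tensors_alternatives field, which the SavedModel export
# path can turn into additional SignatureDefs alongside the default one.
import tensorflow as tf

export_lib = tf.estimator.export  # assumed alias, matching the snippets here

with tf.Graph().as_default():
  receiver = serving_input_fn()
  print(sorted(receiver.receiver_tensors))               # ['rec1', 'rec2']
  print(sorted(receiver.receiver_tensors_alternatives))  # ['alt_name_1', 'alt_name_2']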
def _serving_input_receiver_fn():
  """A receiver function to be passed to export_saved_model."""
  placeholders = {}
  time_placeholder = tf.compat.v1.placeholder(
      name=feature_keys.TrainEvalFeatures.TIMES,
      dtype=tf.dtypes.int64,
      shape=[default_batch_size, default_series_length])
  placeholders[feature_keys.TrainEvalFeatures.TIMES] = time_placeholder
  # Values are only necessary when filtering. For prediction the default
  # value will be ignored.
  placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
      tf.compat.v1.placeholder_with_default(
          name=feature_keys.TrainEvalFeatures.VALUES,
          input=tf.zeros(
              shape=[
                  default_batch_size if default_batch_size else 0,
                  default_series_length if default_series_length else 0,
                  self._model.num_features
              ],
              dtype=self._model.dtype),
          shape=(default_batch_size, default_series_length,
                 self._model.num_features)))
  if self._model.exogenous_feature_columns:
    with tf.Graph().as_default():
      # Default placeholders have only an unknown batch dimension. Make them
      # in a separate graph, then splice in the series length to the shapes
      # and re-create them in the outer graph.
      parsed_features = (
          tf.compat.v1.feature_column.make_parse_example_spec(
              self._model.exogenous_feature_columns))
      placeholder_features = tf.compat.v1.io.parse_example(
          serialized=tf.compat.v1.placeholder(
              shape=[None], dtype=tf.dtypes.string),
          features=parsed_features)
      exogenous_feature_shapes = {
          key: (value.get_shape(), value.dtype)
          for key, value in placeholder_features.items()
      }
    for feature_key, (batch_only_feature_shape, value_dtype) in (
        exogenous_feature_shapes.items()):
      batch_only_feature_shape = (
          batch_only_feature_shape.with_rank_at_least(1).as_list())
      feature_shape = ([default_batch_size, default_series_length] +
                       batch_only_feature_shape[1:])
      placeholders[feature_key] = tf.compat.v1.placeholder(
          dtype=value_dtype, name=feature_key, shape=feature_shape)
  batch_size_tensor = tf.compat.v1.shape(time_placeholder)[0]
  placeholders.update(
      self._model_start_state_placeholders(
          batch_size_tensor, static_batch_size=default_batch_size))
  return export_lib.ServingInputReceiver(placeholders, placeholders)
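# Minimal standalone demonstration (a sketch, separate from the receiver
# above) of the placeholder_with_default behavior the VALUES entry relies on:
# the default tensor is used when the caller feeds nothing, and an explicit
# feed overrides it.
import numpy as np
import tensorflow as tf

with tf.Graph().as_default():
  values = tf.compat.v1.placeholder_with_default(
      input=tf.zeros([1, 2, 3]), shape=[None, None, 3])
  with tf.compat.v1.Session() as sess:
    print(sess.run(values).shape)  # (1, 2, 3): default used
    print(sess.run(values,
                   feed_dict={values: np.ones([4, 5, 3], np.float32)}).shape)
    # (4, 5, 3): the fed value wins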
def _serving_input_receiver_fn():
  """A receiver function to be passed to export_saved_model."""
  times_column = tf.feature_column.numeric_column(
      key=feature_keys.TrainEvalFeatures.TIMES, dtype=tf.dtypes.int64)
  values_column = tf.feature_column.numeric_column(
      key=feature_keys.TrainEvalFeatures.VALUES,
      dtype=values_input_dtype,
      shape=(self._model.num_features,))
  parsed_features_no_sequence = (
      tf.compat.v1.feature_column.make_parse_example_spec(
          list(self._model.exogenous_feature_columns) +
          [times_column, values_column]))
  parsed_features = {}
  for key, feature_spec in parsed_features_no_sequence.items():
    if isinstance(feature_spec, tf.io.FixedLenFeature):
      if key == feature_keys.TrainEvalFeatures.VALUES:
        parsed_features[key] = feature_spec._replace(
            shape=((values_proto_length,) + feature_spec.shape))
      else:
        parsed_features[key] = feature_spec._replace(
            shape=((filtering_length + prediction_length,) +
                   feature_spec.shape))
    elif feature_spec.dtype == tf.dtypes.string:
      parsed_features[key] = tf.io.FixedLenFeature(
          shape=(filtering_length + prediction_length,),
          dtype=tf.dtypes.string)
    else:  # VarLenFeature
      raise ValueError("VarLenFeatures not supported, got %s for key %s" %
                       (feature_spec, key))
  tfexamples = tf.compat.v1.placeholder(
      shape=[default_batch_size], dtype=tf.dtypes.string, name="input")
  features = tf.compat.v1.io.parse_example(
      serialized=tfexamples, features=parsed_features)
  features[feature_keys.TrainEvalFeatures.TIMES] = tf.compat.v1.squeeze(
      features[feature_keys.TrainEvalFeatures.TIMES], axis=-1)
  features[feature_keys.TrainEvalFeatures.VALUES] = tf.cast(
      features[feature_keys.TrainEvalFeatures.VALUES],
      dtype=self._model.dtype)[:, :filtering_length]
  features.update(
      self._model_start_state_placeholders(
          batch_size_tensor=tf.compat.v1.shape(
              features[feature_keys.TrainEvalFeatures.TIMES])[0],
          static_batch_size=default_batch_size))
  return export_lib.ServingInputReceiver(features, {"examples": tfexamples})
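# Request-construction sketch (assumptions: 'times' and 'values' are the
# string values of feature_keys.TrainEvalFeatures.TIMES/VALUES, and the list
# lengths must match the filtering_length + prediction_length window above).
# Each serialized tf.train.Example is one batch entry fed to the "input"
# placeholder, i.e. the "examples" receiver tensor.
import tensorflow as tf

def _make_serialized_example(times, values):
  example = tf.train.Example(features=tf.train.Features(feature={
      'times': tf.train.Feature(int64_list=tf.train.Int64List(value=times)),
      'values': tf.train.Feature(float_list=tf.train.FloatList(value=values)),
  }))
  return example.SerializeToString()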
def _serving_input_receiver_fn():
  receiver = tf.compat.v1.placeholder(
      tf.dtypes.float32, shape=[None, 1], name='input')
  # A bare tensor is also accepted for receiver_tensors; ServingInputReceiver
  # wraps it in a dict under a default input name.
  return export_lib.ServingInputReceiver(
      features={'feature': receiver}, receiver_tensors=receiver)
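# Export sketch: export_saved_model is the tf.estimator.Estimator method that
# consumes a serving_input_receiver_fn like the ones in this file.
# my_model_fn and my_train_input_fn are hypothetical stand-ins.
import tensorflow as tf

estimator = tf.estimator.Estimator(model_fn=my_model_fn)
estimator.train(input_fn=my_train_input_fn, steps=100)
export_dir = estimator.export_saved_model(
    '/tmp/exported_model', _serving_input_receiver_fn)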
# Same receiver as the next function, written with TF-internal module aliases
# (constant_op, array_ops, dtypes from tensorflow.python).
def serving_input_receiver_fn():
  return export_lib.ServingInputReceiver(
      {'test-features': constant_op.constant([[1], [1]])},
      array_ops.placeholder(dtype=dtypes.string))
def serving_input_receiver_fn():
  return export_lib.ServingInputReceiver(
      {'test-features': tf.constant([[1], [1]])},
      tf.compat.v1.placeholder(dtype=tf.dtypes.string))
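# Inspection sketch (export_dir is assumed to come from an earlier
# export_saved_model call). A bare receiver tensor is keyed under a default
# input name in the resulting SignatureDef, and 'serving_default' is the
# standard default signature key.
import tensorflow as tf

with tf.compat.v1.Session(graph=tf.Graph()) as sess:
  meta_graph = tf.compat.v1.saved_model.load(
      sess, [tf.saved_model.SERVING], export_dir)
  signature = meta_graph.signature_def['serving_default']
  print(signature.inputs)   # receiver_tensors, keyed by name
  print(signature.outputs)  # whatever the model_fn declared as export_outputs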