def _generator_fn(self, batch_size):
  """Yields an endless stream of constant-valued (features, labels) batches.

  Each iteration builds fresh numpy structures from the stored feature and
  label specs, filled with `self._constant_value`, so mutations by the
  consumer never leak across batches.

  Args:
    batch_size: Leading batch dimension for every generated array.

  Yields:
    Tuples of (features, labels) numpy spec structures.
  """
  make_constant = tensorspec_utils.make_constant_numpy
  while True:
    batch_features = make_constant(
        self._feature_spec, self._constant_value, batch_size,
        self._sequence_length)
    batch_labels = make_constant(
        self._label_spec, self._constant_value, batch_size,
        self._sequence_length)
    yield batch_features, batch_labels
def create_warmup_requests_numpy(self, batch_sizes, export_dir):
  """Creates warm-up requests for a given feature specification.

  Writes a `tf_serving_warmup_requests` TFRecord file directly under
  `export_dir` for use with Servo (pass the model's `assets.extra`
  directory as `export_dir` to place it where TF Serving looks for it).
  One PredictionLog record is written per requested batch size, with every
  input tensor filled with zeros.

  Args:
    batch_sizes: Batch sizes of warm-up requests to write.
    export_dir: Directory the warmup file is written into (created if
      missing).

  Returns:
    The filename written.
  """
  feature_spec = self._get_input_features_for_receiver_fn()
  # Flatten so each leaf spec maps to one named input tensor of the request.
  flat_feature_spec = tensorspec_utils.flatten_spec_structure(feature_spec)
  tf.io.gfile.makedirs(export_dir)
  request_filename = os.path.join(export_dir, 'tf_serving_warmup_requests')
  # tf.io.TFRecordWriter replaces the deprecated TF1 alias
  # tf.python_io.TFRecordWriter (removed in TF2), consistent with the
  # tf.io.gfile usage above.
  with tf.io.TFRecordWriter(request_filename) as writer:
    for batch_size in batch_sizes:
      request = predict_pb2.PredictRequest()
      request.model_spec.name = self._model_name
      numpy_feature_specs = tensorspec_utils.make_constant_numpy(
          flat_feature_spec, constant_value=0, batch_size=batch_size)
      for key, numpy_spec in numpy_feature_specs.items():
        request.inputs[key].CopyFrom(
            contrib_util.make_tensor_proto(numpy_spec))
      log = prediction_log_pb2.PredictionLog(
          predict_log=prediction_log_pb2.PredictLog(request=request))
      writer.write(log.SerializeToString())
  return request_filename