def test_combines_sequence_length_with_signature_if_not_emit_timestamps(self):
    """With emit_timesteps=False, sequence_length prefixes every table spec."""
    nested_signature = {
        'a': {
            'b': tf.TensorSpec([3, 3], tf.float32),
            'c': tf.TensorSpec([], tf.int64),
        },
    }
    queue_server = reverb_server.Server(
        [reverb_server.Table.queue('queue', 10, signature=nested_signature)])
    replay_dataset = reverb_dataset.ReplayDataset.from_table_signature(
        f'localhost:{queue_server.port}',
        'queue',
        100,
        emit_timesteps=False,
        sequence_length=5)
    # Each leaf spec gains a leading time dimension of length 5.
    expected_specs = {
        'a': {
            'b': tf.TensorSpec([5, 3, 3], tf.float32),
            'c': tf.TensorSpec([5], tf.int64),
        },
    }
    self.assertDictEqual(replay_dataset.element_spec.data, expected_specs)
def test_tf_saved_model_save_multiple_signatures(self):
    """Serves a TF2 SavedModel exported with two named serving signatures.

    Verifies the model server dispatches Predict requests to the right
    signature: 'custom_signature_key' maps to `g` (returns 2.0) and the
    default serving key maps to `f` (returns 1.0).
    """
    base_path = os.path.join(self.get_temp_dir(), 'tf_saved_model_save')
    export_path = os.path.join(base_path, '00000123')
    root = tf.train.Checkpoint()
    # Both functions ignore their input and return a distinct constant, so
    # the response value identifies which signature was actually invoked.
    root.f = tf.function(
        lambda x: {'y': 1.},
        input_signature=[tf.TensorSpec(None, tf.float32)])
    root.g = tf.function(
        lambda x: {'y': 2.},
        input_signature=[tf.TensorSpec(None, tf.float32)])
    tf.saved_model.experimental.save(
        root,
        export_path,
        signatures={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: root.f,
            'custom_signature_key': root.g
        })
    _, model_server_address, _ = TensorflowModelServerTest.RunServer(
        'default', base_path)
    expected_version = self._GetModelVersion(base_path)
    # Explicit signature name -> g -> 2.0.
    self.VerifyPredictRequest(
        model_server_address,
        expected_output=2.0,
        expected_version=expected_version,
        signature_name='custom_signature_key')
    # No signature name -> default serving signature -> f -> 1.0.
    self.VerifyPredictRequest(
        model_server_address,
        expected_output=1.0,
        expected_version=expected_version)
def test_iterate_nested_and_batched(self):
    """Samples nested items from the 'dist' table and batches them by 3."""
    with self._client.writer(100) as writer:
        for i in range(1000):
            writer.append({
                'observation': {
                    'data': np.zeros((3, 3), dtype=np.float32),
                    'extras': [
                        np.int64(10),
                        np.ones([1], dtype=np.int32),
                    ],
                },
                'reward': np.zeros((10, 10), dtype=np.float32),
            })
            # Create a 100-timestep item every 5 steps, but only once enough
            # timesteps exist to cover the full window.
            if i % 5 == 0 and i >= 100:
                writer.create_item(table='dist', num_timesteps=100, priority=1)
    dataset = reverb_dataset.ReplayDataset(
        self._client.server_address,
        table='dist',
        dtypes=(((tf.float32), (tf.int64, tf.int32)), tf.float32),
        shapes=((tf.TensorShape([3, 3]),
                 (tf.TensorShape(None), tf.TensorShape([1]))),
                tf.TensorShape([10, 10])),
        max_in_flight_samples_per_worker=100)
    dataset = dataset.batch(3)
    # Only the *structure* of this spec is used below — tree.unflatten_as
    # ignores dtypes and shapes.
    # NOTE(review): 'reward' is declared tf.int64 here although the written
    # rewards are float32; harmless for unflatten_as, but looks like a typo.
    structure = {
        'observation': {
            'data': tf.TensorSpec([3, 3], tf.float32),
            'extras': [
                tf.TensorSpec([], tf.int64),
                tf.TensorSpec([1], tf.int32),
            ],
        },
        'reward': tf.TensorSpec([], tf.int64),
    }
    got = self._sample_from(dataset, 10)
    self.assertLen(got, 10)
    for sample in got:
        self.assertIsInstance(sample, replay_sample.ReplaySample)
        # Re-nest the flat sample leaves into the expected structure so the
        # leaves can be compared one by one; leading dim 3 is the batch.
        transition = tree.unflatten_as(structure, tree.flatten(sample.data))
        np.testing.assert_array_equal(
            transition['observation']['data'],
            np.zeros([3, 3, 3], dtype=np.float32))
        np.testing.assert_array_equal(
            transition['observation']['extras'][0],
            np.ones([3], dtype=np.int64) * 10)
        np.testing.assert_array_equal(
            transition['observation']['extras'][1],
            np.ones([3, 1], dtype=np.int32))
        np.testing.assert_array_equal(
            transition['reward'],
            np.zeros([3, 10, 10], dtype=np.float32))
class ResizeImageModule(tf.Module):
    """tf.Module exposing fixed-size nearest-neighbor image resizing.

    Provides a downsample (52x37 -> 8x7) and an upsample (8x7 -> 52x37)
    concrete function, each with a fixed int32 NHWC input signature.
    """

    def __init__(self):
        # Bug fix: the original overrode __init__ with `pass`, skipping
        # tf.Module's own initializer (which sets up the module name/scope).
        super().__init__()

    @tf.function(input_signature=[tf.TensorSpec([1, 52, 37, 1], tf.int32)])
    def downsample_nearest_neighbor(self, image):
        """Resizes a [1, 52, 37, 1] image down to [1, 8, 7, 1]."""
        size = np.asarray([8, 7], dtype=np.int32)
        return tf.image.resize_nearest_neighbor(image, size)

    @tf.function(input_signature=[tf.TensorSpec([1, 8, 7, 1], tf.int32)])
    def upsample_nearest_neighbor(self, image):
        """Resizes a [1, 8, 7, 1] image up to [1, 52, 37, 1]."""
        size = np.asarray([52, 37], dtype=np.int32)
        return tf.image.resize_nearest_neighbor(image, size)
class ImageModel(tf.train.Checkpoint):
    """Dummy image model.

    Callable checkpoint that reduces an image batch over its spatial
    dimensions and adds a trainable scalar.
    """

    def __init__(self):
        super(ImageModel, self).__init__()
        # use_resource=True forces a resource variable (TF1-compat behavior).
        self.v = tf.Variable(1., use_resource=True)

    @tf.function(input_signature=[
        tf.TensorSpec(name="input", shape=[32, 224, 224, 3], dtype=tf.float32),
        tf.TensorSpec(name="training", shape=None, dtype=tf.bool),
    ])
    def __call__(self, x, training):
        # Mean over H and W (axes 1, 2) -> [32, 3]; `training` is accepted
        # to match the signature but is unused.
        return tf.reduce_mean(x, axis=[1, 2]) + self.v
def test_sets_dtypes_from_signature(self):
    """The dataset element spec must mirror the table signature exactly."""
    expected_signature = {
        'a': {
            'b': tf.TensorSpec([3, 3], tf.float32),
            'c': tf.TensorSpec([], tf.int64),
        },
        'x': tf.TensorSpec([None], tf.uint64),
    }
    queue_server = reverb_server.Server(
        [reverb_server.Table.queue('queue', 10, signature=expected_signature)])
    replay_dataset = reverb_dataset.ReplayDataset.from_table_signature(
        f'localhost:{queue_server.port}', 'queue', 100)
    self.assertDictEqual(replay_dataset.element_spec.data, expected_signature)
def test_server_info(self):
    """server_info() must describe every table hosted by the server.

    Covers three table flavours: a prioritized table with a scalar
    signature, a queue with a nested signature, and a queue without one.
    """
    # Populate the prioritized table and draw one sample so current_size
    # and num_unique_samples are non-trivial.
    self.client.insert([0], {TABLE_NAME: 1.0})
    self.client.insert([0], {TABLE_NAME: 1.0})
    self.client.insert([0], {TABLE_NAME: 1.0})
    list(self.client.sample(TABLE_NAME, 1))
    server_info = self.client.server_info()
    self.assertLen(server_info, 3)

    # Prioritized table: 3 items inserted, 1 unique item sampled.
    self.assertIn(TABLE_NAME, server_info)
    table = server_info[TABLE_NAME]
    self.assertEqual(table.current_size, 3)
    self.assertEqual(table.num_unique_samples, 1)
    self.assertEqual(table.max_size, 1000)
    self.assertEqual(table.sampler_options.prioritized.priority_exponent, 1)
    self.assertTrue(table.remover_options.fifo)
    self.assertEqual(table.signature, tf.TensorSpec(dtype=tf.int64, shape=[]))

    # Queue with a nested signature: still empty, FIFO both in and out.
    self.assertIn(NESTED_SIGNATURE_TABLE_NAME, server_info)
    queue = server_info[NESTED_SIGNATURE_TABLE_NAME]
    self.assertEqual(queue.current_size, 0)
    self.assertEqual(queue.num_unique_samples, 0)
    self.assertEqual(queue.max_size, 10)
    self.assertTrue(queue.sampler_options.fifo)
    self.assertTrue(queue.remover_options.fifo)
    self.assertEqual(queue.signature, QUEUE_SIGNATURE)

    # Plain queue: no signature was configured, so none is reported.
    self.assertIn(SIMPLE_QUEUE_NAME, server_info)
    info = server_info[SIMPLE_QUEUE_NAME]
    self.assertEqual(info.current_size, 0)
    self.assertEqual(info.num_unique_samples, 0)
    self.assertEqual(info.max_size, 10)
    self.assertTrue(info.sampler_options.fifo)
    self.assertTrue(info.remover_options.fifo)
    self.assertIsNone(info.signature)
def make_server():
    """Builds a local server with three prioritized tables.

    'dist' and 'dist2' are unsignatured; 'signatured' enforces a
    2-D float32 TensorSpec on inserted items.
    """

    def _prioritized_table(name, **extra_kwargs):
        # All tables share the same sampler/remover/size configuration.
        return server.Table(
            name,
            sampler=item_selectors.Prioritized(priority_exponent=1),
            remover=item_selectors.Fifo(),
            max_size=1000000,
            rate_limiter=rate_limiters.MinSize(1),
            **extra_kwargs)

    return server.Server(
        tables=[
            _prioritized_table('dist'),
            _prioritized_table('dist2'),
            _prioritized_table(
                'signatured',
                signature=tf.TensorSpec(dtype=tf.float32, shape=(None, None))),
        ],
        port=None,
    )
def make_server():
    """Builds a local reverb server with three prioritized tables.

    'dist' has no signature, 'signatured' enforces a plain TensorSpec and
    'bounded_spec_signatured' enforces a BoundedTensorSpec.
    """
    return reverb_server.Server(
        tables=[
            reverb_server.Table(
                'dist',
                sampler=item_selectors.Prioritized(priority_exponent=1),
                remover=item_selectors.Fifo(),
                max_size=1000000,
                rate_limiter=rate_limiters.MinSize(1)),
            reverb_server.Table(
                'signatured',
                sampler=item_selectors.Prioritized(priority_exponent=1),
                remover=item_selectors.Fifo(),
                max_size=1000000,
                rate_limiter=rate_limiters.MinSize(1),
                signature=tf.TensorSpec(dtype=tf.float32, shape=(None, None))),
            reverb_server.Table(
                'bounded_spec_signatured',
                sampler=item_selectors.Prioritized(priority_exponent=1),
                remover=item_selectors.Fifo(),
                max_size=1000000,
                rate_limiter=rate_limiters.MinSize(1),
                # Currently only the `shape` and `dtype` of the bounded spec
                # is considered during signature check.
                # TODO(b/158033101): Check the boundaries as well.
                signature=tensor_spec.BoundedTensorSpec(
                    dtype=tf.float32,
                    shape=(None, None),
                    minimum=(0.0, 0.0),
                    maximum=(10.0, 10.)),
            ),
        ],
        port=None,
    )
def get_stylize_fn():
    """Creates a tf.function for stylization.

    Wraps `build_network` in a TF1-style graph function taking a content
    image and a style image (both NHWC float32 batches) and returns a
    callable pruned down to exactly that computation.
    """
    input_spec = [
        tf.TensorSpec((None, None, None, 3), tf.float32),
        tf.TensorSpec((None, None, None, 3), tf.float32)
    ]
    predict_feeds = []
    predict_fetches = []

    def umbrella_function(content_img, style_img):
        # Side effect: records the graph feeds/fetches so the wrapped graph
        # can be pruned to this exact computation below.
        predict_feeds.extend([content_img, style_img])
        predict_result = build_network(content_img, style_img)
        predict_fetches.extend([
            predict_result,
        ])
        return predict_result

    umbrella_wrapped = tf.compat.v1.wrap_function(umbrella_function, input_spec)
    # prune() returns a ConcreteFunction restricted to the recorded
    # feeds/fetches captured while tracing above.
    fn = umbrella_wrapped.prune(predict_feeds, predict_fetches)
    return fn
def test_sets_dtypes_from_bounded_spec_signature(self):
    """Bounded specs in the table signature surface as plain TensorSpecs."""
    bounded_signature = {
        'a': {
            'b': tensor_spec.BoundedTensorSpec([3, 3], tf.float32, 0, 3),
            'c': tensor_spec.BoundedTensorSpec([], tf.int64, 0, 5),
        },
    }
    queue_server = reverb_server.Server([
        reverb_server.Table.queue('queue', 10, signature=bounded_signature)
    ])
    replay_dataset = reverb_dataset.ReplayDataset.from_table_signature(
        f'localhost:{queue_server.port}', 'queue', 100)
    # Bounds are dropped; only shape and dtype survive in the element spec.
    expected_specs = {
        'a': {
            'b': tf.TensorSpec([3, 3], tf.float32),
            'c': tf.TensorSpec([], tf.int64),
        },
    }
    self.assertDictEqual(replay_dataset.element_spec.data, expected_specs)
def test_server_info(self):
    """server_info() must report size, options and signature of the table."""
    for _ in range(3):
        self.client.insert([0], {TABLE_NAME: 1.0})
    server_info = self.client.server_info()
    self.assertLen(server_info, 1)
    self.assertIn(TABLE_NAME, server_info)
    info = server_info[TABLE_NAME]
    # Three inserts above -> current_size == 3.
    self.assertEqual(info.current_size, 3)
    self.assertEqual(info.max_size, 1000)
    self.assertEqual(info.sampler_options.prioritized.priority_exponent, 1)
    self.assertTrue(info.remover_options.fifo)
    self.assertEqual(info.signature, tf.TensorSpec(dtype=tf.int64, shape=()))
def setUpClass(cls):
    """Starts one shared server/client pair for every test in the class."""
    super().setUpClass()
    cls.server = server.Server(
        tables=[
            server.Table(
                name=TABLE_NAME,
                sampler=item_selectors.Prioritized(1),
                remover=item_selectors.Fifo(),
                max_size=1000,
                # Sampling blocks until at least 3 items have been inserted.
                rate_limiter=rate_limiters.MinSize(3),
                signature=tf.TensorSpec(dtype=tf.int64, shape=()),
            ),
        ],
        port=None)
    cls.client = client.Client(f'localhost:{cls.server.port}')
def test_tf_saved_model_save(self):
    """A TF2 SavedModel served end-to-end computes y = v1 * v2 * x."""
    base_path = os.path.join(self.get_temp_dir(), 'tf_saved_model_save')
    export_path = os.path.join(base_path, '00000123')
    root = tf.train.Checkpoint()
    root.v1 = tf.Variable(3.)
    root.v2 = tf.Variable(2.)
    root.f = tf.function(lambda x: {'y': root.v1 * root.v2 * x})
    concrete_fn = root.f.get_concrete_function(tf.TensorSpec(None, tf.float32))
    tf.saved_model.experimental.save(root, export_path, concrete_fn)
    _, model_server_address, _ = TensorflowModelServerTest.RunServer(
        'default', base_path)
    # 12.0 = 3 * 2 * x for the harness's canned request input —
    # presumably x == 2.0; confirm in VerifyPredictRequest.
    self.VerifyPredictRequest(
        model_server_address,
        expected_output=12.0,
        specify_output=False,
        expected_version=self._GetModelVersion(base_path))
def setUpClass(cls):
    """Starts one shared server (three tables) and client for the class."""
    super().setUpClass()
    cls.tables = [
        # Prioritized table with a scalar int64 signature; sampling waits
        # until at least 3 items have been inserted.
        server.Table(
            name=TABLE_NAME,
            sampler=item_selectors.Prioritized(1),
            remover=item_selectors.Fifo(),
            max_size=1000,
            rate_limiter=rate_limiters.MinSize(3),
            signature=tf.TensorSpec(dtype=tf.int64, shape=[]),
        ),
        # FIFO queue whose inserts must match the nested QUEUE_SIGNATURE.
        server.Table.queue(
            name=NESTED_SIGNATURE_TABLE_NAME,
            max_size=10,
            signature=QUEUE_SIGNATURE,
        ),
        # FIFO queue without a signature.
        server.Table.queue(SIMPLE_QUEUE_NAME, 10),
    ]
    cls.server = server.Server(tables=cls.tables)
    cls.client = cls.server.localhost_client()
def __init__(
    self,
    mode,
    img_shape: Tuple[int, int] = (160, 160),
    alpha: float = 0.8,
    stride: int = 1,
    min_face_size: int = 40,
) -> None:
    """Sets up an MTCNN-based face detector.

    Args:
        mode: "mtcnn" (TensorFlow graph loaded from CONFIG_HOME) or
            "trt-mtcnn" (TensorRT engine plugin); asserted to be one of
            the two.
        img_shape: stored as a tuple; presumably the working image size —
            TODO confirm how callers use it.
        alpha: stored for later use; semantics not visible in this block.
        stride: stored for later use; presumably a frame-skip interval.
        min_face_size: minimum face size (pixels) forwarded to MTCNN.
    """
    assert mode in ("mtcnn", "trt-mtcnn"), f"{mode} not supported"
    self.mode = mode
    self.alpha = alpha
    self.img_shape = tuple(img_shape)
    self.stride = stride
    self.min_face_size = min_face_size
    # Frame counter and cached detection result (used by other methods).
    self.frame_ct = 0
    self._cached_result = None
    if "trt-mtcnn" in mode:
        # Lazy import so the TensorRT stack is only required in trt mode.
        sys.path.insert(1, "../util/trt_mtcnn_plugin")
        from util.trt_mtcnn_plugin.trt_mtcnn import TrtMTCNNWrapper  # noqa
        # One engine per MTCNN stage (det1..det3).
        engine_paths = [
            f"../util/trt_mtcnn_plugin/mtcnn/det{i+1}.engine"
            for i in range(3)
        ]
        self.trt_mtcnn = TrtMTCNNWrapper(*engine_paths)
    # Stripping "trt-mtcnn" first ensures this branch fires only for the
    # plain "mtcnn" mode, never for "trt-mtcnn".
    if "mtcnn" in mode.replace("trt-mtcnn", ""):
        import tensorflow.compat.v1 as tf  # noqa
        assert tf.executing_eagerly(), (
            "[internal] launch failed, tf not eager."
            "Check that tensorflow>=2.3 and that eager exec enabled")
        mpath = CONFIG_HOME + "/models/mtcnn.pb"
        # Wrap the frozen-graph MTCNN as a callable taking one HxWx3 image.
        self.mtcnn = tf.wrap_function(
            get_mtcnn(mpath, min_size=float(self.min_face_size)),
            [tf.TensorSpec(shape=[None, None, 3], dtype=tf.float32)],
        )
def _as_dataset(*args, **kwargs):
    """Creates a mock TFDS graphs dataset."""
    # Signature compatibility with the patched TFDS method; mock ignores them.
    del args, kwargs

    def get_iter():
        # One synthetic graph per (num_nodes, num_edges, labels) triple from
        # the module-level test fixtures.
        return (
            _make_graph(num_nodes, num_edges, labels)
            for num_nodes, num_edges, labels
            in zip(NUMS_NODES, NUMS_EDGES, LABELS))

    # Element spec with 3 edge features and 9 node features per item —
    # presumably mirroring the real TFDS graphs dataset schema; confirm
    # against the dataset being mocked.
    return tf.data.Dataset.from_generator(
        get_iter,
        output_signature={
            'edge_feat': tf.TensorSpec(shape=(None, 3), dtype=np.float32),
            'edge_index': tf.TensorSpec(shape=(None, 2), dtype=np.int64),
            'labels': tf.TensorSpec(shape=(NUM_LABELS,), dtype=np.float32),
            'node_feat': tf.TensorSpec(shape=(None, 9), dtype=np.float32),
            'num_edges': tf.TensorSpec(shape=(1,), dtype=np.int64),
            'num_nodes': tf.TensorSpec(shape=(1,), dtype=np.int64),
        })
from absl.testing import absltest import numpy as np import portpicker from reverb import client from reverb import errors from reverb import item_selectors from reverb import rate_limiters from reverb import server import tensorflow.compat.v1 as tf import tree TABLE_NAME = 'table' NESTED_SIGNATURE_TABLE_NAME = 'nested_signature_table' SIMPLE_QUEUE_NAME = 'simple_queue' QUEUE_SIGNATURE = { 'a': tf.TensorSpec(dtype=tf.int64, shape=(3,)), 'b': tf.TensorSpec(dtype=tf.float32, shape=(3, 2, 2)), } class ClientTest(absltest.TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.tables = [ server.Table( name=TABLE_NAME, sampler=item_selectors.Prioritized(1), remover=item_selectors.Fifo(), max_size=1000,
wav_encoder = tf.audio.encode_wav(wav_data_placeholder, sample_rate_placeholder) wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder) sess.run(wav_saver, feed_dict={ wav_filename_placeholder: filename, sample_rate_placeholder: sample_rate, wav_data_placeholder: np.reshape(wav_data, (-1, 1)) }) def np_load(x): return np.load(x) @tf.function(input_signature=[tf.TensorSpec(None, tf.string)]) def tf_np_load(inputs): y = tf.numpy_function(np_load, [inputs], tf.float32) return y class AudioProcessor(object): """Handles loading, partitioning, and preparing audio training data. Args: flags: data and model parameters, described at model_train_eval.py """ def __init__(self, flags): wanted_words = flags.wanted_words.split(',') if flags.wav: file_ext = '*.wav'
class HalfPlusTwoModel(tf.Module):
    """Native TF2 half-plus-two model.

    Exposes predict/regress/classify signatures all built on
    y = a * x + inc with a = 0.5 and inc = b (2.0) or c (3.0).
    """

    def __init__(self):
        # NOTE(review): tf.Module subclasses normally call
        # super().__init__(); confirm name scoping is not needed here.
        self.a = tf.Variable(0.5, name="a")
        self.b = tf.Variable(2.0, name="b")
        self.c = tf.Variable(3.0, name="c")
        self.asset = _create_asset_file(tf2=True)

    def compute(self, x, inc):
        # y = a * x + inc, i.e. 0.5 * x + 2 (or + 3 for the *y2/*y3 variants).
        return tf.add(tf.multiply(self.a, x), inc)

    def get_serving_signatures(self):
        """Maps SignatureDef keys to the tf.functions defined below."""
        return {
            "regress_x_to_y": self.regress_xy,
            "regress_x_to_y2": self.regress_xy2,
            "regress_x2_to_y3": self.regress_x2y3,
            "classify_x_to_y": self.classify_xy,
            "classify_x2_to_y3": self.classify_x2y3,
            tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: self.predict,
        }

    @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)])
    def predict(self, x):
        return {"y": self.compute(x, self.b)}

    @tf.function(input_signature=[
        tf.TensorSpec([None], dtype=tf.string,
                      name=tf.saved_model.REGRESS_INPUTS)
    ])
    def regress_xy(self, serialized_proto):
        # Regression inputs arrive as serialized tf.Example protos.
        x = tf.parse_example(serialized_proto, _get_feature_spec())["x"]
        return {tf.saved_model.REGRESS_OUTPUTS: self.compute(x, self.b)}

    @tf.function(input_signature=[
        tf.TensorSpec([None], dtype=tf.string,
                      name=tf.saved_model.REGRESS_INPUTS)
    ])
    def regress_xy2(self, serialized_proto):
        x = tf.parse_example(serialized_proto, _get_feature_spec())["x"]
        return {tf.saved_model.REGRESS_OUTPUTS: self.compute(x, self.c)}

    @tf.function(input_signature=[
        tf.TensorSpec(
            shape=[1], dtype=tf.float32, name=tf.saved_model.REGRESS_INPUTS)
    ])
    def regress_x2y3(self, x2):
        # Takes the raw float directly rather than a serialized proto.
        return {tf.saved_model.REGRESS_OUTPUTS: self.compute(x2, self.c)}

    @tf.function(input_signature=[
        tf.TensorSpec([None], dtype=tf.string,
                      name=tf.saved_model.CLASSIFY_INPUTS)
    ])
    def classify_xy(self, serialized_proto):
        x = tf.parse_example(serialized_proto, _get_feature_spec())["x"]
        return {tf.saved_model.CLASSIFY_OUTPUT_SCORES: self.compute(x, self.b)}

    @tf.function(input_signature=[
        tf.TensorSpec(
            shape=[1], dtype=tf.float32, name=tf.saved_model.CLASSIFY_INPUTS)
    ])
    def classify_x2y3(self, x2):
        return {tf.saved_model.CLASSIFY_OUTPUT_SCORES:
                    self.compute(x2, self.c)}
sentence_breaking_assert, sentence_breaking_v2_assert, sentencepiece_assert, sentencepiece_id_assert, sentencepiece_size_assert, split_merge_assert, split_merge_from_logits_assert, tf_unicode_script_assert, unicode_script_assert, whitespace_assert, wordpiece_assert, wordshapes_assert ]): y = tf.add(x, [1]) return {'y': y} module = TfTextOps() export_path = tempfile.TemporaryDirectory() print('Exporting saved model to ', export_path) call = module.__call__.get_concrete_function( tf.TensorSpec([1], tf.float32, 'x')) tf.saved_model.save(module, export_path.name, call) # Copy files from temp directory print('Moving files:') for src_dir, dirs, files in os.walk(export_path.name): dst_dir = src_dir.replace(export_path.name, FLAGS.dest, 1) if not os.path.exists(dst_dir): os.makedirs(dst_dir) for file_ in files: print(file_) src_file = os.path.join(src_dir, file_) dst_file = os.path.join(dst_dir, file_) if os.path.exists(dst_file): # in case of the src and dst are the same file if os.path.samefile(src_file, dst_file):
def inputs(self):
    """Input specs: a float32 RGB image batch and an int32 edge-map batch."""
    image_spec = tf.TensorSpec([None, None, None, 3], tf.float32, 'image')
    edgemap_spec = tf.TensorSpec([None, None, None], tf.int32, 'edgemap')
    return [image_spec, edgemap_spec]
def input_spec(self):
    """Ordered (x, y) specs: batched 784-dim float features, int32 labels."""
    spec_pairs = [
        ('x', tf.TensorSpec([None, 784], tf.float32)),
        ('y', tf.TensorSpec([None, 1], tf.int32)),
    ]
    return collections.OrderedDict(spec_pairs)