def testMatchingTensorInfoProtoMaps(self):
  with tf_v1.Graph().as_default():
    sig1 = _make_signature({
        "x": tf_v1.placeholder(tf.int32, [2]),
    }, {
        "x": tf_v1.placeholder(tf.int32, [2]),
    })
    sig2 = _make_signature({
        "x": tf_v1.placeholder(tf.int32, [2]),
    }, {
        "x": tf_v1.sparse_placeholder(tf.int64, [2]),
    })
    self.assertTrue(
        tensor_info.tensor_info_proto_maps_match(sig1.inputs, sig2.inputs))
    self.assertFalse(
        tensor_info.tensor_info_proto_maps_match(sig1.outputs, sig2.outputs))

    sig3 = _make_signature({
        "x": tf_v1.placeholder(tf.int32, [None]),
    }, {
        "x": tf_v1.placeholder(tf.int32, [2]),
    })
    self.assertFalse(
        tensor_info.tensor_info_proto_maps_match(sig1.inputs, sig3.inputs))
    self.assertTrue(
        tensor_info.tensor_info_proto_maps_match(sig1.outputs, sig3.outputs))
def testParsingTensorInfoProtoMaps(self):
  with tf_v1.Graph().as_default():
    sig = _make_signature(
        {
            "x": tf_v1.placeholder(tf.string, [2]),
        }, {
            "y": tf_v1.placeholder(tf.int32, [2]),
            "z": tf_v1.sparse_placeholder(tf.float32, [2, 10]),
        })

    inputs = tensor_info.parse_tensor_info_map(sig.inputs)
    self.assertEqual(set(inputs.keys()), set(["x"]))
    self.assertEqual(inputs["x"].get_shape(), [2])
    self.assertEqual(inputs["x"].dtype, tf.string)
    self.assertFalse(inputs["x"].is_sparse)

    outputs = tensor_info.parse_tensor_info_map(sig.outputs)
    self.assertEqual(set(outputs.keys()), set(["y", "z"]))
    self.assertEqual(outputs["y"].get_shape(), [2])
    self.assertEqual(outputs["y"].dtype, tf.int32)
    self.assertFalse(outputs["y"].is_sparse)
    self.assertEqual(outputs["z"].get_shape(), [2, 10])
    self.assertEqual(outputs["z"].dtype, tf.float32)
    self.assertTrue(outputs["z"].is_sparse)
def image_module_fn():
  images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 2, 4, 3])
  sum_by_channels = tf.reduce_sum(images, [1, 2])
  sum_all = tf.reduce_sum(images, [1, 2, 3])
  native_module.add_signature(
      inputs=dict(images=images),
      outputs=dict(default=sum_all, sum_by_channels=sum_by_channels))
def text_module_fn():
  weights = tf_v1.get_variable(
      "weights", dtype=tf.float32, shape=[100, 10])
  # initializer=tf.random_uniform_initializer())
  text = tf_v1.placeholder(tf.string, shape=[None])
  hash_buckets = tf_v1.string_to_hash_bucket_fast(text, weights.get_shape()[0])
  embeddings = tf_v1.gather(weights, hash_buckets)
  hub.add_signature(inputs=text, outputs=embeddings)
def image_module_fn(): """Maps 1x2 images to sums of each color channel.""" images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3]) weight = tf_v1.get_variable(name="weight", initializer=1.0, dtype=tf.float32) sum_channels = tf.reduce_sum(images, axis=[1, 2]) * weight hub.add_signature(inputs={"images": images}, outputs=sum_channels)
def image_module_fn_with_info():
  images = tf_v1.placeholder(dtype=tf.float32, shape=[None, None, None, 3])
  sum_all = tf.reduce_sum(images, [1, 2, 3])
  native_module.add_signature(
      inputs=dict(images=images), outputs=dict(default=sum_all))
  image_module_info = image_util.ImageModuleInfo()
  size = image_module_info.default_image_size
  size.height, size.width = 2, 4
  image_util.attach_image_module_info(image_module_info)
def testRepr(self):
  with tf_v1.Graph().as_default():
    sig = _make_signature(
        {
            "x": tf_v1.placeholder(tf.string, [2]),
        }, {
            "y": tf_v1.placeholder(tf.int32, [2]),
            "z": tf_v1.sparse_placeholder(tf.float32, [2, 10]),
        })

    outputs = tensor_info.parse_tensor_info_map(sig.outputs)
    self.assertEqual(
        repr(outputs["y"]),
        "<hub.ParsedTensorInfo shape=(2,) dtype=int32 is_sparse=False>")
    self.assertEqual(
        repr(outputs["z"]),
        "<hub.ParsedTensorInfo shape=(2, 10) dtype=float32 is_sparse=True>")
def image_module_fn(): """Maps 1x2 images to sums of each color channel.""" images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3]) if randomly_initialized: initializer = tf_v1.random_uniform_initializer( minval=-1, maxval=1, dtype=tf.float32) else: initializer = tf_v1.constant_initializer(1.0, dtype=tf.float32) weight = tf_v1.get_variable( name="weight", shape=[1], initializer=initializer) sum_channels = tf.reduce_sum(images, axis=[1, 2]) * weight hub.add_signature(inputs={"images": images}, outputs=sum_channels)
def testBuildInputMap(self):
  with tf_v1.Graph().as_default():
    x = tf_v1.placeholder(tf.int32, [2])
    y = tf_v1.sparse_placeholder(tf.string, [None])
    sig = _make_signature({"x": x, "y": y}, {})

    input_map = tensor_info.build_input_map(sig.inputs, {"x": x, "y": y})
    self.assertEqual(len(input_map), 4)
    self.assertEqual(input_map[x.name], x)
    self.assertEqual(input_map[y.indices.name], y.indices)
    self.assertEqual(input_map[y.values.name], y.values)
    self.assertEqual(input_map[y.dense_shape.name], y.dense_shape)
def testBuildOutputMap(self):
  with tf_v1.Graph().as_default():
    x = tf_v1.placeholder(tf.int32, [2])
    y = tf_v1.sparse_placeholder(tf.string, [None])
    sig = _make_signature({}, {"x": x, "y": y})

    def _get_tensor(name):
      return tf_v1.get_default_graph().get_tensor_by_name(name)

    output_map = tensor_info.build_output_map(sig.outputs, _get_tensor)
    self.assertEqual(len(output_map), 2)
    self.assertEqual(output_map["x"], x)
    self.assertEqual(output_map["y"].indices, y.indices)
    self.assertEqual(output_map["y"].values, y.values)
    self.assertEqual(output_map["y"].dense_shape, y.dense_shape)
def testConvertTensors(self):
  with tf_v1.Graph().as_default():
    a = tf_v1.placeholder(tf.int32, [None])
    protomap = _make_signature({"a": a}, {}).inputs
    targets = tensor_info.parse_tensor_info_map(protomap)

    # convert constant
    in0 = [1, 2, 3]
    output = tensor_info.convert_dict_to_compatible_tensor({"a": in0}, targets)
    self.assertEqual(output["a"].dtype, a.dtype)

    # check sparsity
    in1 = tf_v1.sparse_placeholder(tf.int32, [])
    with self.assertRaisesRegexp(TypeError, "dense"):
      tensor_info.convert_dict_to_compatible_tensor({"a": in1}, targets)
def createSavedModel(self):
  model_dir = os.path.join(self.get_temp_dir(), "saved_model")
  with tf.Graph().as_default():
    x = tf_v1.placeholder(dtype=tf.float32, shape=[None, 3])
    w = tf_v1.get_variable("weights", shape=[])
    y = x * w
    tf_v1.add_to_collection(_EXTRA_COLLECTION, y)

    init_op = tf_v1.assign(w, 2)

    with tf_v1.Session() as session:
      session.run(init_op)
      tf_v1.saved_model.simple_save(
          session,
          model_dir,
          inputs={"x": x},
          outputs={"y": y},
      )
  return model_dir
def text_module_fn():
  embeddings = [
      ("", [0, 0, 0, 0]),  # OOV items are mapped to this embedding.
      ("hello world", [1, 2, 3, 4]),
      ("pair-programming", [5, 5, 5, 5]),
  ]
  keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)
  indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)
  tbl_init = KeyValueTensorInitializer(keys, indices)
  table = HashTable(tbl_init, 0)

  weights_initializer = tf.cast(
      tf.constant(list([item[1] for item in embeddings])), tf.float32)
  weights = tf_v1.get_variable(
      "weights", dtype=tf.float32, initializer=weights_initializer)

  text_tensor = tf_v1.placeholder(dtype=tf.string, name="text", shape=[None])
  indices_tensor = table.lookup(text_tensor)
  embedding_tensor = tf.gather(weights, indices_tensor)
  hub.add_signature(inputs=text_tensor, outputs=embedding_tensor)
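# A hedged usage sketch (not part of the original file): because the module_fn
# above builds a lookup table, both the variable and table initializers must
# run before evaluation. Assumes the TF1 hub API (`hub.create_module_spec`,
# `hub.Module`) and that `tf_v1` aliases `tf.compat.v1`; the function name is
# hypothetical.
def _example_run_text_module():
  spec = hub.create_module_spec(text_module_fn)
  with tf.Graph().as_default():
    embed = hub.Module(spec)
    embeddings = embed(["hello world", "unseen phrase"])
    with tf_v1.Session() as sess:
      sess.run(tf_v1.global_variables_initializer())
      sess.run(tf_v1.tables_initializer())
      # Should yield [[1, 2, 3, 4], [0, 0, 0, 0]]: the second phrase is OOV.
      return sess.run(embeddings)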
def _stateless_module_fn(self):
  """Simple module that squares an input."""
  x = tf_v1.placeholder(tf.int64)
  y = x * x
  hub.add_signature(inputs=x, outputs=y)
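# A hedged usage sketch (not part of the original tests): how a module_fn like
# the one above is typically exported and evaluated with the TF1 hub API,
# assuming `tf_v1` aliases `tf.compat.v1`. The method name is hypothetical.
def _example_square_with_module(self):
  spec = hub.create_module_spec(self._stateless_module_fn)
  with tf.Graph().as_default():
    square = hub.Module(spec)
    y = square(tf.constant(3, dtype=tf.int64))
    with tf_v1.Session() as sess:
      sess.run(tf_v1.global_variables_initializer())
      self.assertEqual(sess.run(y), 9)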
def assets_module_fn():
  indices = tf_v1.placeholder(dtype=tf.int64, name="indices")
  # `vocab_filename` is expected to be bound in the enclosing scope.
  table = index_to_string_table_from_file(
      vocabulary_file=vocab_filename, default_value="UNKNOWN")
  outputs = table.lookup(indices)
  hub.add_signature(inputs=indices, outputs=outputs)
def eval_function_for_module(spec, tags=None):
  """Context manager that yields a function to directly evaluate a Module.

  This creates a separate graph, in which all of the signatures of the module
  are instantiated. Then, it creates a session and initializes the module
  variables. Finally, it returns a function which can be used to evaluate the
  module signatures.

  The function returned by eval_function_for_module has the same syntax as
  Module.__call__, except that inputs and outputs are not tensors but actual
  values as used with Session.run().

  ```python
  with hub.eval_function_for_module("/tmp/text-embedding") as f:
    # The module can be directly evaluated using f without constructing a
    # graph.
    embeddings = f(["Hello world!",], signature="mysignature")
  ```

  Args:
    spec: A ModuleSpec defining the Module to instantiate or a path where to
      load a ModuleSpec from via `load_module_spec`.
    tags: A set of strings specifying the graph variant to use.

  Yields:
    A function whose keyword arguments are fed into the tfhub module and which
    returns a dictionary with the value of the output tensors.

  Raises:
    RuntimeError: explaining the reason why it failed to instantiate the
      Module.
    ValueError: if the requested graph variant does not exist.
  """
  # We create a separate graph and add all the signatures of the module to it.
  original_graph = tf_v1.get_default_graph()
  with tf.Graph().as_default():
    module = Module(spec, tags=tags)
    input_tensors_per_signature = {}
    output_tensors_per_signature = {}
    for signature in module.get_signature_names():
      # We scope with the signature name as different signatures will likely
      # contain tensors with the same name (e.g. the input and output tensors).
      with tf_v1.variable_scope(signature):
        input_tensors = {}
        for name, tensorinfo in module.get_input_info_dict(signature).items():
          # We need to be careful with the shape as it may be fully-known,
          # partially-known or even unknown.
          shape = tensorinfo.get_shape()
          effective_shape = None if shape.dims is None else shape.as_list()
          if tensorinfo.is_sparse:
            input_tensors[name] = tf_v1.sparse_placeholder(
                tensorinfo.dtype, shape=effective_shape, name=name)
          else:
            input_tensors[name] = tf_v1.placeholder(
                tensorinfo.dtype, shape=effective_shape, name=name)
        input_tensors_per_signature[signature] = input_tensors
        output_tensors_per_signature[signature] = module(
            input_tensors_per_signature[signature],
            signature=signature,
            as_dict=True)

    # Evaluating the tfhub module requires an active tensorflow session.
    with tf_v1.train.SingularMonitoredSession() as sess:

      def func(
          inputs=None,
          _sentinel=None,  # pylint: disable=invalid-name
          signature=None,
          as_dict=None):
        """Function that directly evaluates a signature in the module."""
        signature = signature or "default"
        input_tensors = input_tensors_per_signature[signature]

        dict_inputs = _prepare_dict_inputs(inputs, input_tensors)

        # The input arguments are directly fed into the session.
        feed_dict = {
            input_tensors[key]: value for key, value in dict_inputs.items()
        }
        output = output_tensors_per_signature[signature]
        output = _prepare_outputs(output, as_dict)
        return sess.run(output, feed_dict=feed_dict)

      with original_graph.as_default():
        # Yield the function since that will keep the session alive until the
        # user exits the context.
        yield func
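# A hedged usage sketch (an assumption, not original code): evaluating a
# signature with dict inputs and multiple outputs through the function yielded
# above. The module path, input key, and signature name are hypothetical.
def _example_eval_multi_output(module_path):
  with eval_function_for_module(module_path) as f:
    outputs = f({"images": [[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]},
                signature="default", as_dict=True)
  # `outputs` maps each output name of the signature to a numpy value.
  return outputs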
def invalid_text_module_fn():
  text = tf_v1.placeholder(tf.string, shape=[10])
  hub.add_signature(inputs=text, outputs=tf.zeros([10, 3]))
def image_module_fn(): """Maps 1x2 images to sums of each color channel.""" images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3]) sum_channels = tf.reduce_sum(images, axis=[1, 2]) hub.add_signature(inputs={"images": images}, outputs=sum_channels)
def _serving_input_fn():
  """A serving input fn."""
  text_features = tf_v1.placeholder(dtype=tf.string, shape=[None])
  return tf_v1.estimator.export.ServingInputReceiver(
      features={_TEXT_FEATURE_NAME: text_features},
      receiver_tensors=text_features)