Code example #1
    def testMatchingTensorInfoProtoMaps(self):
        with tf_v1.Graph().as_default():
            sig1 = _make_signature({
                "x": tf_v1.placeholder(tf.int32, [2]),
            }, {
                "x": tf_v1.placeholder(tf.int32, [2]),
            })

            sig2 = _make_signature(
                {
                    "x": tf_v1.placeholder(tf.int32, [2]),
                }, {
                    "x": tf_v1.sparse_placeholder(tf.int64, [2]),
                })
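            # The input maps are identical and match; the output maps differ
            # (dense int32 vs. sparse int64), so they do not.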
            self.assertTrue(
                tensor_info.tensor_info_proto_maps_match(
                    sig1.inputs, sig2.inputs))
            self.assertFalse(
                tensor_info.tensor_info_proto_maps_match(
                    sig1.outputs, sig2.outputs))

            sig3 = _make_signature({
                "x": tf_v1.placeholder(tf.int32, [None]),
            }, {
                "x": tf_v1.placeholder(tf.int32, [2]),
            })
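            # sig3 declares a different input shape ([None] vs. [2]), so the inputs
            # no longer match, while the identical outputs still do.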
            self.assertFalse(
                tensor_info.tensor_info_proto_maps_match(
                    sig1.inputs, sig3.inputs))
            self.assertTrue(
                tensor_info.tensor_info_proto_maps_match(
                    sig1.outputs, sig3.outputs))
Code example #2
    def testParsingTensorInfoProtoMaps(self):
        with tf_v1.Graph().as_default():
            sig = _make_signature(
                {
                    "x": tf_v1.placeholder(tf.string, [2]),
                }, {
                    "y": tf_v1.placeholder(tf.int32, [2]),
                    "z": tf_v1.sparse_placeholder(tf.float32, [2, 10]),
                })

            inputs = tensor_info.parse_tensor_info_map(sig.inputs)
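            # Each parsed entry is a ParsedTensorInfo exposing shape, dtype and sparsity.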
            self.assertEqual(set(inputs.keys()), set(["x"]))
            self.assertEqual(inputs["x"].get_shape(), [2])
            self.assertEqual(inputs["x"].dtype, tf.string)
            self.assertFalse(inputs["x"].is_sparse)

            outputs = tensor_info.parse_tensor_info_map(sig.outputs)
            self.assertEqual(set(outputs.keys()), set(["y", "z"]))
            self.assertEqual(outputs["y"].get_shape(), [2])
            self.assertEqual(outputs["y"].dtype, tf.int32)
            self.assertFalse(outputs["y"].is_sparse)

            self.assertEqual(outputs["z"].get_shape(), [2, 10])
            self.assertEqual(outputs["z"].dtype, tf.float32)
            self.assertTrue(outputs["z"].is_sparse)
Code example #3
    def testBuildOutputMap(self):
        with tf_v1.Graph().as_default():
            x = tf_v1.placeholder(tf.int32, [2])
            y = tf_v1.sparse_placeholder(tf.string, [None])
            sig = _make_signature({}, {"x": x, "y": y})

            def _get_tensor(name):
                return tf_v1.get_default_graph().get_tensor_by_name(name)

            output_map = tensor_info.build_output_map(sig.outputs, _get_tensor)
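            # The dense output maps straight to x; the sparse output is rebuilt from
            # its indices, values and dense_shape component tensors.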
            self.assertEqual(len(output_map), 2)
            self.assertEqual(output_map["x"], x)
            self.assertEqual(output_map["y"].indices, y.indices)
            self.assertEqual(output_map["y"].values, y.values)
            self.assertEqual(output_map["y"].dense_shape, y.dense_shape)
Code example #4
    def testBuildInputMap(self):
        with tf_v1.Graph().as_default():
            x = tf_v1.placeholder(tf.int32, [2])
            y = tf_v1.sparse_placeholder(tf.string, [None])
            sig = _make_signature({"x": x, "y": y}, {})

            input_map = tensor_info.build_input_map(sig.inputs, {
                "x": x,
                "y": y
            })
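            # One entry for the dense placeholder x plus three for the sparse
            # placeholder y (indices, values and dense_shape).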
            self.assertEqual(len(input_map), 4)
            self.assertEqual(input_map[x.name], x)
            self.assertEqual(input_map[y.indices.name], y.indices)
            self.assertEqual(input_map[y.values.name], y.values)
            self.assertEqual(input_map[y.dense_shape.name], y.dense_shape)
Code example #5
    def testConvertTensors(self):
        with tf_v1.Graph().as_default():
            a = tf_v1.placeholder(tf.int32, [None])
            protomap = _make_signature({"a": a}, {}).inputs
            targets = tensor_info.parse_tensor_info_map(protomap)

            # convert constant
            in0 = [1, 2, 3]
            output = tensor_info.convert_dict_to_compatible_tensor({"a": in0},
                                                                   targets)
            self.assertEqual(output["a"].dtype, a.dtype)

            # check sparsity
            in1 = tf_v1.sparse_placeholder(tf.int32, [])
            with self.assertRaisesRegexp(TypeError, "dense"):
                tensor_info.convert_dict_to_compatible_tensor({"a": in1},
                                                              targets)
Code example #6
    def testRepr(self):
        with tf_v1.Graph().as_default():
            sig = _make_signature(
                {
                    "x": tf_v1.placeholder(tf.string, [2]),
                }, {
                    "y": tf_v1.placeholder(tf.int32, [2]),
                    "z": tf_v1.sparse_placeholder(tf.float32, [2, 10]),
                })

            outputs = tensor_info.parse_tensor_info_map(sig.outputs)
            self.assertEqual(
                repr(outputs["y"]),
                "<hub.ParsedTensorInfo shape=(2,) dtype=int32 is_sparse=False>"
            )
            self.assertEqual(
                repr(outputs["z"]),
                "<hub.ParsedTensorInfo shape=(2, 10) dtype=float32 is_sparse=True>"
            )
Code example #7
@contextlib.contextmanager
def eval_function_for_module(spec, tags=None):
    """Context manager that yields a function to directly evaluate a Module.

  This creates a separate graph, in which all of the signatures of the module
  are instantiated. Then, it creates a session and initializes the module
  variables. Finally, it returns a function which can be used to evaluate the
  module signatures.

  The function returned by eval_function_for_module has the same syntax as
  Module.__call__ , except that inputs and outputs are not tensors but actual
  values as used with Session.run().

  ```python
  with hub.eval_function_for_module("/tmp/text-embedding") as f:
    # The module can be directly evaluated using f without constructing a graph.
    embeddings = f(["Hello world!",], signature="mysignature")
  ```

  Args:
    spec: A ModuleSpec defining the Module to instantiate or a path where to
      load a ModuleSpec from via `load_module_spec`.
    tags: A set of strings specifying the graph variant to use.

  Yields:
    A function whose keyword arguments are fed into the tfhub module and which
      returns a dictionary with the value of the output tensors.

  Raises:
    RuntimeError: explaning the reason why it failed to instantiate the
      Module.
    ValueError: if the requested graph variant does not exists.
  """
    # We create a separate graph and add all the signatures of the module to it.
    original_graph = tf_v1.get_default_graph()
    with tf.Graph().as_default():
        module = Module(spec, tags=tags)
        input_tensors_per_signature = {}
        output_tensors_per_signature = {}
        for signature in module.get_signature_names():
            # We scope with the signature name as different signatures will likely
            # contain tensors with the same name (e.g. the input and output tensors).
            with tf_v1.variable_scope(signature):
                input_tensors = {}
                for name, tensorinfo in module.get_input_info_dict(
                        signature).items():
                    # We need to be careful with the shape as it may be fully-known,
                    # partially-known or even unknown.
                    shape = tensorinfo.get_shape()
                    effective_shape = None if shape.dims is None else shape.as_list()
                    if tensorinfo.is_sparse:
                        input_tensors[name] = tf_v1.sparse_placeholder(
                            tensorinfo.dtype, shape=effective_shape, name=name)
                    else:
                        input_tensors[name] = tf_v1.placeholder(
                            tensorinfo.dtype, shape=effective_shape, name=name)
                input_tensors_per_signature[signature] = input_tensors
                output_tensors_per_signature[signature] = module(
                    input_tensors_per_signature[signature],
                    signature=signature,
                    as_dict=True)

        # Evaluating the tfhub module requires an active tensorflow session.
        with tf_v1.train.SingularMonitoredSession() as sess:

            def func(
                    inputs=None,
                    _sentinel=None,  # pylint: disable=invalid-name
                    signature=None,
                    as_dict=None):
                """Function that directly evaluates a signature in the module."""
                signature = signature or "default"
                input_tensors = input_tensors_per_signature[signature]

                dict_inputs = _prepare_dict_inputs(inputs, input_tensors)

                # The input arguments are directly fed into the session.
                feed_dict = {
                    input_tensors[key]: value
                    for key, value in dict_inputs.items()
                }
                output = output_tensors_per_signature[signature]
                output = _prepare_outputs(output, as_dict)
                return sess.run(output, feed_dict=feed_dict)

            with original_graph.as_default():
                # Yield the function since that will keep the session alive until the
                # user exits the context.
                yield func
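As a brief, hedged usage sketch (the module path "/tmp/text-embedding" and its signatures are assumptions, mirroring the docstring example above), the yielded function accepts plain Python values and returns the results of Session.run():

```python
# Minimal sketch; the path and signature names below are assumptions.
import tensorflow_hub as hub

with hub.eval_function_for_module("/tmp/text-embedding") as f:
    # Single-input signature: a plain Python list is fed as the only input.
    embeddings = f(["Hello world!", "Goodbye world!"])
    # as_dict=True returns every named output of the signature as a dict.
    all_outputs = f(["Hello world!"], signature="default", as_dict=True)
```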