Example #1
    def test_placeholder(self):
        """Test placeholder functionalities."""
        g0_graph = tf.Graph()
        with g0_graph.as_default():
            tf.constant(1, name="foo")

        g0 = gde.Graph(g0_graph)
        a0 = g0["foo"].output(0)

        # Test placeholder name.
        self.assertEqual(gde.util.placeholder_name(a0), "geph__foo_0")
        self.assertEqual(gde.util.placeholder_name(None), "geph")
        self.assertEqual(gde.util.placeholder_name(a0, scope="foo/"),
                         "foo/geph__foo_0")
        self.assertEqual(gde.util.placeholder_name(a0, scope="foo"),
                         "foo/geph__foo_0")
        self.assertEqual(gde.util.placeholder_name(None, scope="foo/"),
                         "foo/geph")
        self.assertEqual(gde.util.placeholder_name(None, scope="foo"),
                         "foo/geph")

        # Test placeholder creation.
        g1_graph = tf.Graph()
        with g1_graph.as_default():
            tf.constant(1, dtype=tf.float32, name="a1")

        g1 = gde.Graph(g1_graph)
        a1_tensor = g1["a1"].output(0)
        print("Type of a1_tensor is {}".format(type(a1_tensor)))

        ph1 = gde.util.make_placeholder_from_tensor(g1, a1_tensor)
        ph2 = gde.util.make_placeholder_from_dtype_and_shape(g1,
                                                             dtype=tf.float32)
        self.assertEqual(ph1.name, "geph__a1_0")
        self.assertEqual(ph2.name, "geph")
Example #2
  def test_graph_while_loop(self):
    tf_graph = tf.Graph()
    with tf_graph.as_default():
      max_index = tf.placeholder(dtype=tf.int32, shape=tuple())
      index_start = tf.constant(1)
      sum_start = tf.constant(0)
      _, result = tf.while_loop(
          cond=lambda i, unused_s: i <= max_index,
          body=lambda i, s: (i + 1, s + i),
          loop_vars=[index_start, sum_start])
    g = gde.Graph(tf_graph)
    result_tensor = g[result.op.name].output(0)
    max_index_tensor = g[max_index.op.name].output(0)

    g.frozen = True
    copied_graph = gde.Graph()
    _, copy_info = gde.copy(
        g, dst_graph=copied_graph, dst_scope="imported")
    copied_result_tensor = copy_info.transformed(result_tensor)
    copied_max_index_tensor = copy_info.transformed(max_index_tensor)

    tf_copied_graph = tf.Graph()
    with tf_copied_graph.as_default():
      tf.import_graph_def(copied_graph.to_graph_def(), name="")
      with tf.Session() as sess:
        n = 10
        sum_val = sess.run(copied_result_tensor.name,
                           feed_dict={copied_max_index_tensor.name: n})
        self.assertEqual(sum_val, 55)
Example #3
  def test_graph_cond(self):
    tf_g = tf.Graph()
    with tf_g.as_default():
      choice_tensor = tf.placeholder(shape=(), dtype=tf.bool, name="choice")
      _ = tf.identity(
        tf.cond(
          choice_tensor,
          lambda: tf.constant(1),
          lambda: tf.constant(2)
        ),
        name="result"
      )

    g = gde.Graph(tf_g)
    choice = g["choice"].output(0)
    result = g["result"].output(0)

    copied_g = gde.Graph()
    _, copy_info = gde.copy(
        g, dst_graph=copied_g, dst_scope="imported")
    copied_result = copy_info.transformed(result)
    copied_choice = copy_info.transformed(choice)

    tf_copied_graph = tf.Graph()
    with tf_copied_graph.as_default():
      tf.import_graph_def(copied_g.to_graph_def(), name="")
      with tf.Session() as sess:
        res = sess.run(copied_result.name, feed_dict={copied_choice.name: True})
        self.assertEqual(res, 1)
        res = sess.run(copied_result.name,
                       feed_dict={copied_choice.name: False})
        self.assertEqual(res, 2)
Example #4
    def test_unique_graph(self):
        """Test for gde.util.check_graphs and gde.util.get_unique_graph."""
        g0_graph = tf.Graph()
        with g0_graph.as_default():
            tf.constant(1, name="a")
            tf.constant(2, name="b")
        g1_graph = tf.Graph()
        with g1_graph.as_default():
            tf.constant(1, name="a")
            tf.constant(2, name="b")

        g0 = gde.Graph(g0_graph.as_graph_def())
        g1 = gde.Graph(g1_graph.as_graph_def())
        a0, b0, a1, b1 = (g0["a"], g0["b"], g1["a"], g1["b"])

        print("g0['a'] returns {} (type {})".format(g0['a'], type(g0['a'])))

        # Same graph, should be fine.
        self.assertIsNone(gde.util.check_graphs(a0, b0))
        # Two different graphs, should assert.
        with self.assertRaises(ValueError):
            gde.util.check_graphs(a0, b0, a1, b1)
        # a0 and b0 belong to the same graph, should be fine.
        self.assertEqual(gde.util.get_unique_graph([a0, b0]), g0)
        # Tensors from different graphs, should raise an error.
        with self.assertRaises(ValueError):
            gde.util.get_unique_graph([a0, b0, a1, b1])
Example #5
  def test_copy_with_collection(self):
    """Test for issue #36"""
    tmp_graph = tf.Graph()
    with tmp_graph.as_default():
      c = tf.constant(42, name="FortyTwo")
      tmp_graph.add_to_collection("Answers", c)

    g = gde.Graph(tmp_graph)
    g2 = gde.Graph()
    gde.transform.copy(g, g2)
    self.assertTrue("Answers" in g2.get_all_collection_keys())
Example #6
    def test_graph_collection_types(self):

        # Build a graph whose collections include a NodeList (holding an op
        # and a tensor) and a BytesList (holding a variable)
        tf_g = tf.Graph()
        with tf_g.as_default():
            y_ = tf.placeholder(tf.int64, [None])
            x = tf.get_variable("x", [1])
            with tf.name_scope('loss'):
                cross_entropy = tf.losses.sparse_softmax_cross_entropy(
                    labels=y_, logits=x)
            with tf.name_scope('adam_optimizer'):
                _ = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

        g = gde.Graph(tf_g)

        keys = g.get_all_collection_keys()

        # Check that loss tensor added to collection
        self.assertIn('losses', keys)
        t = g.get_tensor_by_name(
            "loss/sparse_softmax_cross_entropy_loss/value:0")
        self.assertIn('losses', t.collection_names)

        # Check that variable added to collection
        self.assertIn('variables', keys)
        v = g.get_variable_by_name('x:0')
        self.assertIn('variables', v.collection_names)

        # Check that op added to collection
        self.assertIn('train_op', keys)
        n = g.get_node_by_name("adam_optimizer/Adam")
        self.assertIn('train_op', n.collection_names)
Example #7
    def test_change_batch_size_variable_size(self):
        """
    Verifies that a batch size of None (variable size) works.
    Also verifies that passing a tensor instead of node works.
    """
        tf_g = tf.Graph()
        with tf_g.as_default():
            input_tensor = tf.placeholder(dtype=tf.float32,
                                          shape=[32, 1],
                                          name="Input")
            result_tensor = input_tensor + 42.0
        g = gde.Graph(tf_g)
        # Note that we pass a Tensor as the third argument instead of a Node.
        gde.rewrite.change_batch_size(g, None, [g[input_tensor.name]])

        with g.to_tf_graph().as_default():
            with tf.Session() as sess:
                result = sess.run(
                    result_tensor.name,
                    {input_tensor.name: np.array([0]).reshape([1, 1])})
                self.assertTrue(
                    np.array_equal(result,
                                   np.array([42.]).reshape([1, 1])))
                result = sess.run(
                    result_tensor.name,
                    {input_tensor.name: np.array([0, 1]).reshape([2, 1])})
                self.assertTrue(
                    np.array_equal(result,
                                   np.array([42., 43.]).reshape([2, 1])))
Example #8
  def test_transform_nodedef_fn(self):
    transformer = gde.Transformer()

    def nodedef_fn(node_def):
      if "_foo" in node_def.attr:
        del node_def.attr["_foo"]
      node_def.attr["_bar"].s = b"bar"
      return node_def

    my_copy_op_handler = functools.partial(
        gde.transform.copy_op_handler, nodedef_fn=nodedef_fn)
    transformer.transform_op_handler = my_copy_op_handler

    graph = gde.Graph()
    transformer(self.graph, graph, "", "")

    c0_before = self.graph["Const"]
    c0_after = graph["Const"]
    self.assertEqual(c0_before.get_attr("_foo"), "foo")
    with self.assertRaises(ValueError):
      c0_after.get_attr("_foo")

    all_ops = graph.nodes
    for op in all_ops:
      self.assertEqual(op.get_attr("_bar"), "bar")
Example #9
def main(_):
    # Create a graph
    tf_g = tf.Graph()
    with tf_g.as_default():
        a = tf.constant(1.0, shape=[2, 3], name="a")
        c = tf.add(tf.placeholder(dtype=np.float32),
                   tf.placeholder(dtype=np.float32),
                   name="c")

    # Serialize the graph
    g = gde.Graph(tf_g.as_graph_def())
    print("Before:\n{}".format(_indent(g.to_graph_def())))

    # Modify the graph.
    # In this case we replace the two input placeholders with constants.
    # One of the constants (a) is a node that was in the original graph.
    # The other one (b) we create here.
    b = gde.make_const(g, "b", np.full([2, 3], 2.0, dtype=np.float32))
    gde.swap_inputs(g[c.op.name], [g[a.name], b.output(0)])

    print("After:\n{}".format(_indent(g.to_graph_def())))

    # Reconstitute the modified serialized graph as TensorFlow graph...
    with g.to_tf_graph().as_default():
        # ...and print the value of c, which should be 2x3 matrix of 3.0's
        with tf.Session() as sess:
            res = sess.run(c.name)
            print("Result is:\n{}".format(_indent(res)))
Example #10
  def test_graph_replace_gradients(self):
    tmp_graph = tf.Graph()
    with tmp_graph.as_default():
      w_tensor = tf.Variable(0.0, name="w")
      y_tensor = tf.multiply(tf.multiply(w_tensor, w_tensor, name="mul1"),
                             w_tensor, name="mul2")
      grad_tensor = tf.gradients(y_tensor, w_tensor, name="gradient")[0]
      _ = tf.identity(grad_tensor, "grad")

    g = gde.Graph(tmp_graph)

    # Map the tensor to replace (w/read) to its replacement (the gradient).
    replacement_ts = {g["w/read"].output(0): g["grad"].output(0)}

    # Should not raise exception.
    res = gde.graph_replace(g["grad"].output(0), replacement_ts,
                            dst_scope="res")

    self.assertNotEqual(res.name, g["grad"].output(0).name)
    after_graph = tf.Graph()
    with after_graph.as_default():
      tf.import_graph_def(g.to_graph_def(), name="")
      gde.util.load_variables_to_tf_graph(g)
      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        g_val, res_val = sess.run([g["grad"].output(0).name, res.name])
    self.assertNear(g_val, 0.0, ERROR_TOLERANCE)
    self.assertNear(res_val, 0.0, ERROR_TOLERANCE)
Example #11
    def test_fold_batch_norms_mat_mul(self):
        """
    Python port of TestFoldBatchNormsMatMul in the TF Graph Transform Tool
    tests.
    """
        input_data = (np.array(
            [1., 4., 2., 5., 3., 6., -1., -4., -2., -5., -3., -6.],
            dtype=np.float32).reshape([6, 2]))
        weights_data = (np.array([1., 2., 3., 4.],
                                 dtype=np.float32).reshape([2, 2]))
        mul_values_data = (np.array([2., 3.], dtype=np.float32).reshape([2]))

        # Create and run graph:
        # (input, weights) --> MatMul --> Mul(const)
        tf_g = tf.Graph()
        with tf_g.as_default():
            in_t = tf.constant(input_data, name="input_op")
            weights_t = tf.constant(weights_data, name="weights_op")
            matmul_t = tf.linalg.matmul(in_t, weights_t, name="matmul_op")
            mul_values_t = tf.constant(mul_values_data, name="mul_values")
            output_t = tf.multiply(matmul_t, mul_values_t, name="output")
        with tf.Session(graph=tf_g) as sess:
            original_outputs = sess.run(output_t)

        # Rewrite and compare results
        g = gde.Graph(tf_g)
        gde.rewrite.fold_batch_norms(g)
        with tf.Session(graph=g.to_tf_graph()) as sess:
            fused_outputs = sess.run(output_t.name)

        self.assertClose(original_outputs, fused_outputs, delta=1e-5)

        # Make sure the rewrite happened
        for n in g.nodes:
            self.assertNotEqual(n.op_type, "Mul")
Example #12
    def test_fold_batch_norms_up_fused(self):
        """
    Test of the fold_batch_norms_up() rewrite with the pattern:
       FusedBatchNorm => Relu => Conv2D
    """
        input_data = (np.array(
            [1., 4., 2., 5., 3., 6., -1., -4., -2., -5., -3., -6.],
            dtype=np.float32).reshape([1, 1, 6, 2]))
        weights_data = (np.array([1., 2., 3., 4., 0.1, 0.2, 0.3, 0.4],
                                 dtype=np.float32).reshape([1, 2, 2, 2]))
        mean_data = np.array([10., 20.], dtype=np.float32).reshape([2])
        variance_data = np.array([0.25, 0.5], dtype=np.float32).reshape([2])
        beta_data = np.array([0.1, 0.6], dtype=np.float32).reshape([2])
        gamma_data = np.array([1., 2.], dtype=np.float32).reshape([2])

        # Create the non-deprecated part of the graph
        # input -> Relu -> Conv2D
        tf_g = tf.Graph()
        with tf_g.as_default():
            in_t = tf.constant(input_data, name="input_op")
            relu_t = tf.nn.relu(in_t, name="relu_op")
            weights_t = tf.constant(weights_data, name="weights_op")
            conv_t = tf.nn.conv2d(relu_t,
                                  weights_t, [1, 1, 1, 1],
                                  "VALID",
                                  name="output")
            mean_t = tf.constant(mean_data, name="mean_op")
            variance_t = tf.constant(variance_data, name="variance_op")
            beta_t = tf.constant(beta_data, name="beta_op")
            gamma_t = tf.constant(gamma_data, name="gamma_op")
        g = gde.Graph(tf_g)

        # Add fused batch norm node manually because there's no Python API to add
        # this op directly.
        batch_norm_node = g.add_node("batch_norm_op", "FusedBatchNorm")
        batch_norm_node.set_inputs([
            g[in_t.name], g[gamma_t.name], g[beta_t.name], g[mean_t.name],
            g[variance_t.name]
        ])
        batch_norm_node.add_attr("T", tf.float32)
        batch_norm_node.add_attr("epsilon", 0.00001)
        batch_norm_node.add_attr("is_training", False)
        batch_norm_node.infer_outputs()

        # Redirect the input of the ReLU to our new batch norm
        g.get_node_by_name(relu_t.op.name).set_inputs(
            [batch_norm_node.output(0)])

        # Run the graph before and after the rewrite and compare results
        with tf.Session(graph=g.to_tf_graph()) as sess:
            original_outputs = sess.run("output:0")
        gde.rewrite.fold_batch_norms_up(g)
        with tf.Session(graph=g.to_tf_graph()) as sess:
            fused_outputs = sess.run("output:0")
        self.assertClose(original_outputs, fused_outputs, delta=1e-5)

        # Make sure the rewrite happened
        for n in g.nodes:
            self.assertNotEqual(n.op_type, "FusedBatchNorm")
Example #13
 def build_graph(self):
     tf_g = tf.Graph()
     with tf_g.as_default():
         a = tf.constant(1, name="a")
         b = tf.constant(2, name="b")
         c = tf.constant(10, name="c")
         add_res = tf.add(a, b, name="add")
         res = tf.multiply(add_res, c, name="mult")
     g = gde.Graph(g=tf_g)
     return g
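Running the returned graph end to end (a sketch, not part of the original
helper, using the import names assumed throughout these examples) should
evaluate "mult:0" to (1 + 2) * 10 = 30:

import tensorflow as tf
import graph_def_editor as gde

# Sketch: same graph as build_graph() above, executed via to_tf_graph().
tf_g = tf.Graph()
with tf_g.as_default():
    a = tf.constant(1, name="a")
    b = tf.constant(2, name="b")
    c = tf.constant(10, name="c")
    tf.multiply(tf.add(a, b, name="add"), c, name="mult")
g = gde.Graph(g=tf_g)
with g.to_tf_graph().as_default():
    with tf.Session() as sess:
        assert sess.run("mult:0") == 30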
Example #14
    def test_fold_fused_batch_norms(self):
        """
    Version of test_fold_old_batch_norms() with a FusedBatchNorms op instead
    of BatchNormWithGlobalNormalization
    """
        input_data = (np.array(
            [1., 4., 2., 5., 3., 6., -1., -4., -2., -5., -3., -6.],
            dtype=np.float32).reshape([1, 1, 6, 2]))
        weights_data = (np.array([1., 2., 3., 4., 0.1, 0.2, 0.3, 0.4],
                                 dtype=np.float32).reshape([1, 2, 2, 2]))
        mean_data = np.array([10., 20.], dtype=np.float32).reshape([2])
        variance_data = np.array([0.25, 0.5], dtype=np.float32).reshape([2])
        beta_data = np.array([0.1, 0.6], dtype=np.float32).reshape([2])
        gamma_data = np.array([1., 2.], dtype=np.float32).reshape([2])

        # Create the non-deprecated part of the graph
        # (input, weights) --> Conv2D --> [...], plus inputs to [...]
        tf_g = tf.Graph()
        with tf_g.as_default():
            in_t = tf.constant(input_data, name="input_op")
            weights_t = tf.constant(weights_data, name="weights_op")
            conv_t = tf.nn.conv2d(in_t,
                                  weights_t, [1, 1, 1, 1],
                                  "VALID",
                                  name="conv_op")
            mean_t = tf.constant(mean_data, name="mean_op")
            variance_t = tf.constant(variance_data, name="variance_op")
            beta_t = tf.constant(beta_data, name="beta_op")
            gamma_t = tf.constant(gamma_data, name="gamma_op")
        g = gde.Graph(tf_g)

        # Add fused batch norm node manually because there's no Python API to add
        # this op directly.
        batch_norm_node = g.add_node("output", "FusedBatchNorm")
        batch_norm_node.set_inputs([
            g[conv_t.name], g[gamma_t.name], g[beta_t.name], g[mean_t.name],
            g[variance_t.name]
        ])
        batch_norm_node.add_attr("T", tf.float32)
        batch_norm_node.add_attr("epsilon", 0.00001)
        batch_norm_node.add_attr("is_training", False)
        batch_norm_node.infer_outputs()

        # Run the graph before and after the rewrite and compare results
        with tf.Session(graph=g.to_tf_graph()) as sess:
            original_outputs = sess.run("output:0")
        gde.rewrite.fold_old_batch_norms(g)
        with tf.Session(graph=g.to_tf_graph()) as sess:
            fused_outputs = sess.run("output:0")
        self.assertClose(original_outputs, fused_outputs, delta=1e-5)

        # Make sure the rewrite happened
        for n in g.nodes:
            self.assertNotEqual(n.op_type, "FusedBatchNorm")
Example #15
 def setUp(self):
   tf_graph = tf.Graph()
   with tf_graph.as_default():
     c0 = tf.constant(1.0, shape=[10], name="Const")
     c0.op._set_attr("_foo", tf.AttrValue(s=b"foo"))
     c1 = tf.constant(1.0, shape=[10], name="Const")
     c2 = tf.constant(1.0, shape=[10], name="Const")
     i = tf.constant(1.0, shape=[10], name="Input")
     tf.identity(tf.add(c2, tf.add(c1, tf.add(c0, i))), name="o")
   self.graph = gde.Graph(tf_graph)
   self.o = self.graph["o"]
Example #16
 def test_graph_replace_missing(self):
   tmp_graph = tf.Graph()
   with tmp_graph.as_default():
     a_tensor = tf.constant(1.0, name="a")
     b_tensor = tf.constant(2.0, name="b")
     _ = tf.add(a_tensor, 2 * b_tensor, name="c")
     _ = tf.constant(2.0, name="d")
   g = gde.Graph(tmp_graph)
   res = gde.graph_replace([g["b"].output(0), g["c"].output(0)],
                           {g["a"].output(0): g["d"].output(0)})
   self.assertEqual(res[0].name, "b:0")
   self.assertEqual(res[1].name, "c_1:0")
Example #17
 def test_node_collection_type_unique(self):
     g = gde.Graph()
     a = g.add_node("a", "a_op")
     a.set_outputs_from_pairs([(tf.int32, tf.TensorShape([]))])
     a.add_to_collection("mixed_collection")
     b = g.add_node("b", "b_op")
     b.set_outputs_from_pairs([(tf.int32, tf.TensorShape([]))])
     t = b.outputs[0]
     t.add_to_collection("mixed_collection")
     with self.assertRaisesRegex(
             TypeError, "Node collections cannot be Nodes and Tensors.*"):
         g.get_collection_by_name("mixed_collection")
Example #18
    def test_make_list_of_node(self):
        """Test for gde.util.make_list_of_op."""
        g0_graph = tf.Graph()
        with g0_graph.as_default():
            tf.constant(1, name="a0")
            tf.constant(2, name="b0")
        g0 = gde.Graph(g0_graph)

        # Should extract the ops from the graph.
        self.assertEqual(len(gde.util.make_list_of_op(g0)), 2)
        # Should extract the ops from the tuple.
        self.assertEqual(len(gde.util.make_list_of_op((g0["a0"], g0["b0"]))),
                         2)
Example #19
def _make_javascript_deployable_graph(frozen_graph_def, graph_gen, temp_dir,
                                      saved_model_location):
    # type: (tf.GraphDef, prepost.GraphGen, str, str) -> None
    """
  Prepare a SavedModel directory with a graph that is deployable via
  TensorFlow.js

  Args:
    frozen_graph_def: Base starter graph produced by inference, after turning
      variables to constants but before other rewrites.
    graph_gen: Callbacks for the current model
    temp_dir: Temporary directory in which to dump intermediate results in
      case they are needed for debugging.
    saved_model_location: Location where the final output SavedModel should go

  Returns:
    A graph that has been optimized. No preprocessing or postprocessing ops
    are attached, as the ops we would like to use for those purposes are not
    currently implemented in TensorFlow.js
  """
    g = gde.Graph(frozen_graph_def)

    print("            Number of ops in frozen graph: {}".format(
        len(frozen_graph_def.node)))

    g = _apply_generic_deployment_rewrites(g, graph_gen, temp_dir)

    # Graph preparation complete. Create a SavedModel "file" (actually a
    # directory)
    saved_model_graph = tf.Graph()
    with saved_model_graph.as_default():
        with tf.Session() as sess:
            tf.import_graph_def(g.to_graph_def(), name="")

            # simple_save needs pointers to tensors, so pull input and output
            # tensors out of the graph.
            inputs_dict = {
                n: saved_model_graph.get_tensor_by_name(n + ":0")
                for n in graph_gen.input_node_names()
            }
            outputs_dict = {
                n: saved_model_graph.get_tensor_by_name(n + ":0")
                for n in graph_gen.output_node_names()
            }
            if os.path.isdir(saved_model_location):
                shutil.rmtree(saved_model_location)
            tf.saved_model.simple_save(sess,
                                       export_dir=saved_model_location,
                                       inputs=inputs_dict,
                                       outputs=outputs_dict)
    print("SavedModel written to {}".format(saved_model_location))
Example #20
    def build_graph_with_function(self):
        """Builds a tf graph for function (x + y) * 10.0 ."""
        @tf.function
        def multiplier_function(v):
            return tf.constant(10.0, name="function_multiplier") * v

        tf_g = tf.Graph()
        with tf_g.as_default():
            x = tf.placeholder(name="x", dtype=tf.float32, shape=[])
            y = tf.placeholder(name="y", dtype=tf.float32, shape=[])
            result_op = tf.add(x, y, name="add")
            func_call_op = multiplier_function(result_op)
            _ = tf.identity(func_call_op, name="output")
        return gde.Graph(g=tf_g)
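Example #30 at the end of this listing calls build_tf_graph() and
run_tf_graph(), which are not shown here. Given the node names above ("x",
"y", "output"), a plausible sketch of the runner is:

import tensorflow as tf

def run_tf_graph(tf_g, x_val, y_val):
    # Hypothetical helper matching the calls in Example #30: feed the two
    # scalar placeholders and fetch the final identity node.
    with tf.Session(graph=tf_g) as sess:
        return sess.run("output:0", feed_dict={"x:0": x_val, "y:0": y_val})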
Example #21
  def test_compute_boundary_ts_2(self):
    """Test for ge.compute_boundary_ts."""
    tf_graph = tf.Graph()
    with tf_graph.as_default():
      a_tensor = tf.constant(1, name="a")
      b_tensor = tf.constant(1, name="b")
      c_tensor = tf.add(a_tensor, b_tensor, name="c")
      _ = a_tensor + c_tensor

    g = gde.Graph(tf_graph)
    input_ts, output_ts, inside_ts = gde.compute_boundary_ts([g["a"], g["c"]])
    self.assertEqual(list(input_ts), [g["b"].output(0)])
    self.assertEqual(list(output_ts), [g["a"].output(0), g["c"].output(0)])
    self.assertEqual(list(inside_ts), [g["a"].output(0)])
Example #22
def _graft_pre_and_post_processing_to_main_graph(g):
    # type: (gde.Graph) -> None
    """
  Attach pre- and post-processing subgraphs to the main graph.

  Args:
    g: GDE representation of the core graph. Modified in place.
  """
    # Build the pre- and post-processing subgraphs and import into GDE
    pre_g = gde.Graph(_build_preprocessing_graph_def())
    post_g = gde.Graph(_build_postprocessing_graph_def())

    # Replace the graph's input placeholder with the contents of our
    # pre-processing graph.
    name_of_input_node = _INPUT_NODE_NAMES[0]
    gde.copy(pre_g, g)
    gde.reroute_ts(
        g.get_node_by_name("preprocessed_image").output(0),
        g.get_node_by_name(name_of_input_node).output(0))
    g.remove_node_by_name(name_of_input_node)
    g.rename_node("raw_image", name_of_input_node)

    # Tack on the postprocessing graph at the original output and rename
    # the postprocessed output to the original output's name
    # The original graph produces an output called "detection_classes".
    # The postprocessing graph goes from "detection_classes" to
    # "decoded_detection_classes".
    # The graph after modification produces decoded classes under the original
    # "detection_classes" name. The original output is renamed to
    # "raw_detection_classes".
    g.rename_node("detection_classes", "raw_detection_classes")
    gde.copy(post_g, g)
    gde.reroute_ts(
        g.get_node_by_name("raw_detection_classes").output(0),
        g.get_node_by_name("detection_classes").output(0))
    g.remove_node_by_name("detection_classes")
    g.rename_node("decoded_detection_classes", "detection_classes")
  def _create_replace_graph():
    """Subroutine of the next few tests. Creates the graph that all these
    tests use. Since the tests modify the graph, it needs to be recreated
    each time.

    Returns:
      (Graph object, c, target tensor to replace, new value, output tensor)"""
    tmp_graph = tf.Graph()
    with tmp_graph.as_default():
      a = tf.constant(1.0, name="a")
      b = tf.Variable(1.0, name="b")
      eps = tf.constant(0.001, name="eps")
      tf.identity(a + b + eps, name="c")
      tf.constant(2.0, name="a_new")
    ret = gde.Graph(tmp_graph)
    return ret, ret["a"].output(0), ret["a_new"].output(0), ret["c"].output(0)
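A typical use of this fixture (a sketch consistent with the gde.graph_replace
calls in Examples #10 and #16) swaps a_new in for a and re-evaluates c:

import tensorflow as tf
import graph_def_editor as gde

g, a_t, a_new_t, c_t = _create_replace_graph()
new_c = gde.graph_replace(c_t, {a_t: a_new_t})  # replace a with a_new

# Evaluate both versions, mirroring Example #10: import the rewritten
# GraphDef, restore the variable, then run.
with tf.Graph().as_default():
    tf.import_graph_def(g.to_graph_def(), name="")
    gde.util.load_variables_to_tf_graph(g)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        old_val, new_val = sess.run([c_t.name, new_c.name])
        # old_val is 1.0 + b + eps; new_val is 2.0 + b + eps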
Example #24
    def test_identity(self):
        tf_g = tf.Graph()
        with tf_g.as_default():
            c = tf.constant(42)
            i1 = tf.identity(c, name="identity_tf")

        g = gde.Graph(tf_g)
        i2_node = gde.util.make_identity(g, "identity_gde",
                                         g.get_tensor_by_name(c.name))
        i2 = i2_node.outputs[0]

        with g.to_tf_graph().as_default():
            with tf.Session() as sess:
                result1 = sess.run(i1.name)
                result2 = sess.run(i2.name)
        self.assertEqual(result1, result2)
Example #25
    def test_get_generating_consuming(self):
        """Test for gde.util.get_generating_ops and gde.util.get_generating_ops."""
        g0_graph = tf.Graph()
        with g0_graph.as_default():
            a0_tensor = tf.constant(1, name="a0")
            b0_tensor = tf.constant(2, name="b0")
            tf.add(a0_tensor, b0_tensor, name="c0")
        g0 = gde.Graph(g0_graph)
        a0 = g0["a0"].output(0)
        b0 = g0["b0"].output(0)
        c0 = g0["c0"].output(0)

        self.assertEqual(len(gde.util.get_generating_ops([a0, b0])), 2)
        self.assertEqual(len(gde.util.get_consuming_ops([a0, b0])), 1)
        self.assertEqual(len(gde.util.get_generating_ops([c0])), 1)
        self.assertEqual(gde.util.get_consuming_ops([c0]), [])
Example #26
    def test_transform(self):
        transformer = gde.Transformer()

        def my_transform_op_handler(info, op, new_inputs):
            add_noise = op.name.startswith("Add")
            op_, op_outputs_ = gde.transform.copy_op_handler(
                info, op, new_inputs)
            if not add_noise:
                return op_, op_outputs_

            # add some noise to op
            # Old code:
            # with info.graph_.as_default():
            #   t_ = math_ops.add(
            #       constant_op.constant(1.0, shape=[10], name="Noise"),
            #       op_.outputs[0],
            #       name="AddNoise")
            noise_op = gde.make_const(info.graph_,
                                      "Noise",
                                      np.full([10], 1., dtype=np.float32),
                                      uniquify_name=True)
            add_noise_op = info.graph_.add_node("AddNoise",
                                                "Add",
                                                uniquify_name=True)
            add_noise_op.add_attr("T", tf.float32)
            add_noise_op.set_inputs([noise_op.outputs[0], op_.outputs[0]])
            add_noise_op.infer_outputs()
            t_ = add_noise_op.outputs[0]

            # return the "noisy" op
            return op_, [t_]

        transformer.transform_op_handler = my_transform_op_handler

        graph = gde.Graph()
        transformer(self.graph, graph, "", "")
        matcher0 = gde.OpMatcher("AddNoise").input_ops(
            "Noise",
            gde.OpMatcher("Add").input_ops("Const", "Input"))
        matcher1 = gde.OpMatcher("AddNoise_1").input_ops(
            "Noise_1",
            gde.OpMatcher("Add_1").input_ops("Const_1", matcher0))
        matcher2 = gde.OpMatcher("AddNoise_2").input_ops(
            "Noise_2",
            gde.OpMatcher("Add_2").input_ops("Const_2", matcher1))
        top = gde.select_ops("^AddNoise_2$", graph=graph)[0]
        self.assertTrue(matcher2(top))
Example #27
    def setUp(self):
        tf_graph = tf.Graph()
        with tf_graph.as_default():
            a = tf.constant([1., 1.], shape=[2], name="a")
            with tf.name_scope("foo"):
                b = tf.constant([2., 2.], shape=[2], name="b")
                c = tf.add(a, b, name="c")
                d = tf.constant([3., 3.], shape=[2], name="d")
                with tf.name_scope("bar"):
                    _ = tf.add(c, d, name="e")
                    f = tf.add(c, d, name="f")
                    g = tf.add(c, a, name="g")
                    with tf.control_dependencies([c.op]):
                        _ = tf.add(f, g, name="h")

        self.graph = gde.Graph(tf_graph)
        self.f_op = self.graph[f.op.name]
Example #28
    def test_control_outputs(self):
        """Test for the gde.util.ControlOutputs class."""
        g0_graph = tf.Graph()
        with g0_graph.as_default():
            a0_tensor = tf.constant(1, name="a0")
            b0_tensor = tf.constant(2, name="b0")
            x0_tensor = tf.constant(3, name="x0")
            with tf.control_dependencies([x0_tensor.op]):
                tf.add(a0_tensor, b0_tensor, name="c0")

        g0 = gde.Graph(g0_graph)
        x0_node = g0["x0"]
        c0_node = g0["c0"]
        control_outputs = gde.util.ControlOutputs(g0).get_all()
        self.assertEqual(len(control_outputs), 1)
        self.assertEqual(len(control_outputs[x0_node]), 1)
        self.assertIs(list(control_outputs[x0_node])[0], c0_node)
Example #29
  def test_copy_assert(self):
    tf_g = tf.Graph()
    with tf_g.as_default():
      a = tf.constant(1, name="a")
      b = tf.constant(1, name="b")
      eq = tf.equal(a, b, name="EQ")
      assert_tf_op = tf.Assert(eq, [a, b])
      with tf.control_dependencies([assert_tf_op]):
        _ = tf.add(a, b)
    assert_op_name = assert_tf_op.name

    g = gde.Graph(tf_g)
    assert_op = g[assert_op_name]
    sgv = gde.make_view([assert_op, g["EQ"], g["a"], g["b"]])
    copier = gde.Transformer()
    _, info = copier(sgv, sgv.graph, "", "")
    new_assert_op = info.transformed(assert_op)
    self.assertIsNotNone(new_assert_op)
Example #30
    def test_function_rewrite(self):
        tf_g = self.build_tf_graph()
        self.assertEqual(30.0, self.run_tf_graph(tf_g, 1.0, 2.0))
        graph = gde.Graph(tf_g)
        add_op = graph.get_node_by_name("add")
        function_name = add_op.outputs[0].consumers()[0].get_attr("f").name
        self.assertIn(function_name, graph.function_names)

        function_graph = graph.get_function_graph_by_name(function_name)
        function_multiplier_op = \
            function_graph.get_node_by_name("function_multiplier")
        self.assertEqual(10.0, function_multiplier_op.get_attr("value"))
        function_multiplier_op.replace_attr("value",
                                            np.array(1000.0, dtype=np.float32))

        self.assertEqual(3000.0,
                         self.run_tf_graph(graph.to_tf_graph(), 1.0, 2.0))
        return graph