    def test_import_saved_model(self):
        g = self.test_function_rewrite()
        model_dir = self.temp_dir + "/saved_model"
        self.save_tf_graph(g.to_tf_graph(), model_dir)

        g = gde.saved_model_to_graph(model_dir)
        self.assertEqual(3000.0, self.run_tf_graph(g.to_tf_graph(), 1.0, 2.0))
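This test relies on fixture helpers (save_tf_graph, run_tf_graph) and a graph built by test_function_rewrite, all defined elsewhere in the suite. A minimal sketch of what the two helpers plausibly do; the tensor names ("x:0", "y:0", "out:0") are placeholders, not the real ones:

    def save_tf_graph(self, tf_graph, model_dir):
        # Hypothetical helper: export the graph as a TF1 SavedModel so that
        # gde.saved_model_to_graph() can read it back in.
        with tf.Session(graph=tf_graph) as sess:
            tf.saved_model.simple_save(
                sess, model_dir,
                inputs={"x": tf_graph.get_tensor_by_name("x:0"),
                        "y": tf_graph.get_tensor_by_name("y:0")},
                outputs={"out": tf_graph.get_tensor_by_name("out:0")})

    def run_tf_graph(self, tf_graph, x, y):
        # Hypothetical helper: feed two scalars, fetch the single output.
        with tf.Session(graph=tf_graph) as sess:
            return sess.run("out:0", {"x:0": x, "y:0": y})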
Example #2
def main(_):
    # Grab a copy of the official TensorFlow ResNet50 model in fp16.
    # See https://github.com/tensorflow/models/tree/master/official/resnet
    # Cache the tarball so we don't download it repeatedly
    if not os.path.isdir(_SAVED_MODEL_DIR):
        if os.path.isdir(_TMP_DIR):
            shutil.rmtree(_TMP_DIR)
        os.mkdir(_TMP_DIR)
        print("Downloading model tarball from {}".format(_MODEL_URL))
        urllib.request.urlretrieve(_MODEL_URL, _MODEL_TARBALL)
        print("Unpacking SavedModel from {} to {}".format(
            _MODEL_TARBALL, _TMP_DIR))
        with tarfile.open(_MODEL_TARBALL) as t:
            t.extractall(_TMP_DIR)

    # Load the SavedModel
    tf_g = tf.Graph()
    with tf.Session(graph=tf_g) as sess:
        tf.saved_model.load(sess, [tf.saved_model.tag_constants.SERVING],
                            _SAVED_MODEL_DIR)

    # print("Graph is:\n{}".format(tf_g.as_graph_def()))

    # Print the input and output tensors (with their shapes) before the rewrite
    print("BEFORE:")
    print("  Input tensor is {}".format(
        tf_g.get_tensor_by_name("input_tensor:0")))
    print("  Softmax tensor is {}".format(
        tf_g.get_tensor_by_name("softmax_tensor:0")))

    # Convert the SavedModel to a gde.Graph and rewrite the batch size to None
    g = gde.saved_model_to_graph(_SAVED_MODEL_DIR)
    gde.rewrite.change_batch_size(g, new_size=None, inputs=[g["input_tensor"]])
    if os.path.exists(_AFTER_MODEL_DIR):
        shutil.rmtree(_AFTER_MODEL_DIR)
    g.to_saved_model(_AFTER_MODEL_DIR)

    # Load the rewritten SavedModel into a TensorFlow graph
    after_tf_g = tf.Graph()
    with tf.Session(graph=after_tf_g) as sess:
        tf.saved_model.load(sess, [tf.saved_model.tag_constants.SERVING],
                            _AFTER_MODEL_DIR)
        print("AFTER:")
        print("  Input tensor is {}".format(
            after_tf_g.get_tensor_by_name("input_tensor:0")))
        print("  Softmax tensor is {}".format(
            after_tf_g.get_tensor_by_name("softmax_tensor:0")))

        # Feed a single array of zeros through the graph
        print("Running inference on dummy data")
        result = sess.run("softmax_tensor:0",
                          {"input_tensor:0": np.zeros([1, 224, 224, 3])})
        print("Result is {}".format(result))
Example #3
    def test_change_batch_size_saved_model(self):
        """
    Verifies that changes of batch size survive serializing the graph as a
    SavedModel
    """
        temp_dir = tempfile.mkdtemp()
        try:
            tf_g = tf.Graph()
            with tf_g.as_default():
                input_tensor = tf.placeholder(dtype=tf.float32,
                                              shape=[32, 1],
                                              name="Input")
                result_tensor = input_tensor + 42.0
                with tf.Session() as sess:
                    tf.saved_model.simple_save(sess,
                                               temp_dir + "/model_before",
                                               inputs={"in": input_tensor},
                                               outputs={"out": result_tensor})

            # Make sure the original SavedModel loads properly
            with tf.Session(graph=tf.Graph()) as sess:
                tf.saved_model.load(sess,
                                    [tf.saved_model.tag_constants.SERVING],
                                    temp_dir + "/model_before")

            g = gde.saved_model_to_graph(temp_dir + "/model_before")
            gde.rewrite.change_batch_size(g, None, [g[input_tensor.name]])
            g.to_saved_model(temp_dir + "/model_after")

            with tf.Session(graph=tf.Graph()) as sess:
                tf.saved_model.load(sess,
                                    [tf.saved_model.tag_constants.SERVING],
                                    temp_dir + "/model_after")
                result = sess.run(
                    result_tensor.name,
                    {input_tensor.name: np.array([0]).reshape([1, 1])})
                self.assertTrue(
                    np.array_equal(result,
                                   np.array([42.]).reshape([1, 1])))
                result = sess.run(
                    result_tensor.name,
                    {input_tensor.name: np.array([0, 1]).reshape([2, 1])})
                self.assertTrue(
                    np.array_equal(result,
                                   np.array([42., 43.]).reshape([2, 1])))
        finally:
            # Remove the temp dir unconditionally. Comment out the try/finally
            # if you want the directory to stick around after a test failure.
            shutil.rmtree(temp_dir)
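A quick way to confirm the rewrite took effect is to inspect the placeholder's static shape after reloading: the batch dimension should now be unknown. A sketch, reusing the names from the test above:

    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.load(sess, [tf.saved_model.tag_constants.SERVING],
                            temp_dir + "/model_after")
        input_after = sess.graph.get_tensor_by_name("Input:0")
        print(input_after.shape)  # expected (?, 1) rather than (32, 1)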
Example #4
    def test_number_attr_support(self):
        model_dir = self.temp_dir + "/saved_model"

        @tf.function
        def test_function(c):
            cdim = tf.constant(1, tf.int32)
            c1 = tf.constant([2, 1, 5], tf.int32, name="FuncConst")
            c2 = tf.constant([2, 1, 5], tf.int32)
            # ConcatOffset has a variable number of inputs and outputs,
            # which exercises number_attr support in function graphs
            concat_offset = tf.raw_ops.ConcatOffset(concat_dim=cdim,
                                                    shape=[c, c1, c2])
            out = tf.math.reduce_sum(concat_offset)
            return out

        tf_g = tf.Graph()
        with tf_g.as_default():
            with tf.Session() as sess:
                c = tf.placeholder(name="c", dtype=tf.int32)
                out_func = test_function(c)
                c = tf_g.get_tensor_by_name("c:0")
                self.assertEqual(3, sess.run(out_func, {c: [2, 1, 5]}))

                tf.saved_model.simple_save(sess,
                                           model_dir,
                                           inputs={"c": c},
                                           outputs={"out_func": out_func})

        g = gde.saved_model_to_graph(model_dir)

        tf_g = g.to_tf_graph()
        with tf.Session(graph=tf_g) as sess:
            output_tensor = tf_g.get_tensor_by_name("PartitionedCall:0")
            c = tf_g.get_tensor_by_name("c:0")
            self.assertEqual(3, sess.run(output_tensor, {c: [2, 1, 5]}))

        f = g.get_function_graph_by_name(g.function_names[0])
        func_const_op = f.get_node_by_name("FuncConst")
        func_const_op.replace_attr("value", np.array([2, 2, 5],
                                                     dtype=np.int32))

        tf_g = g.to_tf_graph()
        with tf.Session(graph=tf_g) as sess:
            output_tensor = tf_g.get_tensor_by_name("PartitionedCall:0")
            c = tf_g.get_tensor_by_name("c:0")
            self.assertEqual(4, sess.run(output_tensor, {c: [2, 1, 5]}))
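The expected values follow from ConcatOffset's semantics: for each input shape it returns that input's starting offset along concat_dim, zero in every other dimension. A short numpy check of the arithmetic behind the two assertions:

import numpy as np

def concat_offsets(shapes, concat_dim):
    # Mirrors tf.raw_ops.ConcatOffset: offset i is the cumulative extent of
    # inputs 0..i-1 along concat_dim, zero elsewhere.
    offsets = np.zeros_like(shapes)
    offsets[1:, concat_dim] = np.cumsum(shapes[:-1, concat_dim])
    return offsets

before = np.array([[2, 1, 5], [2, 1, 5], [2, 1, 5]])
print(concat_offsets(before, 1).sum())  # 3

after = np.array([[2, 1, 5], [2, 2, 5], [2, 1, 5]])  # FuncConst rewritten
print(concat_offsets(after, 1).sum())  # 4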
Example #5
    def test_export_saved_model_with_var(self):
        """Import a SavedModel with a variable, modify the resulting graph,
    and write it out as a second SavedModel"""
        tf_g = tf.Graph()
        with tf_g.as_default():
            input_tensor = tf.placeholder(dtype=tf.int32,
                                          shape=[],
                                          name="Input")
            var_tensor = tf.Variable(initial_value=42, name="FortyTwo")
            result_tensor = input_tensor + var_tensor

            with tf.Session() as sess:
                sess.run(var_tensor.initializer)
                model_dir = self.temp_dir + "/saved_model"
                tf.saved_model.simple_save(sess,
                                           model_dir,
                                           inputs={"in": input_tensor},
                                           outputs={"out": result_tensor})

        g = gde.saved_model_to_graph(model_dir)

        # Verify that the import went ok
        with g.to_tf_graph().as_default():
            with tf.Session() as sess:
                sess.run(var_tensor.initializer.name)
                result = sess.run(result_tensor.name, {input_tensor.name: 1})
        self.assertEqual(result, 43)

        # Now rewrite plus to minus.
        result_op = g.get_node_by_name(result_tensor.op.name)
        result_op.change_op_type("Sub")

        second_model_dir = self.temp_dir + "/saved_model_after"
        g.to_saved_model(second_model_dir)

        after_tf_g = tf.Graph()
        with after_tf_g.as_default():
            with tf.Session() as sess:
                tf.saved_model.load(sess,
                                    [tf.saved_model.tag_constants.SERVING],
                                    second_model_dir)
                result = sess.run(result_tensor.name, {input_tensor.name: 1})
                self.assertEqual(result, -41)
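Swapping "Add" for "Sub" in place is safe here because the two ops share a signature (two inputs with a single dtype attr, one output), so the node's existing inputs and attributes remain valid; the reloaded model then computes 1 - 42 = -41. The same pattern should work for any op pair with matching signatures, e.g.:

    result_op.change_op_type("Mul")  # sketch: 1 * 42 would yield 42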
Example #6
    def test_import_saved_model(self):
        tf_g = tf.Graph()
        with tf_g.as_default():
            input_tensor = tf.placeholder(dtype=tf.int32,
                                          shape=[],
                                          name="Input")
            result_tensor = input_tensor + 42

            model_dir = self.temp_dir + "/saved_model"
            with tf.Session() as sess:
                tf.saved_model.simple_save(sess,
                                           model_dir,
                                           inputs={"in": input_tensor},
                                           outputs={"out": result_tensor})

        g = gde.saved_model_to_graph(model_dir)
        with g.to_tf_graph().as_default():
            with tf.Session() as sess:
                result = sess.run(result_tensor.name, {input_tensor.name: 1})
        self.assertEqual(result, 43)
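The string-name feeds and fetches work because gde preserves node names across the round trip; if a name were lost, the lookups below would raise. A quick sanity check, sketched:

    round_trip = g.to_tf_graph()
    round_trip.get_tensor_by_name(input_tensor.name)   # "Input:0"
    round_trip.get_tensor_by_name(result_tensor.name)  # e.g. "add:0"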
Example #7
    def test_collection_roundtrip_savedmodel(self):
        tf_g = tf.Graph()
        with tf_g.as_default():
            x = tf.placeholder(dtype=tf.float32, shape=[])
            y = tf.placeholder(dtype=tf.float32, shape=[])
            w = tf.Variable([1.0, 2.0], name="w")
            c = tf.constant(0.0)
            tf.add_to_collection('tensors', c)
            y_model = tf.multiply(x, w[0]) + w[1] + c

            error = tf.square(y - y_model)
            train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)
            model = tf.global_variables_initializer()

            with tf.Session() as sess:
                sess.run(model)
                sess.run(train_op, feed_dict={x: 0.5, y: 1.0})
                sess.run(w)
                model_dir = self.temp_dir + "/saved_model"
                tf.saved_model.simple_save(sess,
                                           model_dir,
                                           inputs={"in": x},
                                           outputs={"out": error})

        expected_collections = ['variables', 'tensors', 'train_op']

        # Check that the expected collections exist before export
        for name in expected_collections:
            self.assertIn(name, tf_g.collections)

        # Load the TF SavedModel with gde
        g = gde.saved_model_to_graph(model_dir)

        # Check collections are loaded from tf savedmodel
        collections = g.get_all_collection_keys()
        for name in expected_collections:
            self.assertIn(name, collections)

        # Check collections are assigned when loaded
        w_gde = g.get_variable_by_name(w.name)
        self.assertIn('variables', w_gde.collection_names)
        c_gde = g.get_tensor_by_name(c.name)
        self.assertIn('tensors', c_gde.collection_names)
        train_op_gde = g.get_node_by_name(train_op.name)
        self.assertIn('train_op', train_op_gde.collection_names)

        # Use gde to write a second SavedModel
        second_model_dir = self.temp_dir + "/saved_model_after"
        g.to_saved_model(second_model_dir)

        # Load the gde-written SavedModel in a TF session
        after_tf_g = tf.Graph()
        with after_tf_g.as_default():
            with tf.Session() as sess:
                tf.saved_model.load(sess,
                                    [tf.saved_model.tag_constants.SERVING],
                                    second_model_dir)

        # Check the collections loaded back from the gde SavedModel
        for name in expected_collections:
            self.assertIn(name, after_tf_g.collections)

        # Check that collections have expected contents after tf.saved_model.load
        variable_collection_names = [
            v.name for v in after_tf_g.get_collection('variables')
        ]
        self.assertIn(w_gde.name, variable_collection_names)
        tensors_collection_names = [
            t.name for t in after_tf_g.get_collection('tensors')
        ]
        self.assertIn(c.name, tensors_collection_names)
        op_collection_names = [
            o.name for o in after_tf_g.get_collection('train_op')
        ]
        self.assertIn(train_op.name, op_collection_names)
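Collections survive the round trip because TF1 serializes them into the MetaGraphDef's collection_def map, and in TF1 tf.saved_model.load returns that proto. A quick way to inspect the keys directly, sketched against the model written above:

    with tf.Session(graph=tf.Graph()) as sess:
        meta_graph_def = tf.saved_model.load(
            sess, [tf.saved_model.tag_constants.SERVING], second_model_dir)
        print(list(meta_graph_def.collection_def.keys()))
        # expected to include 'variables', 'tensors', and 'train_op'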