def func(i):
     # tf2 handles this a little differently than tf1 (vector vs scalar input), hence the is_tf2() check
     if is_tf2():
         one = tf.constant(np.array([1], dtype=np.int32))
     else:
         one = tf.constant(np.array(1, dtype=np.int32))
     c = lambda i: tf.less(i, 10)
     b = lambda i: tf.add(i, one)
     r = tf.while_loop(c, b, [i])
     if is_tf2():
         r = tf.reshape(r, [-1])
     return tf.identity(r, name=_TFOUTPUT)
 def func(i):
     # tf2 handles this a little differently than tf1 (vector vs scalar input), hence the is_tf2() check
     if is_tf2():
         one = tf.constant(np.array([1], dtype=np.int32))
     else:
         one = tf.constant(np.array(1, dtype=np.int32))
     c = lambda i: tf.logical_and(tf.less(i, 10), tf.greater_equal(
         i, 3))
     b = lambda i: tf.add(i, one)
     r = tf.while_loop(c, b, [i])
     if is_tf2():
         r = tf.reshape(r, [-1])
     return tf.identity(r, name="output")
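Outside the test harness, a standalone function like the one above can be converted to ONNX through tf2onnx's public API. The following is a minimal sketch, not part of the original tests: it assumes tf2onnx provides convert.from_function (available in recent releases), reuses the func defined above, and uses an illustrative opset and output path.

import numpy as np
import tensorflow as tf
import tf2onnx

# input shape chosen to match the tf2 branch above (a length-1 int32 vector)
spec = (tf.TensorSpec((1,), tf.int32, name="input"),)
model_proto, _ = tf2onnx.convert.from_function(
    tf.function(func), input_signature=spec, opset=13, output_path="while_loop.onnx")
print([o.name for o in model_proto.graph.output])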
 def new_graph_validator(g):
     good = True
     if graph_validator is not None:
         good = good and graph_validator(g)
     if is_tf2() and ':' in g.outputs[0]:
         # Only check for tf2 and tfjs, not tflite
         good = good and check_op_count(g, "Loop", 0, disabled=False)
         good = good and check_op_count(g, "Scan", 0, disabled=False)
     return good
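A validator like this is handed to run_test_case (shown further below) through its graph_validator argument, so the converted graph is rejected if Loop or Scan ops remain. A hypothetical usage sketch, reusing names from this file (func, _INPUT, _OUTPUT and new_graph_validator are assumed to be in scope):

    def test_simple_while_loop_validated(self):
        # hypothetical test, not part of the original suite
        x_val = np.array([0], dtype=np.int32)
        self.run_test_case(func, {_INPUT: x_val}, [], [_OUTPUT], rtol=1e-06,
                           graph_validator=new_graph_validator)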
    def test_simple_while_loop(self):
        def func(i):
            # tf2 handles this a little differently than tf1 (vector vs scalar input), hence the is_tf2() check
            if is_tf2():
                one = tf.constant(np.array([1], dtype=np.int32))
            else:
                one = tf.constant(np.array(1, dtype=np.int32))
            c = lambda i: tf.less(i, 10)
            b = lambda i: tf.add(i, one)
            r = tf.while_loop(c, b, [i])
            if is_tf2():
                r = tf.reshape(r, [-1])
            return tf.identity(r, name=_TFOUTPUT)

        if is_tf2():
            x_val = np.array([0], dtype=np.int32)
        else:
            x_val = np.array(0, dtype=np.int32)
        self.run_test_case(func, {_INPUT: x_val}, [], [_OUTPUT], rtol=1e-06)
    def test_keras_hashtable(self):

        feature_cols = [
            tf.feature_column.numeric_column("f_inp", dtype=tf.float32),
            tf.feature_column.indicator_column(
                tf.feature_column.categorical_column_with_vocabulary_list("s_inp", ["a", "b", "z"], num_oov_buckets=1)
            )
        ]
        feature_layer = tf.keras.layers.DenseFeatures(feature_cols)

        input_dict = {}
        input_dict["f_inp"] = tf.keras.Input(name="f_inp", shape=(1,), dtype=tf.float32)
        input_dict["s_inp"] = tf.keras.Input(name="s_inp", shape=(1,), dtype=tf.string)

        inputs = list(input_dict.values())
        standard_features = feature_layer(input_dict)
        hidden1 = tf.keras.layers.Dense(512, activation='relu')(standard_features)
        output = tf.keras.layers.Dense(10, activation='softmax')(hidden1)
        model = tf.keras.Model(inputs=inputs, outputs=output)
        model.compile(optimizer='adam', loss=tf.keras.losses.mean_squared_error)

        inp1 = np.array([[2.], [3.]], dtype=np.float32)
        inp2 = np.array([["a"], ["b"]], dtype=str)  # np.str alias was removed in newer numpy
        if not is_tf2():
            tf.keras.backend.get_session().run(tf.tables_initializer(name='init_all_tables'))
        k_res = model.predict([inp1, inp2])
        spec = (tf.TensorSpec((None, 1), dtype=tf.float32, name="f_inp"),
                tf.TensorSpec((None, 1), tf.string, name="s_inp"))
        output_path = os.path.join(self.test_data_directory, "model.onnx")

        model_proto, _ = tf2onnx.convert.from_keras(
            model, input_signature=spec, opset=self.config.opset, output_path=output_path,
            extra_opset=[helper.make_opsetid("ai.onnx.contrib", 1)])
        output_names = [n.name for n in model_proto.graph.output]

        o_res = self.run_onnxruntime(output_path, {"f_inp": inp1, "s_inp": inp2}, output_names, use_custom_ops=True)
        self.assertAllClose(k_res, o_res[0], rtol=0.3, atol=0.1)
        # make sure the original keras model wasn't trashed
        k_res2 = model.predict([inp1, inp2])
        self.assertAllClose(k_res2, o_res[0], rtol=0.3, atol=0.1)
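run_onnxruntime above is a helper of the test base; the same check can be reproduced with onnxruntime directly. A minimal sketch, assuming onnxruntime and onnxruntime-extensions are installed (the extensions package supplies the ai.onnx.contrib kernels the hash-table lookup is converted to); output_path, output_names, inp1 and inp2 refer to the test above.

import onnxruntime as ort
from onnxruntime_extensions import get_library_path

so = ort.SessionOptions()
so.register_custom_ops_library(get_library_path())  # load the ai.onnx.contrib custom ops
sess = ort.InferenceSession(output_path, so, providers=["CPUExecutionProvider"])
ort_res = sess.run(output_names, {"f_inp": inp1, "s_inp": inp2})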
    def run_test_case(self,
                      func,
                      feed_dict,
                      input_names_with_port,
                      output_names_with_port,
                      rtol=1e-07,
                      atol=1e-5,
                      convert_var_to_const=True,
                      constant_fold=True,
                      check_value=True,
                      check_shape=True,
                      check_dtype=True,
                      process_args=None,
                      onnx_feed_dict=None,
                      graph_validator=None,
                      as_session=False,
                      large_model=False):
        # optional - passed to process_tf_graph
        if process_args is None:
            process_args = {}
        # optional - pass distinct feed_dict to onnx runtime
        if onnx_feed_dict is None:
            onnx_feed_dict = feed_dict
        input_names_with_port = list(feed_dict)
        tf_reset_default_graph()
        graph_def = None

        np.random.seed(1)  # Make it reproducible.
        clean_feed_dict = {utils.node_name(k): v for k, v in feed_dict.items()}
        if is_tf2() and not as_session:
            #
            # use eager to execute the tensorflow func
            #
            # numpy arrays don't work for every op, so convert the inputs to tf.Tensor
            input_tensors = [
                tf.TensorSpec(shape=v.shape,
                              dtype=tf.as_dtype(v.dtype),
                              name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            input_list = [
                tf.convert_to_tensor(v,
                                     dtype=tf.as_dtype(v.dtype),
                                     name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            tf.random.set_seed(1)
            expected = func(*input_list)
            if isinstance(expected, (list, tuple)):
                # list or tuple
                expected = [x.numpy() for x in expected]
            else:
                # single result
                expected = [expected.numpy()]

            # now turn the eager function into a graph
            concrete_func = tf.function(func,
                                        input_signature=tuple(input_tensors))
            concrete_func = concrete_func.get_concrete_function()
            graph_def = from_function(concrete_func,
                                      input_names=list(feed_dict.keys()),
                                      output_names=output_names_with_port,
                                      large_model=large_model)
        else:
            #
            # use graph to execute the tensorflow func
            #
            with tf_session() as sess:
                tf_set_random_seed(1)
                input_list = []
                for k, v in clean_feed_dict.items():
                    input_list.append(
                        tf_placeholder(name=k,
                                       shape=v.shape,
                                       dtype=tf.as_dtype(v.dtype)))
                func(*input_list)
                variables_lib.global_variables_initializer().run()
                tf_tables_initializer().run()
                output_dict = []
                for out_name in output_names_with_port:
                    output_dict.append(sess.graph.get_tensor_by_name(out_name))
                expected = sess.run(output_dict, feed_dict=feed_dict)
                graph_def = freeze_session(sess,
                                           input_names=list(feed_dict.keys()),
                                           output_names=output_names_with_port)

            tf_reset_default_graph()
            with tf_session() as sess:
                tf.import_graph_def(graph_def, name='')
                graph_def = tf_optimize(list(feed_dict.keys()),
                                        output_names_with_port,
                                        graph_def,
                                        fold_constant=constant_fold)

        tf_reset_default_graph()
        with tf_session() as sess:
            const_node_values = None
            if large_model:
                const_node_values = compress_graph_def(graph_def)
            tf.import_graph_def(graph_def, name='')

            if self.config.is_debug_mode:
                model_path = os.path.join(
                    self.test_data_directory,
                    self._testMethodName + "_after_tf_optimize.pb")
                utils.save_protobuf(model_path, graph_def)
                self.logger.debug("created file  %s", model_path)

            g = process_tf_graph(sess.graph,
                                 opset=self.config.opset,
                                 input_names=list(feed_dict.keys()),
                                 output_names=output_names_with_port,
                                 target=self.config.target,
                                 const_node_values=const_node_values,
                                 **process_args)
            g = optimizer.optimize_graph(g)
            actual = self.run_backend(g, output_names_with_port,
                                      onnx_feed_dict, large_model)

        for expected_val, actual_val in zip(expected, actual):
            if check_value:
                self.assertAllClose(expected_val,
                                    actual_val,
                                    rtol=rtol,
                                    atol=atol)
            if check_dtype:
                self.assertEqual(expected_val.dtype, actual_val.dtype)
            # shape check is needed because comparing [] with a scalar can otherwise pass,
            # see https://github.com/numpy/numpy/issues/11071
            if check_shape:
                self.assertEqual(expected_val.shape, actual_val.shape)

        if graph_validator:
            self.assertTrue(graph_validator(g))

        return g
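For reference, a hypothetical minimal test written against run_test_case; _INPUT, _OUTPUT and _TFOUTPUT are the port-name constants this test suite uses, and the test itself is illustrative rather than part of the original file.

    def test_add_one(self):
        def func(x):
            return tf.identity(tf.add(x, 1), name=_TFOUTPUT)
        x_val = np.array([1, 2, 3], dtype=np.int32)
        self.run_test_case(func, {_INPUT: x_val}, [], [_OUTPUT], rtol=1e-07)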
import logging
import os
import unittest

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops import lookup_ops
from common import get_test_config
from tf2onnx import utils
from tf2onnx.tfonnx import process_tf_graph
from tf2onnx import optimizer
from tf2onnx.tf_loader import tf_reset_default_graph, tf_session, tf_placeholder, from_function, freeze_session
from tf2onnx.tf_loader import tf_optimize, is_tf2
from tf2onnx.tf_utils import compress_graph_def, get_hash_table_info
from tf2onnx.graph import ExternalTensorStorage

if is_tf2():
    tf_set_random_seed = tf.compat.v1.set_random_seed
    tf_tables_initializer = tf.compat.v1.tables_initializer
else:
    tf_set_random_seed = tf.set_random_seed
    tf_tables_initializer = tf.tables_initializer


class Tf2OnnxBackendTestBase(unittest.TestCase):
    def setUp(self):
        self.config = get_test_config()
        tf_reset_default_graph()
        # reset name generation on every test
        utils.INTERNAL_NAME = 1
        np.random.seed(1)  # Make it reproducible.
        self.logger = logging.getLogger(self.__class__.__name__)
    def freeze_and_run_tf(self, func, feed_dict, outputs, as_session,
                          premade_placeholders, large_model, constant_fold):
        np.random.seed(1)  # Make it reproducible.
        clean_feed_dict = {utils.node_name(k): v for k, v in feed_dict.items()}
        if is_tf2() and not as_session:
            #
            # use eager to execute the tensorflow func
            #
            # numpy arrays don't work for every op, so convert the inputs to tf.Tensor
            input_tensors = [
                tf.TensorSpec(shape=v.shape,
                              dtype=tf.as_dtype(v.dtype),
                              name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            input_list = [
                tf.convert_to_tensor(v,
                                     dtype=tf.as_dtype(v.dtype),
                                     name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            tf.random.set_seed(1)
            result = func(*input_list)
            if isinstance(result, (list, tuple)):
                # list or tuple
                result = [x.numpy() for x in result]
            else:
                # single result
                result = [result.numpy()]

            # now turn the eager function into a graph
            concrete_func = tf.function(func,
                                        input_signature=tuple(input_tensors))
            concrete_func = concrete_func.get_concrete_function()
            graph_def = from_function(concrete_func,
                                      input_names=list(feed_dict.keys()),
                                      output_names=outputs,
                                      large_model=large_model)
            initialized_tables = None
        else:
            #
            # use graph to execute the tensorflow func
            #
            with tf_session() as sess:
                tf_set_random_seed(1)
                input_list = []
                if not premade_placeholders:
                    for k, v in clean_feed_dict.items():
                        input_list.append(
                            tf_placeholder(name=k,
                                           shape=v.shape,
                                           dtype=tf.as_dtype(v.dtype)))
                func(*input_list)
                variables_lib.global_variables_initializer().run()
                tf_tables_initializer().run()

                output_dict = []
                for out_name in outputs:
                    output_dict.append(sess.graph.get_tensor_by_name(out_name))
                result = sess.run(output_dict, feed_dict=feed_dict)
                graph_def = freeze_session(sess,
                                           input_names=list(feed_dict.keys()),
                                           output_names=outputs)
                table_names, key_dtypes, value_dtypes = get_hash_table_info(
                    graph_def)
                initialized_tables = {}
                for n, k_dtype, val_dtype in zip(table_names, key_dtypes,
                                                 value_dtypes):
                    h = lookup_ops.hash_table_v2(k_dtype,
                                                 val_dtype,
                                                 shared_name=n)
                    k, v = lookup_ops.lookup_table_export_v2(
                        h, k_dtype, val_dtype)
                    initialized_tables[n] = (sess.run(k), sess.run(v))

            tf_reset_default_graph()
            with tf_session() as sess:
                tf.import_graph_def(graph_def, name='')
                graph_def = tf_optimize(list(feed_dict.keys()),
                                        outputs,
                                        graph_def,
                                        fold_constant=constant_fold)

        model_path = os.path.join(
            self.test_data_directory,
            self._testMethodName + "_after_tf_optimize.pb")
        utils.save_protobuf(model_path, graph_def)
        self.logger.debug("created file  %s", model_path)
        return result, graph_def, initialized_tables
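The triple returned above feeds the conversion step. A hypothetical sketch of that follow-up call, assuming process_tf_graph accepts an initialized_tables argument as it does in recent tf2onnx releases; feed_dict, outputs, graph_def and initialized_tables come from the method above.

with tf_session() as sess:
    tf.import_graph_def(graph_def, name='')
    g = process_tf_graph(sess.graph,
                         input_names=list(feed_dict.keys()),
                         output_names=outputs,
                         initialized_tables=initialized_tables)
    g = optimizer.optimize_graph(g)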
def skip_tf2(message=""):
    """ Skip if tf_version > max_required_version """
    reason = _append_message("test needs to be fixed for tf-2.x", message)
    return unittest.skipIf(tf_loader.is_tf2(), reason)
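Hypothetical usage of the decorator above on a test that only makes sense under tf-1.x:

@skip_tf2("relies on tf-1.x placeholder semantics")
def test_legacy_graph_mode(self):
    ...  # test body elided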
    def freeze_and_run_tf(self, func, feed_dict, outputs, as_session,
                          premade_placeholders, large_model):
        np.random.seed(1)  # Make it reproducible.
        clean_feed_dict = {utils.node_name(k): v for k, v in feed_dict.items()}
        if is_tf2() and not as_session:
            #
            # use eager to execute the tensorflow func
            #
            # numpy arrays don't work for every op, so convert the inputs to tf.Tensor
            input_tensors = [
                tf.TensorSpec(shape=v.shape,
                              dtype=tf.as_dtype(v.dtype),
                              name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            input_list = [
                tf.convert_to_tensor(v,
                                     dtype=tf.as_dtype(v.dtype),
                                     name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            tf.random.set_seed(1)
            result = func(*input_list)
            if isinstance(result, (list, tuple)):
                # list or tuple
                result = [x.numpy() for x in result]
            else:
                # single result
                result = [result.numpy()]

            # now turn the eager function into a graph
            concrete_func = tf.function(func,
                                        input_signature=tuple(input_tensors))
            concrete_func = concrete_func.get_concrete_function()
            graph_def = from_function(concrete_func,
                                      input_names=list(feed_dict.keys()),
                                      output_names=outputs,
                                      large_model=large_model)
            initialized_tables = None
        else:
            #
            # use graph to execute the tensorflow func
            #
            with tf_session() as sess:
                tf_set_random_seed(1)
                input_list = []
                if not premade_placeholders:
                    for k, v in clean_feed_dict.items():
                        input_list.append(
                            tf_placeholder(name=k,
                                           shape=v.shape,
                                           dtype=tf.as_dtype(v.dtype)))
                func(*input_list)
                variables_lib.global_variables_initializer().run()
                tf_tables_initializer().run()

                output_dict = []
                for out_name in outputs:
                    output_dict.append(sess.graph.get_tensor_by_name(out_name))
                result = sess.run(output_dict, feed_dict=feed_dict)
                graph_def = freeze_session(sess,
                                           input_names=list(feed_dict.keys()),
                                           output_names=outputs)
                table_info = get_hash_table_info(graph_def)
                initialized_tables = {}
                for info in table_info:
                    if info.shared_name is None:
                        continue
                    h = lookup_ops.hash_table_v2(info.key_dtype,
                                                 info.val_dtype,
                                                 shared_name=info.shared_name)
                    k, v = lookup_ops.lookup_table_export_v2(
                        h, info.key_dtype, info.val_dtype)
                    initialized_tables[info.shared_name] = (sess.run(k),
                                                            sess.run(v))

            tf_reset_default_graph()
            with tf_session() as sess:
                tf.import_graph_def(graph_def, name='')
                graph_def = tf_optimize(list(feed_dict.keys()), outputs,
                                        graph_def)

        return result, graph_def, initialized_tables