def func(i):
    # tf2 works a little differently than tf1 - that is why there is an is_tf2() here
    if is_tf2():
        one = tf.constant(np.array([1], dtype=np.int32))
    else:
        one = tf.constant(np.array(1, dtype=np.int32))
    c = lambda i: tf.less(i, 10)
    b = lambda i: tf.add(i, one)
    r = tf.while_loop(c, b, [i])
    if is_tf2():
        r = tf.reshape(r, [-1])
    return tf.identity(r, name=_TFOUTPUT)
def func(i):
    # tf2 works a little differently than tf1 - that is why there is an is_tf2() here
    if is_tf2():
        one = tf.constant(np.array([1], dtype=np.int32))
    else:
        one = tf.constant(np.array(1, dtype=np.int32))
    c = lambda i: tf.logical_and(tf.less(i, 10), tf.greater_equal(i, 3))
    b = lambda i: tf.add(i, one)
    r = tf.while_loop(c, b, [i])
    if is_tf2():
        r = tf.reshape(r, [-1])
    return tf.identity(r, name="output")
def test_simple_while_loop(self):
    def func(i):
        # tf2 works a little differently than tf1 - that is why there is an is_tf2() here
        if is_tf2():
            one = tf.constant(np.array([1], dtype=np.int32))
        else:
            one = tf.constant(np.array(1, dtype=np.int32))
        c = lambda i: tf.less(i, 10)
        b = lambda i: tf.add(i, one)
        r = tf.while_loop(c, b, [i])
        if is_tf2():
            r = tf.reshape(r, [-1])
        return tf.identity(r, name=_TFOUTPUT)

    if is_tf2():
        x_val = np.array([0], dtype=np.int32)
    else:
        x_val = np.array(0, dtype=np.int32)
    self.run_test_case(func, {_INPUT: x_val}, [], [_OUTPUT], rtol=1e-06)
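# Illustrative sketch (not part of the test file above): converting a while-loop function like
# the one in test_simple_while_loop straight to ONNX. This assumes tf-2.x and that the package
# imported elsewhere in these tests as `tf2onnxnightly` exposes the same convert.from_function
# entry point as upstream tf2onnx; the opset value below is an arbitrary example choice.
import tensorflow as tf
import tf2onnxnightly


@tf.function
def _loop_func(i):
    one = tf.constant([1], dtype=tf.int32)
    c = lambda i: tf.less(i, 10)
    b = lambda i: tf.add(i, one)
    r = tf.while_loop(c, b, [i])
    return tf.reshape(r, [-1])


_spec = (tf.TensorSpec([1], tf.int32, name="input"),)
_model_proto, _ = tf2onnxnightly.convert.from_function(_loop_func, input_signature=_spec, opset=13)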
# SPDX-License-Identifier: Apache-2.0

"""Unit Tests for layered lstm"""

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops

from backend_test_base import Tf2OnnxBackendTestBase
from common import unittest_main, check_lstm_count, check_opset_after_tf_version, skip_tf2
from tf2onnxnightly.tf_loader import is_tf2

# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,cell-var-from-loop
# pylint: disable=invalid-name

if is_tf2():
    LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
    MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
    dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
else:
    LSTMCell = tf.contrib.rnn.LSTMCell
    LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
    MultiRNNCell = tf.contrib.rnn.MultiRNNCell
    dynamic_rnn = tf.nn.dynamic_rnn


class LSTMLayeredTests(Tf2OnnxBackendTestBase):

    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    @skip_tf2()
    def test_layered_lstm(self):
        units = 5
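# Illustrative sketch only (module-level, separate from the truncated test above): how the
# LSTMCell / MultiRNNCell / dynamic_rnn aliases defined at the top of this file are typically
# wired together. The layer count, initializer value, and shapes are example assumptions, not
# the repository's actual test body.
def _layered_lstm_sketch(x, units=5, num_layers=2):
    # x: float32 tensor shaped [batch, time, features]
    initializer = init_ops.constant_initializer(0.5)
    cells = [LSTMCell(units, initializer=initializer) for _ in range(num_layers)]
    stacked = MultiRNNCell(cells)
    # outputs: [batch, time, units]; states: one LSTMStateTuple per layer
    outputs, states = dynamic_rnn(stacked, x, dtype=tf.float32)
    return outputs, states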
def freeze_and_run_tf(self, func, feed_dict, outputs, as_session, premade_placeholders, large_model,
                      constant_fold):
    np.random.seed(1)  # Make it reproducible.
    clean_feed_dict = {utils.node_name(k): v for k, v in feed_dict.items()}
    if is_tf2() and not as_session:
        #
        # use eager to execute the tensorflow func
        #
        # numpy doesn't work for all ops, make it tf.Tensor()
        input_tensors = [tf.TensorSpec(shape=v.shape, dtype=tf.as_dtype(v.dtype), name=utils.node_name(k))
                         for k, v in feed_dict.items()]
        input_list = [tf.convert_to_tensor(v, dtype=tf.as_dtype(v.dtype), name=utils.node_name(k))
                      for k, v in feed_dict.items()]
        tf.random.set_seed(1)
        result = func(*input_list)
        if isinstance(result, (list, tuple)):
            # list or tuple
            result = [x.numpy() for x in result]
        else:
            # single result
            result = [result.numpy()]

        # now make the eager functions a graph
        concrete_func = tf.function(func, input_signature=tuple(input_tensors))
        concrete_func = concrete_func.get_concrete_function()
        graph_def = from_function(concrete_func,
                                  input_names=list(feed_dict.keys()),
                                  output_names=outputs,
                                  large_model=large_model)
        initialized_tables = None
    else:
        #
        # use graph to execute the tensorflow func
        #
        with tf_session() as sess:
            tf_set_random_seed(1)
            input_list = []
            if not premade_placeholders:
                for k, v in clean_feed_dict.items():
                    input_list.append(tf_placeholder(name=k, shape=v.shape, dtype=tf.as_dtype(v.dtype)))
            func(*input_list)
            variables_lib.global_variables_initializer().run()
            tf_tables_initializer().run()
            output_dict = []
            for out_name in outputs:
                output_dict.append(sess.graph.get_tensor_by_name(out_name))
            result = sess.run(output_dict, feed_dict=feed_dict)
            graph_def = freeze_session(sess, input_names=list(feed_dict.keys()), output_names=outputs)
            table_names, key_dtypes, value_dtypes = get_hash_table_info(graph_def)
            initialized_tables = {}
            for n, k_dtype, val_dtype in zip(table_names, key_dtypes, value_dtypes):
                h = lookup_ops.hash_table_v2(k_dtype, val_dtype, shared_name=n)
                k, v = lookup_ops.lookup_table_export_v2(h, k_dtype, val_dtype)
                initialized_tables[n] = (sess.run(k), sess.run(v))

        tf_reset_default_graph()
        with tf_session() as sess:
            tf.import_graph_def(graph_def, name='')
            graph_def = tf_optimize(list(feed_dict.keys()), outputs, graph_def, fold_constant=constant_fold)

    model_path = os.path.join(self.test_data_directory, self._testMethodName + "_after_tf_optimize.pb")
    utils.save_protobuf(model_path, graph_def)
    self.logger.debug("created file %s", model_path)

    return result, graph_def, initialized_tables
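# Illustrative sketch (not part of the original file): how a test method inside a
# Tf2OnnxBackendTestBase subclass could call freeze_and_run_tf directly. The tensor names
# "input:0"/"output:0" and the flag values are assumptions for the example; the tests shown
# above go through run_test_case instead.
def _example_freeze_and_run(self, func):
    x_val = np.array([0], dtype=np.int32)
    tf_result, graph_def, initialized_tables = self.freeze_and_run_tf(
        func, {"input:0": x_val}, ["output:0"],
        as_session=False, premade_placeholders=False,
        large_model=False, constant_fold=True)
    return tf_result, graph_def, initialized_tables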
def skip_tf2(message=""): """ Skip if tf_version > max_required_version """ reason = _append_message("test needs to be fixed for tf-2.x", message) return unittest.skipIf(tf_loader.is_tf2(), reason)