def test_write_and_gather(self, use_cpu_only, backend, size_dynamic_shape):
    """TensorArray round-trip: write two scalars-in-a-vector, gather both back."""
    size, dynamic_size, element_shape = size_dynamic_shape

    @make_tf_graph([(1,), (1,)])
    def build_model(x, y):
        ta = tf.TensorArray(
            tf.float32,
            size=size,
            dynamic_size=dynamic_size,
            element_shape=element_shape,
        )
        # TensorArray ops are functional: each write returns a new handle.
        ta = ta.write(0, x)
        ta = ta.write(1, y)
        return ta.gather(indices=[0, 1])

    model, inputs, outputs = build_model
    feed = [
        np.array([3.14], dtype=np.float32),
        np.array([6.17], dtype=np.float32),
    ]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_fused_batch_norm_v3(self, use_cpu_only, backend, epsilon):
    """FusedBatchNormV3 in inference mode with random per-channel attributes."""
    input_shape = np.random.randint(low=1, high=4, size=4)
    # scale/offset/mean/variance are per-channel: sized by the last input dim.
    channel_shape = [list(input_shape)[-1]]
    mean = random_gen(shape=channel_shape, rand_min=-1.0, rand_max=1.0)
    variance = random_gen(shape=channel_shape, rand_min=0.0, rand_max=10.0)
    offset = random_gen(shape=channel_shape, rand_min=1.0, rand_max=10.0)
    scale = random_gen(shape=channel_shape, rand_min=-1.0, rand_max=1.0)

    @make_tf_graph([input_shape])
    def build_model(x):
        # [0] keeps only the normalized output; the op also returns
        # updated statistics that this test does not check.
        return tf.raw_ops.FusedBatchNormV3(
            x=x,
            scale=scale,
            offset=offset,
            mean=mean,
            variance=variance,
            epsilon=epsilon,
            is_training=False,
        )[0]

    model, inputs, outputs = build_model
    feed = [random_gen(shape=input_shape)]
    # Looser tolerances: batch-norm mixes large random variances with epsilon.
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
        atol=1e-2,
        rtol=1e-3,
    )
def test_add_v2(self, use_cpu_only, backend, rank):
    """AddV2 with randomized shape broadcasting between the two operands."""
    x_shape = list(np.random.randint(low=2, high=5, size=rank))
    y_shape = x_shape[:]
    # Randomly collapse some of y's dims to 1 to exercise broadcasting.
    for axis in range(rank):
        if np.random.randint(4) == 0:
            y_shape[axis] = 1
    # Occasionally give y one extra leading unit dim (rank broadcast).
    if np.random.randint(2) == 0:
        y_shape = [1] + y_shape

    # fp16 path is only exercised off-CPU.
    dtype = np.float32 if use_cpu_only else np.float16

    @make_tf_graph([x_shape, y_shape])
    def build_model(x, y):
        return tf.raw_ops.AddV2(x=x, y=y)

    model, inputs, outputs = build_model
    feed = [
        np.random.randint(low=-1000, high=1000, size=x_shape).astype(dtype),
        np.random.randint(low=-1000, high=1000, size=y_shape).astype(dtype),
    ]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_while_loop_power(self, use_cpu_only, backend):
    """AutoGraph while-loop that squares its input three times (x ** 8)."""

    @make_tf_graph([(1,)])
    def build_model(x):
        # AutoGraph lowers this Python loop into tf.while_loop.
        count = 0
        while count < 3:
            x *= x
            count += 1
        return x

    model, inputs, outputs = build_model
    feed = [np.array([2.0], dtype=np.float32)]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_if_unary_double_if_positive_else_square(self, use_cpu_only, backend):
    """AutoGraph cond: double the input when non-negative, square it otherwise."""

    @make_tf_graph([(1,)])
    def build_model(x):
        # AutoGraph lowers this Python `if` into tf.cond.
        if x >= 0:
            result = x + x
        else:
            result = x * x
        return result

    model, inputs, outputs = build_model
    feed = [np.array([2], dtype=np.float32)]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_if_unary_const(self, use_cpu_only, backend):
    """AutoGraph cond against a constant threshold: subtract or add 0.5."""

    @make_tf_graph([(1,)])
    def build_model(x):
        # AutoGraph lowers this Python `if` into tf.cond.
        if x > 0.5:
            result = x - 0.5
        else:
            result = x + 0.5
        return result

    model, inputs, outputs = build_model
    feed = [np.array([0.7], dtype=np.float32)]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_while_loop_nested_body(self, use_cpu_only, backend):
    """Nested AutoGraph while-loops; the inner loop mutates the outer state."""

    @make_tf_graph([(1,)])
    def build_model(x):
        # AutoGraph lowers both Python loops into nested tf.while_loop ops.
        i, j = 0, 10
        while i < j:
            # Inner loop: runs while 2*i < i + 2, i.e. while i < 2.
            while 2 * i < i + 2:
                i += 1
                x -= 1
            i += 2
            x *= 2
        return x

    model, inputs, outputs = build_model
    feed = [np.array([9.0], dtype=np.float32)]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_if_binary_add_if_else_mul(self, use_cpu_only, backend):
    """AutoGraph cond with two inputs: add branch when x > y, multiply otherwise."""

    @make_tf_graph([(1,), (1,)])
    def build_model(x, y):
        # AutoGraph lowers this Python `if` into tf.cond; y only feeds
        # the predicate, both branches operate on x.
        if x > y:
            result = x + x
        else:
            result = x * x
        return result

    model, inputs, outputs = build_model
    feed = [
        np.array([3], dtype=np.float32),
        np.array([7], dtype=np.float32),
    ]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_partial_element_shape(self, use_cpu_only, backend, size_dynamic_shape):
    """TensorArray.scatter with a (possibly partial) element_shape, read back each row."""
    size, dynamic_size, element_shape = size_dynamic_shape

    @make_tf_graph([(3, 1, 8)])
    def build_model(x):
        ta = tf.TensorArray(
            tf.float32,
            size=size,
            dynamic_size=dynamic_size,
            element_shape=element_shape,
        )
        # scatter splits x along axis 0 into three (1, 8) elements.
        ta = ta.scatter(indices=[0, 1, 2], value=x)
        return ta.read(0), ta.read(1), ta.read(2)

    model, inputs, outputs = build_model
    feed = [np.random.rand(3, 1, 8).astype(np.float32)]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_unstack_and_read(self, use_cpu_only, backend, size_dynamic_shape):
    """TensorArray.unstack a (3, 1) tensor, then read each element individually."""
    size, dynamic_size, element_shape = size_dynamic_shape

    @make_tf_graph([(3, 1)])
    def build_model(x):
        ta = tf.TensorArray(
            tf.float32,
            size=size,
            dynamic_size=dynamic_size,
            element_shape=element_shape,
        )
        # unstack splits x along axis 0 into three (1,) elements.
        ta = ta.unstack(x)
        return ta.read(0), ta.read(1), ta.read(2)

    model, inputs, outputs = build_model
    feed = [np.array([[3.14], [6.17], [12.14]], dtype=np.float32)]
    run_compare_tf(
        model,
        dict(zip(inputs, feed)),
        outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )