def test_tensor_rearrange():
    tensor_rearrange = TensorRearrange(seed=713)
    in_node_a = tensor_rearrange.get_placeholder("input_0")
    in_node_b = tensor_rearrange.get_placeholder("input_1")
    in_node_c = tensor_rearrange.get_placeholder("input_2")
    stitched = tf.dynamic_stitch(
        [[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]],
        [in_node_a, in_node_b, in_node_c])  # should be 11,5,4
    list_of_parts = tf.dynamic_partition(
        tf.transpose(stitched, perm=[1, 2, 0]),
        [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]],
        num_partitions=4
    )  # after permute becomes 5,4,11, return all partitions 5,11
    node_a = tf.div(list_of_parts[0], list_of_parts[1])
    node_b = tf.divide(list_of_parts[2], list_of_parts[3])
    trace_node = tf.trace(node_a) + node_b  # there is a broadcast here
    out_node = tf.cast(tf.count_nonzero(trace_node),
                       dtype=tf.float32) + tf.Variable(
                           tf.random_normal(shape=(2, 3)))

    placeholders = [in_node_a, in_node_b, in_node_c]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="partition_stitch_misc")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(tensor_rearrange.get_test_data()) \
        .build_save_frozen_graph()
# Example 2
def test_tensor_dot_misc():
    tensor_dot_misc = TensorDotMisc(seed=713,
                                    feature_size_a=[36, 3, 4, 5],
                                    feature_size_b=[5, 5, 3, 4])
    in_node_a = tensor_dot_misc.get_placeholder("input_a")
    in_node_b = tensor_dot_misc.get_placeholder("input_b")
    tensor_dot_node = tf.tensordot(in_node_a, in_node_b,
                                   axes=[[3, 1], [1, 2]])  # 36,4,5,4
    permute_axis = tf.transpose(tensor_dot_node, perm=[0, 1, 3, 2])  # 36,4,4,5
    batch_to_space_node_a = tf.batch_to_space_nd(permute_axis,
                                                 block_shape=(1, 4),
                                                 crops=[[0, 0],
                                                        [1, 2]])  # 9,4,13,5
    batch_to_space_node_b = tf.batch_to_space(batch_to_space_node_a,
                                              block_size=3,
                                              crops=[[1, 5], [4,
                                                              3]])  # 1,6,32,5
    space_to_depth_node = tf.round(
        tf.space_to_depth(batch_to_space_node_b, block_size=2))  # 1,3,16,20
    some_add = tf.add(tf.Variable(tf.random_normal(
        (16, 20), dtype=tf.float64)), space_to_depth_node)  # broadcast
    out_node = tf.round(some_add, name="output")

    placeholders = [in_node_a, in_node_b]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="tensor_dot_misc")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(tensor_dot_misc.get_test_data()) \
        .build_save_frozen_graph()
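
# A standalone sketch (not from the original test) of the tensordot contraction
# used above: contracting in_node_a's axes (3, 1) against in_node_b's axes
# (1, 2) leaves a's free axes [36, 4] followed by b's free axes [5, 4], hence
# the (36, 4, 5, 4) shape noted in the comment.
def _sketch_tensordot_axes():
    a = tf.constant(np.random.rand(36, 3, 4, 5))
    b = tf.constant(np.random.rand(5, 5, 3, 4))
    td = tf.tensordot(a, b, axes=[[3, 1], [1, 2]])
    print(td.shape)  # (36, 4, 5, 4)
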
# Example 3
def test_add_n():
    ops = ["add", "add_n"]
    addn = AddN(seed=13)
    in_node_0 = addn.get_placeholder("input_0")
    in_node_1 = addn.get_placeholder("input_1")
    k0 = tf.Variable(tf.random_normal([3, 3]), name="in0", dtype=tf.float32)

    constr = DifferentiableMathOps(in_node_0, in_node_1)

    for op in ops:
        print "Running " + op
        answer = constr.execute(op)
        print answer
        constr.set_a(answer)

    out_node = tf.rsqrt(answer, name="output")

    placeholders = [in_node_0, in_node_1]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="add_n")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(addn.get_test_data()) \
        .build_save_frozen_graph()
# Example 4
def test_tensor_misc():
    tensor_scatter_misc = TensorOpsMisc(seed=713,
                                        feature_size_a=[12, 3, 4, 3],
                                        feature_size_b=[3, 3])
    in_node_a = tensor_scatter_misc.get_placeholder("input_a")  # 12,3,4,3
    in_node_a_erf = tf.erf(in_node_a)
    in_node_b = tensor_scatter_misc.get_placeholder("input_b")
    a_reversed_seq = tf.reverse_sequence(in_node_a_erf, batch_axis=2, seq_axis=3, seq_lengths=[2, 1, 3, 2])
    reduced = tf.reduce_sum(tf.round(a_reversed_seq), axis=(0, 2))
    erfc_plus = tf.erfc(in_node_b) + reduced + tf.cast(tf.eye(3), dtype=tf.float64)
    '''
    # Can't freeze graphs with scatter because of the same issue as that with batch norm
    some_var = tf.Variable(tf.random_normal(shape=[12, 3, 3], dtype=tf.float64), name="some_3x3")
    scatter_add_var = tf.scatter_add(some_var, indices=[2, 1, 0, 0],
                                     updates=tf.constant(np.random.uniform(size=(4, 3, 3)), dtype=tf.float64))
    after_scatter = tf.reduce_sum(scatter_add_var, axis=0) + tf.log1p(erfc_plus)
    scatter_nd_var = tf.scatter_nd([[0], [1], [3], [2]], updates=tf.constant(np.random.uniform(size=(4, 3, 2))),
                                   shape=tf.constant([5, 3, 2]))
    out_node = tf.concat([tf.reshape(scatter_nd_var, shape=[10, 3]), after_scatter], axis=0, name="output")
    '''
    some_var = tf.Variable(tf.random_normal(shape=[3, 3], dtype=tf.float64))
    out_node = tf.add(tf.log1p(erfc_plus), some_var, name="output")

    placeholders = [in_node_a, in_node_b]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="tensor_ops_misc")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(tensor_scatter_misc.get_test_data()) \
        .build_save_frozen_graph()
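
# A small standalone sketch (not part of the test above) of how
# tf.reverse_sequence works: for each slice along batch_axis, only the first
# seq_lengths[i] entries along seq_axis are reversed; the rest are left as-is.
def _sketch_reverse_sequence():
    x = tf.constant([[1, 2, 3, 4],
                     [5, 6, 7, 8]])
    y = tf.reverse_sequence(x, seq_lengths=[3, 2], seq_axis=1, batch_axis=0)
    with tf.Session() as sess:
        print(sess.run(y))  # [[3 2 1 4]
                            #  [6 5 7 8]]
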
# Example 5
def test_unstack():
    arrs = tf.Variable(tf.constant(np.reshape(np.linspace(1, 25, 25), (5, 5))))
    unstack_list = tf.unstack(arrs, axis=0)
    out_node = tf.reduce_sum(unstack_list, axis=0, name="output")
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="unstack")
    tfp.set_placeholders([]) \
        .set_output_tensors([out_node]) \
        .set_test_data({}) \
        .build_save_frozen_graph()
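
# A quick standalone check (not part of the test) of what the unstack +
# reduce_sum pattern above computes: summing the list of row slices along the
# new leading axis is the same as a column-wise reduce_sum of the matrix.
def _sketch_unstack_sum():
    m = tf.constant(np.reshape(np.linspace(1, 25, 25), (5, 5)))
    rows = tf.unstack(m, axis=0)          # 5 tensors of shape (5,)
    summed = tf.reduce_sum(rows, axis=0)  # same result as tf.reduce_sum(m, axis=0)
    with tf.Session() as sess:
        print(sess.run(summed))  # [55. 60. 65. 70. 75.]
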
# Example 6
def test_stack():
    arrs = []
    for i in range(1, 5, 1):
        arrs.append(tf.Variable(tf.constant(5, dtype=tf.float32, shape=(1, 1), name=str(i) + '_num')))
    out_node = tf.stack(arrs, 0, name='output')
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="stack")
    tfp.set_placeholders([]) \
        .set_output_tensors(predictions) \
        .set_test_data({}) \
        .build_save_frozen_graph()
# Example 7
def test_mathtransform():
    ops = [
        "add",
        # "add_n",
        "max",
        "min",
        "abs",
        "cos",
        "acos",
        "add",
        "max",
        "min",
        "abs",
        "ceil",
        "min",
        # "cross",
        "exp",
        "log",
        # "log1p",
        # "mod",
        # "mathmul",
        # "cumprod",
        # "cumsum",
        # "erf",
        # "count_nonzero",
        # "greater",
        # "greater_equal",
        # "equal"
    ]
    math_transform = MathTransform(seed=19)
    in_node_0 = math_transform.get_placeholder("input_0", data_type=tf.float32)
    in_node_1 = math_transform.get_placeholder("input_1", data_type=tf.float32)
    k0 = tf.Variable(tf.random_normal([8, 8]), name="in0")
    constr = DifferentiableMathOps(in_node_0, in_node_1)

    for op in ops:
        print "Running " + op
        answer = constr.execute(op)
        print answer
        constr.set_a(answer)

    out_node = tf.rsqrt(answer, name="output")

    placeholders = [in_node_0, in_node_1]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="transform_0")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(math_transform.get_test_data()) \
        .build_save_frozen_graph()
# Example 8
def test_simple():
    simple_run = SimpleRun(seed=19)
    in_node_1 = tf.Variable([[0, 2], [1, -1]])
    out_node = tf.one_hot(in_node_1, 3, axis=1, off_value=-2.0, name="output")

    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="simple_run")
    tfp.set_placeholders([]) \
        .set_output_tensors(predictions) \
        .set_test_data(simple_run.get_test_data()) \
        .build_save_frozen_graph()
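
# For reference (not part of the test), the one_hot call above with axis=1
# inserts the depth dimension in the middle, so the output has shape (2, 3, 2),
# and the index -1 produces a column of off_value only.
def _sketch_one_hot_axis1():
    oh = tf.one_hot([[0, 2], [1, -1]], 3, axis=1, off_value=-2.0)
    with tf.Session() as sess:
        print(sess.run(oh))
        # [[[ 1. -2.]
        #   [-2. -2.]
        #   [-2.  1.]]
        #  [[-2. -2.]
        #   [ 1. -2.]
        #   [-2. -2.]]]
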
# Example 9
def test_simple_while():
    i1 = tf.Variable(tf.constant(0), name='loop_var')
    c = lambda i: tf.less(i, 10)
    b = lambda i: tf.add(i, 1)
    r = tf.while_loop(c, b, [i1])
    out_node = tf.identity(r, name="output")
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="simple_while")
    tfp.set_placeholders([]) \
        .set_output_tensors(predictions) \
        .set_test_data({}) \
        .build_save_frozen_graph()
# Example 10
def test_mathops_zero():
    mathops_0 = MathOpsZero(seed=19)
    in_node_0 = tf.Variable(tf.random_normal([3, 3]), name="in_0", dtype=tf.float32)
    n0 = tf.add(np.arange(-4., 5., 1.).astype(np.float32).reshape(3, 3), in_node_0)
    n1 = tf.abs(n0)
    n2 = tf.rsqrt(n1)
    out_node = tf.tanh(n2, name="output")
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_00")
    tfp.set_placeholders([]) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_0.get_test_data()) \
        .build_save_frozen_graph()
# Example 11
def test_expand_dim():
    expand_dim_t = ExpandDimT(seed=19)
    in_node_0 = expand_dim_t.get_placeholder("input_0")
    k0 = tf.Variable(tf.random_normal([3, 1, 4], dtype=tf.float64), name="in0")
    in0_expanded = tf.expand_dims(in_node_0, axis=-2)
    out_node = tf.add(in0_expanded, k0, name="output")

    placeholders = [in_node_0]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="expand_dim")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(expand_dim_t.get_test_data()) \
        .build_save_frozen_graph()
# Example 12
def test_mat_mul_order():
    simple_m = MatMulOrder(seed=713)
    in0 = simple_m.get_placeholder("input_0")
    in1 = simple_m.get_placeholder("input_1")
    k0 = tf.Variable(tf.random_normal([3, 3], dtype=tf.float64), name="in0")
    out_node = tf.matmul(k0, tf.matmul(in0, in1), name="output")

    placeholders = [in0, in1]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="math_mul_order")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(simple_m.get_test_data()) \
        .build_save_frozen_graph()
# Example 13
def test_mathops_two():
    ops = [
        "acos",
        "sin",
        "asin",
        "sinh",
        "floor",
        "asinh",
        "min",
        "cos",
        "add",
        "acosh",
        "atan",
        "atan2",
        "add",
        "elu",
        "cosh",
        "mod",
        "cross"
        # , "diagpart"
        # , "diag"
        ,
        "expm",
        "asinh",
        "atanh"
    ]
    mathops_2 = MathOpsTwo(seed=19)
    in_node_0 = mathops_2.get_placeholder("input_0", data_type=tf.float32)
    in_node_1 = mathops_2.get_placeholder("input_1", data_type=tf.float32)
    k0 = tf.Variable(tf.random_normal([8, 8]), name="in0")
    constr = DifferentiableMathOps(in_node_0, in_node_1)

    for op in ops:
        print("Running " + op)
        answer = constr.execute(op)
        print(answer)
        constr.set_a(answer)

    out_node = tf.floormod(constr.a, constr.b, name="output")

    placeholders = [in_node_0, in_node_1]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_02")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_2.get_test_data()) \
        .build_save_frozen_graph()
# Example 14
def test_mathops_nine():
    mathops_9 = MathOpsNine(seed=19)
    in_node_1 = mathops_9.get_placeholder("input_1")
    in_node_2 = mathops_9.get_placeholder("input_2")
    in_node_3 = mathops_9.get_placeholder("input_3")
    n1 = tf.nn.softsign(in_node_1)
    n2 = tf.nn.softplus(in_node_2)
    n3 = tf.concat([n1, n2, in_node_3], axis=0)
    n4 = tf.nn.softmax(n3)
    w = tf.Variable(tf.random_normal([10, 10], dtype=tf.float64), name="w")
    n5 = tf.nn.softmax(w)
    n6 = tf.nn.softmax_cross_entropy_with_logits(labels=n5, logits=n4)
    n7 = tf.nn.log_softmax(n6)
    n8 = tf.nn.sigmoid_cross_entropy_with_logits(labels=n5, logits=n4)
    n9 = tf.nn.weighted_cross_entropy_with_logits(targets=n5,
                                                  logits=n4,
                                                  pos_weight=10)

    out_node_1 = tf.identity(n7, name="output_1")
    out_node_2 = tf.identity(n8, name="output_2")
    out_node_3 = tf.identity(n9, name="output_3")
    placeholders = [in_node_1, in_node_2, in_node_3]
    predictions = [out_node_1, out_node_2, out_node_3]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_09")
    predictions_after_freeze = tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_9.get_test_data()) \
        .build_save_frozen_graph()
    print(predictions_after_freeze[0].shape)
    print(predictions_after_freeze[1].shape)
    print(predictions_after_freeze[2].shape)
# Example 15
def test_mathops_seven():
    mathops_7 = MathOpsSeven(seed=19)
    in_node_1 = mathops_7.get_placeholder("input_1", data_type=tf.int32)
    in_node_2 = mathops_7.get_placeholder("input_2")
    w = tf.Variable(tf.random_normal([8, 10], dtype=tf.float64), name="w")
    b = tf.cast(tf.invert_permutation(in_node_1), dtype=tf.float64)
    n1 = tf.nn.xw_plus_b(in_node_2, w, b)
    n2 = tf.cast(tf.fill([10, 10], 1.2345), dtype=tf.float64)
    n3 = tf.add(n1, n2)
    n4 = tf.nn.relu6(n3)
    n5 = tf.nn.moments(n4, axes=[1, 0], keep_dims=True)
    n6 = tf.meshgrid(
        n5, tf.Variable(tf.random_normal([2, 1, 1], dtype=tf.float64)))
    n7 = tf.parallel_stack([n6[1], n6[0], n6[1]])  # (3,2,2)
    n8 = tf.nn.normalize_moments(n7[0], n7[1], n7[2], None)  # (2,2,2)
    out_node = tf.pad(n8,
                      tf.constant([[1, 1], [1, 1], [1, 1]]),
                      "REFLECT",
                      name="output")

    placeholders = [in_node_1, in_node_2]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_07")
    predictions_after_freeze = tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_7.get_test_data()) \
        .build_save_frozen_graph()
    print(predictions_after_freeze[0].shape)
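
# A one-liner sketch (not part of the test) of tf.invert_permutation, which
# supplies the bias vector above: for a permutation x it returns y with
# y[x[i]] = i, so input_1 is presumably a length-10 permutation to match the
# 10-unit bias expected by xw_plus_b.
def _sketch_invert_permutation():
    with tf.Session() as sess:
        print(sess.run(tf.invert_permutation([3, 0, 1, 2])))  # [1 2 3 0]
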
# Example 16
def test_mathops_ten():
    mathops_10 = MathOpsTen(seed=19)
    in_node_1 = mathops_10.get_placeholder("input_1")
    #in_node_2 = mathops_8.get_placeholder("input_2")
    #n0 = tf.is_finite(in_node_1)
    #n1 = tf.reduce_all(n0)
    #n2 = tf.cast(n0, dtype=tf.float64)
    #n3 = tf.cast(n1, dtype=tf.float64)
    n4 = tf.add(in_node_1, in_node_1)
    #n5 = tf.cast(tf.truncatediv(tf.cast(n4, dtype=tf.int32), 3), dtype=tf.float64)
    n6 = tf.reciprocal(n4)  # should be inf now
    #n7 = tf.cast(tf.is_inf(n6), dtype=tf.float64)
    #n8 = tf.cast(tf.is_nan(n6), dtype=tf.float64)
    n9 = tf.squared_difference(n4, n6)
    w = tf.Variable(tf.random_normal([4, 3], dtype=tf.float64), name="w")
    n10 = tf.reverse(w, axis=[-1])
    n11 = tf.add(n10, n9)
    #n12 = tf.reciprocal(tf.multiply(n11, [[0, 1, 1], [1, 1, 1], [0, 1, 0], [1, 0, 0]]))
    #n13 = tf.reduce_any(tf.is_inf(n12))
    #n14 = tf.cast(n13, dtype=tf.float64)

    out_node = tf.identity(n11, name="output")
    placeholders = [in_node_1]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_11")
    predictions_after_freeze = tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_10.get_test_data()) \
        .build_save_frozen_graph()
    print(predictions_after_freeze[0].shape)
# Example 17
def test_multiple_outs_b():
    multiple_out_test = MultipleOutsB(seed=913)
    in_node = multiple_out_test.get_placeholder("input_0", data_type=tf.float32)
    in_node_0 = in_node + tf.Variable(tf.zeros([2, 2, 2]))  # Graph won't save without some variable present
    out_node_a = tf.unstack(in_node_0, axis=2, name='outputA')  # 2 of size 2x2
    a_node = tf.add(in_node_0, out_node_a[0])
    out_node_b = tf.unstack(a_node, axis=0, name="outputB")

    placeholders = [in_node]
    predictions = [out_node_a[1], out_node_b]  # out_node_b is a list

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="multiple_outs_b")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(multiple_out_test.get_test_data()) \
        .build_save_frozen_graph()
# Example 18
def test_simple_transpose():
    simple_t = SimpleTranspose(seed=713)
    in0 = simple_t.get_placeholder("input")

    k0 = tf.Variable(tf.random_normal([3, 3], dtype=tf.float64), name="k0")
    in1 = tf.transpose(in0, name="input_1")
    out_node = tf.add(in1, k0, name="output")

    placeholders = [in0]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="transpose")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(simple_t.get_test_data()) \
        .build_save_frozen_graph()
# Example 19
def test_mathops_five():
    mathops_5 = MathOpsFive(seed=19)
    in_node_1 = mathops_5.get_placeholder("input_1")
    in_node_2 = mathops_5.get_placeholder("input_2")
    k0 = tf.Variable(tf.random_normal([3, 2], dtype=tf.float64), name="in0")
    n0 = tf.gather(in_node_1, [1, 0], axis=-2)  # 2,4,2,2
    n1 = tf.gather_nd(n0, [[0, 2, 1], [0, 1, 0], [1, 3, 1]])  # 3,2
    out_node = tf.stack([n1, k0, in_node_2], axis=-1, name="output")  # 3,2,3

    placeholders = [in_node_1, in_node_2]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_05")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_5.get_test_data()) \
        .build_save_frozen_graph()
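
# A small standalone sketch (not part of the test) of the gather / gather_nd
# indexing used above: gather selects whole slices along one axis, while
# gather_nd indexes into the leading dimensions with full index tuples.
def _sketch_gather_vs_gather_nd():
    params = tf.constant([[1, 2], [3, 4], [5, 6]])  # shape (3, 2)
    g = tf.gather(params, [2, 0], axis=0)           # rows 2 and 0
    gn = tf.gather_nd(params, [[0, 1], [2, 0]])     # elements (0,1) and (2,0)
    with tf.Session() as sess:
        print(sess.run(g))   # [[5 6]
                             #  [1 2]]
        print(sess.run(gn))  # [2 5]
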
# Example 20
def test_mathops_six():
    mathops_6 = MathOpsSix(seed=19)
    in_node_1 = mathops_6.get_placeholder("input_1")
    in_node_2 = mathops_6.get_placeholder("input_2")
    k0 = tf.Variable(tf.random_normal([3, 2], dtype=tf.float64), name="in0")
    n0 = tf.reduce_sum(in_node_1, axis=[0, 1], keep_dims=False)  # 3,2
    n1 = tf.reduce_max(in_node_2, keep_dims=True)  # 1,1
    n2 = tf.add(k0, n0)
    out_node = tf.add(n1, n2, name="output")

    placeholders = [in_node_1, in_node_2]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_06")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_6.get_test_data()) \
        .build_save_frozen_graph()
# Example 21
def test_nontwod_zero():
    non_twod_0 = NonTwoDZero(seed=13)
    in_node = non_twod_0.get_placeholder("scalar", data_type=tf.float32)
    k0 = tf.Variable(tf.random_normal([2, 1]),
                     name="someweight",
                     dtype=tf.float32)
    a = tf.reduce_sum(in_node + k0)  # gives a scalar
    out_node = tf.reduce_sum(a + k0, name="output", axis=0)  # gives a vector

    placeholders = [in_node]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="non2d_0")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(non_twod_0.get_test_data()) \
        .build_save_frozen_graph()
# Example 22
def test_mathops_one():
    mathops_1 = MathOpsOne(seed=19)
    in_node_0 = mathops_1.get_placeholder("input_0")
    in_node_1 = mathops_1.get_placeholder("input_1")
    n0 = tf.add(np.arange(-4., 5., 1.).astype(np.float64).reshape(3, 3), in_node_0)
    n1 = tf.abs(n0)
    n3 = tf.add(n1, tf.Variable(tf.random_normal([3, 3], dtype=tf.float64)))
    n4 = tf.floordiv(n3, in_node_1)
    out_node = tf.tanh(n4, name="output")

    placeholders = [in_node_0, in_node_1]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_01")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_1.get_test_data()) \
        .build_save_frozen_graph()
# Example 23
def test_multiple_outs_a():
    multiple_out_test = MultipleOutsA(seed=913)
    in_node = multiple_out_test.get_placeholder("input_0",
                                                data_type=tf.float32)
    in_node_0 = in_node + tf.Variable(tf.zeros(
        [2, 3, 4]))  # Graph won't save without some variable present
    out_node_a = tf.unstack(in_node_0, axis=2, name="outputA")  # 4 of size 2x3
    out_node_b = tf.unstack(in_node_0, axis=1, name="outputB")  # 3 of size 2x4

    placeholders = [in_node]
    predictions = [out_node_a,
                   out_node_b]  # out_node_a and out_node_b are lists

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="multiple_outs_a")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(multiple_out_test.get_test_data()) \
        .build_save_frozen_graph()
# Example 24
def test_assert_true():
    assertTrue = AssertTrue(seed=713)

    x = assertTrue.get_placeholder("input")
    k0 = tf.Variable(tf.random_normal([3, 3], dtype=tf.float64), name="k0")
    assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 100.), [k0])

    with tf.control_dependencies([assert_op]):
        in1 = tf.transpose(x, name="input_1")
    out_node = tf.add(in1, k0, name="output")

    placeholders = [x]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="assert_true")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(assertTrue.get_test_data()) \
        .build_save_frozen_graph()
# Example 25
def test_nontwod_zero_a():
    non_twod_0_a = NonTwoDZeroA(seed=13)
    in_node_a = non_twod_0_a.get_placeholder("scalarA", data_type=tf.int32)
    in_node_b = non_twod_0_a.get_placeholder("scalarB", data_type=tf.int32)

    some_vector = tf.stack([in_node_a,
                            in_node_b])  # [2,] shape with value [5,2]
    i0 = tf.Variable(np.random.uniform(size=(3, 4)),
                     dtype=tf.float32)  # shape [3,4]
    out_node = tf.tile(i0, some_vector, name="output")

    placeholders = [in_node_a, in_node_b]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="non2d_0A")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(non_twod_0_a.get_test_data()) \
        .build_save_frozen_graph()
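
# For reference (not part of the test), tf.tile repeats each dimension by the
# corresponding entry of the multiples vector, so the stacked scalars above
# (the [5, 2] mentioned in the comment) turn the (3, 4) variable into a
# (15, 8) output.
def _sketch_tile_multiples():
    base = tf.ones((3, 4))
    tiled = tf.tile(base, [5, 2])
    print(tiled.shape)  # (15, 8)
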
# Example 26
def test_nontwod_one():
    non_twod_1 = NonTwoDOne(seed=13)
    in_node_a = non_twod_1.get_placeholder("scalar", data_type=tf.float32)
    in_node_b = non_twod_1.get_placeholder("vector", data_type=tf.float32)
    k0 = tf.Variable(tf.random_normal([2, 1]),
                     name="someweight",
                     dtype=tf.float32)

    i0 = tf.reshape(tf.reduce_sum(in_node_b), [])
    i1 = in_node_a + in_node_b + i0
    out_node = tf.matmul(tf.expand_dims(i1, 0), k0, name="output")

    placeholders = [in_node_a, in_node_b]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="non2d_1")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(non_twod_1.get_test_data()) \
        .build_save_frozen_graph()
# Example 27
def test_concat_one():
    concat_test = ConcatTest(seed=13)
    arrs = []
    for i in range(1, 5, 1):
        arrs.append(
            tf.Variable(
                tf.constant(5,
                            dtype=tf.float32,
                            shape=(1, 1),
                            name=str(i) + '_num')))
    out_node = tf.concat(arrs, 0, name='output')

    placeholders = []
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="concat")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(concat_test.get_test_data()) \
        .build_save_frozen_graph()
# Example 28
def test_mathops_four():
    mathops_4 = MathOpsFour(seed=19)
    in_node_1 = mathops_4.get_placeholder("input_1")
    in_node_2 = mathops_4.get_placeholder("input_2")
    k0 = tf.Variable(tf.random_normal([8, 1, 8], dtype=tf.float64), name="in0")
    n1 = tf.concat([in_node_1, in_node_2], axis=-2)
    n3 = tf.reshape(n1, [8, 8, 8])
    n4 = tf.pow(n3, n3)
    n5 = tf.tan(n4)
    n6 = tf.negative(n5)
    n7 = tf.multiply(n6, n4)
    out_node = tf.subtract(n7, k0, name="output")

    placeholders = [in_node_1, in_node_2]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_04")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_4.get_test_data()) \
        .build_save_frozen_graph()
# Example 29
def test_simplecond():
    in0 = tf.Variable(np.linspace(1, 4, 4) + 1, name='greater')
    in1 = tf.Variable(np.linspace(1, 4, 4), name='lesser')

    def f1():
        return in0 / tf.Variable(2.0, name='div_f1_constant', dtype=tf.float64)

    def f2():
        return in1 * tf.Variable(4.0, name='mul_f2_constant', dtype=tf.float64)

    def check():
        return tf.reduce_sum(in0 - in1) < 2

    r_node = tf.cond(tf.reduce_sum(in0 - in1) < 2,
                     true_fn=lambda: f1(),
                     false_fn=lambda: f2(),
                     name='cond5')
    r2 = tf.cond(tf.reduce_sum(in0 - in1) < 2,
                 true_fn=lambda: f1(),
                 false_fn=lambda: f2(),
                 name='cond6')

    last_result = tf.add(r_node,
                         tf.constant(1.0, dtype=tf.float64),
                         name='first_output_input')
    last_result2 = tf.add(r2,
                          tf.constant(1.0, dtype=tf.float64),
                          name='second_output_input')
    out_node = tf.add(last_result, last_result2, name='output')

    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="simple_cond")
    tfp.set_placeholders([]) \
        .set_output_tensors(predictions) \
        .set_test_data({}) \
        .build_save_frozen_graph()
# Example 30
def _CompareNorm(matrix):
    # ord_, axis_, keep_dims_ and save_dir_ are free variables here; the
    # enclosing test is expected to define them before calling this helper.
    # tf_matrix = tf.Variable(matrix, name="input")
    tf.reset_default_graph()
    in_node = tf.placeholder("float", matrix.shape, name="input")
    in0 = tf.Variable(tf.random_normal(matrix.shape),
                      name="in0",
                      dtype=tf.float32)
    tf_matrix = in_node + in0
    tf_norm = linalg_ops.norm(tf_matrix,
                              ord=ord_,
                              axis=axis_,
                              keep_dims=keep_dims_,
                              name="norm_op")
    out_node = tf.identity(tf_norm, name="output")
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    tfp = TensorFlowPersistor(save_dir=save_dir_)
    tfp.set_placeholders([in_node]) \
        .set_training_sess(sess) \
        .set_output_tensors([out_node]) \
        .set_test_data({"input": matrix}) \
        .build_save_frozen_graph()
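
# A hypothetical driver (not in the source) showing one way the helper above
# might be invoked; the names ord_, axis_, keep_dims_ and save_dir_ mirror the
# free variables it reads and would normally be set by the enclosing,
# parameterised norm test. The concrete values here are illustrative only.
def _sketch_compare_norm_driver():
    global ord_, axis_, keep_dims_, save_dir_
    ord_, axis_, keep_dims_, save_dir_ = 2, 1, True, "norm_ord2_axis1"
    _CompareNorm(np.random.rand(4, 5).astype(np.float32))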