Example #1
# common imports assumed by all of the examples below
import numpy as np
import tensorflow as tf
import tensorx as tx
from tensorx import Graph

def test_module_rnn():
    """ Module + RNN integration
    """
    # wrap a Module around an RNN: the RNN has input dependencies (its previous state)
    # that are not passed to the constructor
    x1 = tx.Input(tf.ones([1, 2, 3]), n_units=3, name="x1")
    x2 = tx.Input(tf.ones([1, 2, 3]), n_units=3, name="x2")
    rnn1 = tx.RNN(x1,
                  cell_config=tx.LSTMCell.config(n_units=4),
                  n_units=4,
                  stateful=False)
    rnn2 = tx.RNN(x1,
                  cell_config=tx.LSTMCell.config(n_units=4),
                  n_units=4,
                  stateful=False)

    out = tx.Concat(rnn1, rnn2)

    # add previous state as a dependency to a module
    m = tx.Module(inputs=x1,
                  output=out,
                  dependencies=rnn1.previous_state + rnn2.previous_state)

    m2 = m.reuse_with(x2)
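    # collect every VariableLayer (the RNNs' previous-state layers) in the reused module's graph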
    var_layers = set()
    for node in m2.graph.dependency_iter():
        if isinstance(node, tx.VariableLayer):
            var_layers.add(node)

    assert var_layers == set(rnn1.previous_state + rnn2.previous_state)
    assert tx.tensor_equal(m(), m2())
Example #2
def test_module_shape():
    x = tx.Input(n_units=3)
    t = tx.Transpose(x, n_units=3)
    mul = t * 2
    assert mul.shape == [3, 3]
    m = tx.Module(output=mul, inputs=x)
    assert m.n_units == 3
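    # calling the module should compute the wrapped graph without errors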
    m()
Example #3
def test_build_graph():
    x1 = tx.Input(n_units=1000, constant=False, dtype=tf.float32)
    x2 = tx.Input(init_value=tf.ones([1, 3]), dtype=tf.float32, constant=True)

    y10 = tx.Linear(x1, n_units=3)
    y11 = tx.Activation(y10)
    y1 = tx.Module(x1, y11)
    y2 = tx.Add(y1, x2)
    output = y2

    graph = Graph.build(inputs=None, outputs=[y1, y2])
    # the module condenses 2 inner nodes into one, so the graph has 4 nodes and not 6
    assert len(graph.nodes) == 4

    @tf.function
    def simple_graph(in0):
        x1.value = in0
        return y2()

    simple_graph_2 = Graph.build(inputs=[x1, x2], outputs=y2)
    simple_graph_2 = tf.function(simple_graph_2)
    g = Graph.build(inputs=[x1, x2], outputs=y2)
    y2fn = y2.as_function()
    data = tf.ones([256, 1000])
    x1.value = data

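    # compile the graph into a single function with x1 as the positional input and y2 as the output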
    compiled_fn = g.as_function(ord_inputs=x1, ord_outputs=output)

    assert tx.tensor_equal(compiled_fn(data), y2fn())
    assert tx.tensor_equal(compiled_fn(data), simple_graph_2()[0])

    from timeit import timeit

    def update_run():
        x1.value = tf.random.uniform([256, 1000])
        return y2fn()

    n = 1000
    t_update_run = timeit(update_run, number=n)
    t_generated = timeit(lambda: compiled_fn(tf.random.uniform([256, 1000])),
                         number=n)
    t_compile_value_set = timeit(
        lambda: simple_graph(tf.random.uniform([256, 1000])), number=n)
    t_graph_call_tf = timeit(
        lambda: simple_graph_2(tf.random.uniform([256, 1000])), number=n)

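    # the compiled graph function avoids assigning x1.value from Python, so it should be the fastest path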
    assert t_generated < t_update_run
    assert t_generated < t_compile_value_set
    assert t_generated < t_graph_call_tf
    assert t_update_run > t_compile_value_set

    o1 = compiled_fn(tf.random.uniform([256, 1000]))
    o2 = compiled_fn(tf.random.uniform([256, 1000]))
    assert not tx.tensor_equal(o1, o2)
Example #4
def test_biRNN():
    # bidirectional RNN
    n_features = 5
    embed_size = 4
    hidden_dim = 3
    seq_size = 6
    batch_size = 2

    inputs = tx.Input(np.random.random([batch_size, seq_size]),
                      n_units=seq_size,
                      dtype=tf.int32)
    lookup = tx.Lookup(inputs,
                       seq_size=seq_size,
                       embedding_shape=[n_features, embed_size])
    seq = lookup.permute_batch_time()

    rnn_proto = tx.RNNCell.config(n_units=hidden_dim)
    rnn0 = tx.RNN(seq,
                  cell_config=rnn_proto,
                  stateful=False,
                  return_state=True)

    # pass rnn0.inputs explicitly: a stateful RNN would also have a variable (state) layer among its inputs
    rnn_m0 = tx.Module(inputs=rnn0.inputs, output=rnn0)

    rnn1 = rnn0.reuse_with(seq,
                           reverse=True,
                           stateful=False,
                           return_state=True)
    # the RNN returns multiple tensors (output and state); see the Wrap calls below

    r01 = rnn_m0.compute(seq(), rnn0.previous_state[0]())
    rnn0.reset()
    r02 = rnn0()

    assert tx.tensor_equal(r01[0], r02[0])

    rnn0_0 = rnn0[0]
    rnn1_0 = rnn1[0]
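    # wrap each RNN so that calling it returns only the output tensor from the (output, state) tuple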
    rnn0 = tx.Wrap(rnn0, wrap_fn=lambda y: y[0], n_units=rnn0.n_units)
    rnn1 = tx.Wrap(rnn1, wrap_fn=lambda y: y[0], n_units=rnn1.n_units)

    rnn0_tensor = rnn0()
    rnn1_tensor = rnn1()
    rnn0_0_tensor = rnn0_0()

    print(rnn0_tensor.shape)
    print(rnn0_0_tensor.shape)
Example #5
def test_module_reuse_order():
    x1 = tx.Input([[2.]], n_units=1, name="x1")
    x2 = tx.Input([[2.]], n_units=1, name="x2")
    x3 = tx.Input([[1.]], n_units=1, name="x3")

    h = tx.Add(x2, x3)
    y = tx.Add(x1, h)

    module = tx.Module(inputs=[x1, x2, x3], output=y)

    x1_ = tx.Constant([[2.]], name="x1b")
    x2_ = tx.Constant([[2.]], name="x2b")

    m2 = module.reuse_with(x1_, x2_)
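    # only the first two inputs are replaced (in order); x3 is kept from the original module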

    result1 = module()
    result2 = m2()

    assert tx.tensor_equal(result1, result2)
Example #6
def test_module():
    l1 = tx.Input([[1]], n_units=1, dtype=tf.float32)
    l2 = tx.Input([[1]], n_units=1, dtype=tf.float32)
    l3 = tx.layer(n_units=1)(lambda x1, x2: tf.add(x1, x2))(l1, l2)
    l4 = tx.layer(n_units=1)(lambda x1, x2: tf.add(x1, x2))(l1, l2)
    l5 = tx.Linear(l4, 1)
    in1 = tx.Input([[1]], n_units=1, dtype=tf.float32)
    l7 = tx.layer(n_units=1)(lambda x1, x2: tf.add(x1, x2))(l3, in1)
    l8 = tx.layer(n_units=1)(lambda x1, x2: tf.add(x1, x2))(l7, l5)

    in2 = tx.Input([[1]], n_units=1, dtype=tf.float32, constant=False)
    in3 = tx.Input([[1]], n_units=1, dtype=tf.float32)

    m = tx.Module([l1, l2, in1], l8)
    with tf.name_scope("module_reuse"):
        m2 = m.reuse_with(in2, in3, in1)

    assert tx.tensor_equal(m(), m2())
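    # changing in2 only affects the reused module m2, so the outputs now differ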
    in2.value = [[3]]
    assert not tx.tensor_equal(m(), m2())
Example #7
def test_linear_save(tmp_path):
    tmp_path = tmp_path.joinpath("linear")
    save_path = str(tmp_path)

    x = tx.Input(init_value=tf.ones([2, 2]), n_units=2)
    linear = tx.Linear(x, n_units=4)
    graph = tx.Graph.build(inputs=None, outputs=linear)
    assert len(graph.in_nodes) == 1

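    # layers are trackable objects, so they can be exported directly with tf.saved_model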
    tf.saved_model.save(linear, save_path)
    linear_loaded = tf.saved_model.load(save_path)
    module = tx.Module(x, linear)

    # alternative: build the graph for linear and export the function from the graph
    tf.saved_model.save(module, save_path)
    loaded = tf.saved_model.load(save_path)

    assert tx.tensor_equal(loaded(), linear())
    assert tx.tensor_equal(loaded(), module())
Example #8
def test_module_gate():
    """ Module + Gate Integration
    """
    x1 = tx.Input([[1, 1, 1, 1]], n_units=4, dtype=tf.float32)
    x2 = tx.Input([[1, 1]], n_units=2, dtype=tf.float32)
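    # rebind x1 to x1 + x1: the gate below receives the doubled input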
    x1 = tx.Add(x1, x1)

    gate = tx.Gate(input_layer=x1, gate_input=x2, gate_fn=tf.sigmoid)
    gate_module = tx.Module([x1, x2], gate)

    x3 = tx.Input([[1, 1, 1, 1]], n_units=4, dtype=tf.float32)
    x4 = tx.Input([[1, 1]], n_units=2, dtype=tf.float32)

    m2 = gate_module.reuse_with(x3, x4)

    result1 = gate_module()
    result2 = m2()
    result3 = gate_module.compute(x3, x4)

    assert tx.tensor_equal(result1, result2 * 2)
    assert tx.tensor_equal(result2, result3)
Example #9
def test_module_with_attention():
    """ Module + Attention integration
    This also tests Graph indirectly to check if we can add layers
    whose input layers are the same object (e.g. in self-attention)
    """

    x1 = tx.Input(tf.ones([1, 2, 3]), n_units=3, name="x1")
    rnn1 = tx.RNN(x1,
                  cell_config=tx.LSTMCell.config(n_units=4),
                  n_units=4,
                  stateful=False)
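    # self-attention: the same rnn1 layer is used as query, keys, and values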
    att = tx.MHAttention(rnn1, rnn1, rnn1, n_units=3)
    m = tx.Module(inputs=x1, output=att, dependencies=rnn1.previous_state)
    g = tx.Graph.build(inputs=x1, outputs=m, add_missing_inputs=True)
    fn = g.as_function(ord_inputs=x1, ord_outputs=m)
    # this returns a tuple
    out1 = g.compute(tf.ones([1, 2, 3]))
    # this returns the function result
    out2 = fn(tf.ones([1, 2, 3]))

    assert tx.tensor_equal(out1[0], out2)
Example #10
def test_linear_graph_module_integration(tmp_path):
    tmp_path = tmp_path.joinpath("linear")
    save_path = str(tmp_path)

    x = tx.Input(init_value=tf.ones([2, 2], dtype=tf.float32))
    # x = tx.Constant(tf.constant([[32.]]), n_units=1)
    x = tx.Linear(x, n_units=x.n_units)
    linear = tx.Linear(x, n_units=4)
    graph = tx.Graph.build(inputs=None, outputs=linear)
    module = tx.Module(inputs=None, output=linear)
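    # with inputs=None, the module infers its input layers; they should match the graph's in_nodes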

    assert len(module.inputs) == 1
    assert module.inputs == list(graph.in_nodes)
    assert len(graph.in_nodes) == 1

    tf.saved_model.save(module, save_path)
    module_loaded = tf.saved_model.load(save_path)
    assert tx.tensor_equal(module_loaded(), module())

    tf.saved_model.save(linear, save_path)
    linear_loaded = tf.saved_model.load(save_path)
    assert tx.tensor_equal(module_loaded(), linear_loaded())