Code example #1
def test_rnn_cell():
    n_inputs = 3
    n_units = 4
    batch_size = 2
    inputs = tx.Input(n_units=n_inputs)

    rnn0 = tx.RNNCell(inputs, n_units)

    # Keras RNN cell
    rnn1 = SimpleRNNCell(n_units)
    state = rnn1.get_initial_state(inputs, batch_size=1)
    assert tx.tensor_equal(state, rnn0.previous_state[0]())

    inputs.value = tf.ones([batch_size, n_inputs])
    res1 = rnn1(inputs, (state, ))

    rnn1.kernel = rnn0.layer_state.w.weights
    rnn1.bias = rnn0.layer_state.w.bias
    rnn1.recurrent_kernel = rnn0.layer_state.u.weights

    res2 = rnn1(inputs, (state, ))
    assert not tx.tensor_equal(res1[0], res2[0])
    assert not tx.tensor_equal(res1[1], res2[1])

    res0 = rnn0()
    assert tx.tensor_equal(res2[0], res0)
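The test above checks that tx.RNNCell matches Keras's SimpleRNNCell once the weights are copied across. For reference, a minimal NumPy sketch of the update both cells should compute, assuming the standard Elman step (the helper name and the tanh activation are assumptions here, not tensorx API):

import numpy as np

def elman_step(x, h, w, u, b):
    # x: [batch, n_inputs], h: [batch, n_units]
    # w: [n_inputs, n_units], u: [n_units, n_units], b: [n_units]
    return np.tanh(x @ w + h @ u + b)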
Code example #2
def test_linear():
    inputs = tx.Constant(tf.ones([2, 4]), dtype=tf.float64)
    inputs2 = inputs * 2

    linear = tx.Linear(inputs, n_units=8, dtype=tf.float64)

    w = linear.weights
    b = linear.bias

    assert w.shape == [4, 8]
    assert b.shape == [8]
    assert len(linear.trainable_variables) == 2

    t1 = linear()
    t2 = linear()

    assert tx.tensor_equal(t1, t2)

    linear2 = tx.Linear(linear.inputs[0],
                        8,
                        share_state_with=linear,
                        dtype=tf.float64)
    t3 = linear2()
    assert tx.tensor_equal(t1, t3)

    linear = tx.Linear(inputs, 8, dtype=tf.float64)
    linear2 = linear.reuse_with(inputs2)

    assert linear.weights is linear2.weights
    assert linear.bias is linear2.bias

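    # the relation below holds because the shared bias is still at its
    # zero init (untrained): w @ (2 * x) + 0 == 2 * (w @ x + 0)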
    assert tx.tensor_equal(linear() * 2, linear2())
Code example #3
def test_input_value():
    inputs = tx.Input(n_units=4, dtype=tf.int32, constant=False)
    assert tx.tensor_equal(inputs.value, tf.zeros([1, 4], dtype=tf.int32))

    with pytest.raises(ValueError):
        inputs.value = np.ones([2, 3], dtype=np.int32)

    inputs.value = np.ones([2, 4], dtype=np.int32)
    assert inputs.value is not None
    assert inputs() is not None
    assert inputs().dtype == tf.int32

    # test sparse input
    inputs = tx.Input(n_units=4, n_active=2, dtype=tf.int64, constant=False)
    assert tx.tensor_equal(inputs.value, tf.zeros([0, 2], dtype=tf.int64))

    with pytest.raises(ValueError) as ve:
        inputs.value = [[0, 2, 2]]
    # check the message outside the block, otherwise the assert is unreachable
    assert "Invalid shape" in str(ve.value)

    inputs.value = [[0, 2]]
    # create an equivalent sparse input
    sp_input = inputs()
    assert isinstance(sp_input, tf.SparseTensor)
    inputs2 = tx.Input(n_units=4, init_value=sp_input)

    dense_value = tf.sparse.to_dense(inputs())
    dense_value2 = tf.sparse.to_dense(inputs2())
    expected = tf.constant([[1, 0, 1, 0]], dtype=np.int64)
    assert tx.tensor_equal(expected, dense_value)
    assert tx.tensor_equal(dense_value, dense_value2)
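In the sparse case, n_active=2 means each row of value lists the indices of the active columns, and calling the layer yields a tf.SparseTensor. A NumPy sketch of the dense equivalent (the helper is illustrative, not tensorx API):

import numpy as np

def dense_from_active(active_idx, n_units):
    # one row per sample; listed column indices become ones
    out = np.zeros([len(active_idx), n_units], dtype=np.int64)
    for row, cols in enumerate(active_idx):
        out[row, cols] = 1
    return out

# dense_from_active([[0, 2]], 4) -> [[1, 0, 1, 0]], matching `expected` above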
Code example #4
def test_linear_rank3():
    val = tf.constant([[[1], [1]], [[2], [2]]])
    x1 = tx.Input(val, dtype=tf.float32)
    x2 = tx.Transpose(x1)

    assert val.shape[1:] == x1.shape[1:]

    x1_flat = tx.Reshape(x1, [-1, 1])

    linear1 = tx.Linear(x1, n_units=2)
    linear2 = tx.Linear(x2,
                        weights_shape=[2, 1],
                        weights=linear1.weights,
                        transpose_weights=True)

    # we can't do this because it changes the definition
    # of the layer (n_units etc.)
    with pytest.raises(ValueError):
        linear1.reuse_with(x2, transpose_weights=True)
        pytest.fail(
            "can't reuse with transpose weights while changing the layer definition"
        )

    linear_flat = linear1.reuse_with(x1_flat, shape=(4, 2))
    x1_tensor = x1()
    new_shape = x1_tensor.shape[:-1] + [2]

    linear_flat = tx.Reshape(linear_flat, new_shape)

    assert tx.tensor_equal(linear1(), linear_flat())
    assert tx.tensor_equal(tf.shape(linear2()), [1, 2, 1])
Code example #5
def test_conv1d():
    num_filters = 2
    input_dim = 4
    seq_size = 3
    batch_size = 2
    filter_size = 2

    filter_shape = [filter_size, input_dim, num_filters]

    x = tf.ones([batch_size, seq_size, input_dim])
    x_layer = tx.Constant(x, input_dim)

    filters = tf.ones(filter_shape)
    conv_layer = tx.Conv1D(x_layer, num_filters, filter_size, filters=filters)
    conv = tf.nn.conv1d(input=x,
                        filters=filters,
                        stride=1,
                        padding="SAME",
                        data_format="NWC")

    output = conv_layer()
    assert tx.tensor_equal(conv, output)
    assert tx.tensor_equal(tf.shape(conv_layer.filters),
                           [filter_size, input_dim, num_filters])
    assert tx.tensor_equal(tf.shape(output),
                           [batch_size, seq_size, num_filters])
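With padding="SAME" and stride 1, the convolution preserves the sequence length (out_len = ceil(seq_size / stride) for SAME padding), which is why the output shape is [batch_size, seq_size, num_filters] while the filters keep the [filter_size, input_dim, num_filters] layout.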
Code example #6
def test_lstm_cell():
    n_inputs = 4
    n_hidden = 2
    batch_size = 2

    inputs = tx.Input(np.ones([batch_size, n_inputs], np.float32),
                      n_units=n_inputs,
                      constant=True)
    rnn1 = tx.LSTMCell(inputs, n_hidden, gate_activation=tf.sigmoid)
    previous_state = (None, rnn1.state[-1]())
    rnn2 = rnn1.reuse_with(inputs, *previous_state)

    # if we don't wipe the memory, the previous memory state is reused
    previous_state = (None, tx.LSTMCell.zero_state(rnn1.n_units))
    rnn3 = rnn1.reuse_with(inputs, *previous_state)
    rnn4 = rnn1.reuse_with(inputs)

    res1 = rnn1()
    res2 = rnn2()
    res3 = rnn3()
    res4 = rnn4()

    assert (batch_size, n_hidden) == np.shape(res1)
    assert tx.tensor_equal(res1, res3)
    assert not tx.tensor_equal(res1, res2)
    assert tx.tensor_equal(res1, res4)
Code example #7
File: test_train.py Project: tensorx/tensorx
def test_model_run():
    data1 = tf.constant([[1., 1.]])

    x = tx.Input(n_units=2, name="x", constant=False)
    labels = tx.Input(n_units=2, name="y_", constant=False)
    y = tx.Linear(x, 2, name="y")
    out1 = tx.Activation(y, tf.nn.softmax)
    out2 = tx.Activation(y, tf.nn.softmax)

    @tx.layer(n_units=2, name="loss")
    def loss(pred, labs):
        return tf.losses.categorical_crossentropy(labs, pred)

    model = tx.Model(run_inputs=x,
                     run_outputs=[out1, out2],
                     train_inputs=[x, labels],
                     train_outputs=out1,
                     train_loss=loss(out1, labels))

    model.set_optimizer(tf.optimizers.SGD, lr=0.5)

    result1 = model.run({x: data1})
    result2 = model.run([data1])

    assert tx.tensor_equal(result1[0], result2[0])
    assert tx.tensor_equal(result1[1], result2[1])

    result3 = model.run({x: data1}, compiled_graph=True)
    assert tx.tensor_equal(result3[0], result2[0])
    assert tx.tensor_equal(result3[1], result2[1])
Code example #8
def test_reuse_dropout():
    x1 = tx.Constant(np.ones(shape=[2, 4]), dtype=tf.float32)
    x2 = tx.Activation(x1)
    drop1 = tx.Dropout(x2, probability=0.5, locked=True)

    assert len(drop1.inputs) == 2
    assert drop1.inputs[0] is x2
    assert drop1.inputs[-1] is drop1.layer_state.mask

    # reusing with an explicit mask: the new mask becomes the layer's state mask
    _, mask = tx.dropout(x2, return_mask=True)
    drop2 = drop1.reuse_with(x2, mask)

    assert len(drop2.inputs) == 2
    assert drop2.inputs[0] is x2
    assert drop2.inputs[-1] is drop2.layer_state.mask

    assert not tx.tensor_equal(drop1(), drop2())

    graph = tx.Graph.build(inputs=None, outputs=[drop1, drop2])

    out1, out2 = graph()
    assert tx.tensor_equal(out1, out2)

    drop1 = tx.Dropout(x2, probability=0.5)
    drop2 = drop1.reuse_with(x1)

    graph.eval(drop1, drop2)
Code example #9
def test_batch_norm():
    v = tf.random.uniform([3, 4])
    x = tx.Input(v, dtype=tf.float32)

    bn = tx.BatchNorm(x, offset=True, scale=True)
    moving_mean = bn.moving_mean
    moving_variance = bn.moving_variance

    before = bn.moving_mean.value()
    bn()
    after = bn.moving_mean.value()
    assert not tx.tensor_equal(before, after)

    x.value = tf.random.uniform([3, 4])
    bn = bn.reuse_with(x, training=False)
    before = bn.moving_mean.value()
    bn()
    after = bn.moving_mean.value()
    assert tx.tensor_equal(before, after)
    assert moving_mean is bn.moving_mean
    assert moving_variance is bn.moving_variance

    bn = bn.reuse_with(x, training=True)
    x.value = tf.random.uniform([3, 4])
    before = bn.moving_mean.value()
    bn()
    after = bn.moving_mean.value()
    assert not tx.tensor_equal(before, after)
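The moving statistics update only in training mode. A minimal sketch of the usual exponential moving-average update (the momentum value is an assumption, not necessarily the tensorx default):

def ema_update(moving, batch_stat, momentum=0.99):
    # training step: pull the running statistic toward the batch statistic
    return momentum * moving + (1.0 - momentum) * batch_stat

With training=False no such update runs, which is what the second block of asserts checks.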
Code example #10
File: test_utils.py Project: tensorx/tensorx
def test_layer_graph():
    data = [[1., 2.]]

    in1 = tx.Input(n_units=2, name="in1", constant=False)
    in2 = tx.Input(n_units=2, name="in2", constant=False)
    linear = tx.Linear(in1, 1, add_bias=False)
    graph = tx.Graph.build(inputs=in1, outputs=linear)

    assert in1 in graph.in_nodes

    with pytest.raises(ValueError):
        tx.Graph.build(inputs=[in1, in2], outputs=linear)
        pytest.fail(
            "Expected ValueError: some inputs are not connected to anything")

    with pytest.raises(ValueError):
        tx.Graph.build(inputs=[in2], outputs=linear)
        pytest.fail(
            "Expected ValueError: inputs specified but dependencies are missing"
        )

    w = tf.matmul(data, linear.weights)

    in1.value = data
    r1 = linear()
    r2 = graph(data)

    assert tx.tensor_equal(r2[0], w)
    assert tx.tensor_equal(r1, w)
Code example #11
def test_conv1d():
    n_features = 3
    embed_size = 128
    seq_size = 3
    batch_size = 2

    inputs = tx.Constant(np.random.random([batch_size, seq_size]),
                         n_units=seq_size,
                         dtype=tf.int32)
    emb = tx.Lookup(inputs,
                    seq_size=seq_size,
                    embedding_shape=[n_features, embed_size])
    seq = emb()

    n_units = 100
    filter_size = 4
    cnn = tf.keras.layers.Conv1D(filters=n_units,
                                 kernel_size=filter_size,
                                 padding='same')

    res = cnn(seq)

    cnn2 = tx.Conv1D(emb, n_units=100, filter_size=filter_size)
    res2 = cnn2(seq)

    assert len(cnn.variables) == len(cnn2.variables)

    cnn.kernel = cnn2.filters
    cnn.bias = cnn2.bias
    res3 = cnn(seq)

    assert not tx.tensor_equal(res, res2)
    assert tx.tensor_equal(res2, res3)
Code example #12
def test_rnn_cell():
    n_inputs = 4
    n_hidden = 2
    batch_size = 2

    x = tx.Input(init_value=tf.ones([batch_size, n_inputs]), constant=False)
    rnn1 = tx.RNNCell(x, n_hidden)

    assert rnn1.shape[0] == x.shape[0]
    assert rnn1.shape[-1] == rnn1.n_units

    state = rnn1.state
    state = state[0]()

    rnn_2 = rnn1.reuse_with(x, state)
    rnn_3 = rnn1.reuse_with(x)

    with pytest.raises(TypeError):
        tx.RNNCell(x, n_hidden, share_state_with=x)
        pytest.fail(
            "Type Error Expected: inputs cannot share state with RNNCell")

    res1 = rnn1()
    res2 = rnn_2()
    res3 = rnn_3()

    assert (batch_size, n_hidden) == np.shape(res1)
    assert tx.tensor_equal(res1, res3)
    assert not tx.tensor_equal(res1, res2)
Code example #13
File: test_train.py Project: tensorx/tensorx
def test_set_optimizer():
    x = tx.Input(n_units=2, name="x", constant=False)
    labels = tx.Input(n_units=2, name="labels", constant=False)
    y = tx.Linear(x, 2, name="y")
    out1 = tx.Activation(y, tf.nn.softmax)
    out2 = tx.Activation(y, tf.nn.softmax)

    @tx.layer(n_units=2, name="loss")
    def loss(pred, labs):
        return tf.losses.categorical_crossentropy(labs, pred)

    model = tx.Model(run_inputs=x,
                     run_outputs=[out1, out2],
                     train_inputs=[x, labels],
                     train_outputs=[out2, out1],
                     train_loss=loss(out1, labels))

    lr = tx.Param(0.5)
    opt = model.set_optimizer(tf.optimizers.SGD,
                              learning_rate=lr,
                              clipnorm=0.1)

    assert isinstance(opt, tf.optimizers.Optimizer)

    assert model.optimizer.get_config()["learning_rate"] == 0.5

    data1 = [[1., 1.], [1., 1.]]
    data2 = tf.constant([[0., 1.], [0., 1.]])
    params = model.optimizer_params[model.optimizer]
    data_dict, params_dict = tx.Model.parse_input(
        {
            x: data1,
            "learning_rate": 0.2
        }, model.run_graph.in_nodes, params)
    assert len(data_dict) == 1
    assert len(params_dict) == 1
    assert model.optimizer_params[opt]["learning_rate"] is lr

    result1 = model.train_step({x: data1, labels: data2})
    result2 = model.train_step([data1, data2])

    assert len(result1) == 3
    assert len(result2) == 3
    assert tf.reduce_all(tf.less(result2[-1], result1[-1]))

    result1 = model.run({x: np.array(data1, dtype=np.float32)})
    result2 = model.run([data1])
    result3 = model.run(np.array(data1, np.float32))

    x.value = data1
    o2 = out2()
    o1 = out1()

    result4 = (o2, o1)

    for i in range(2):
        assert tx.tensor_equal(result1[i], result2[i])
        assert tx.tensor_equal(result1[i], result3[i])
        assert tx.tensor_equal(result1[i], result4[i])
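Note how wrapping the learning rate in a tx.Param lets it be fed at run time: parse_input splits the feed dict into graph inputs (x: data1) and optimizer parameters ("learning_rate": 0.2), while model.optimizer_params still holds the Param object itself.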
Code example #14
def test_gru_cell():
    n_inputs = 3
    n_units = 4
    batch_size = 1
    inputs = tx.Input(n_units=n_inputs)

    gru0 = tx.GRUCell(inputs,
                      n_units,
                      activation=tf.tanh,
                      gate_activation=tf.sigmoid)

    # reset_after=True would apply the reset gate after the matrix
    # multiplication and use recurrent biases, matching the cuDNN
    # implementation; reset_after=False keeps the standard formulation
    gru1 = GRUCell(n_units,
                   activation='tanh',
                   recurrent_activation='sigmoid',
                   reset_after=False,
                   implementation=1,
                   use_bias=True)

    assert not hasattr(gru1, "kernel")

    state0 = [s() for s in gru0.previous_state]
    # get_initial_state from Keras returns either a tuple or a single
    # state (see test_rnn_cell), but the __call__ API requires an iterable
    state1 = gru1.get_initial_state(inputs, batch_size=1)

    assert tx.tensor_equal(state1, state0[0])

    inputs.value = tf.ones([batch_size, n_inputs])

    res1 = gru1(inputs, state0)
    res1_ = gru1(inputs, state0)

    for r1, r2 in zip(res1, res1_):
        assert tx.tensor_equal(r1, r2)

    # the only difference is that keras kernels are fused together
    kernel = tf.concat([w.weights.value() for w in gru0.layer_state.w],
                       axis=-1)
    recurrent_kernel = tf.concat([u.weights for u in gru0.layer_state.u],
                                 axis=-1)
    bias = tf.concat([w.bias for w in gru0.layer_state.w], axis=-1)

    assert tx.same_shape(kernel, gru1.kernel)
    assert tx.same_shape(recurrent_kernel, gru1.recurrent_kernel)
    assert tx.same_shape(bias, gru1.bias)

    gru1.kernel = kernel
    gru1.recurrent_kernel = recurrent_kernel
    gru1.bias = bias

    res2 = gru1(inputs, state0)
    for i in range(len(res1)):
        assert not tx.tensor_equal(res1[i], res2[i])
    res0 = gru0()
    # res0_ = gru0.state[0]()
    assert tx.tensor_equal(res0, res2[0])
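Keras fuses the three gate kernels into single [n_inputs, 3 * n_units] matrices, which is why the test rebuilds them with tf.concat over tensorx's per-gate w and u layers; for the outputs to match, this mapping has to assume both libraries use the same gate ordering along the fused axis.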
Code example #15
def test_variable_layer():
    input_layer = tx.Input([[1]], n_units=1, dtype=tf.float32)
    var_layer = tx.VariableLayer(input_layer, dtype=tf.float32)

    init_value = var_layer.variable.value()
    after_update = var_layer()

    assert not tx.tensor_equal(init_value, after_update)
    assert tx.tensor_equal(after_update, var_layer.variable.value())
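The two asserts pin down the behavior: calling a VariableLayer assigns the forwarded input to its internal variable, so the value differs from the initial one after the first call and then matches the layer output.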
Code example #16
def test_rnn_cell_drop():
    n_hidden = 4
    inputs1 = tx.Input(np.ones([2, 100]), dtype=tf.float32)
    inputs2 = tx.Input(np.ones([2, 100]), dtype=tf.float32)

    with tf.name_scope("wtf"):
        rnn1 = tx.RNNCell(inputs1,
                          n_hidden,
                          x_dropout=0.5,
                          r_dropout=0.5,
                          u_dropconnect=0.5,
                          w_dropconnect=0.5,
                          regularized=True)
    rnn2 = rnn1.reuse_with(inputs2, rnn1)
    rnn3 = rnn1.reuse_with(inputs2, rnn1)
    rnn4 = rnn1.reuse_with(inputs2, None, regularized=False)
    rnn5 = rnn4.reuse_with(inputs2, None, regularized=True)

    r1, r2, r3, r4, r5 = rnn1(), rnn2(), rnn3(), rnn4(), rnn5()
    # w is a linear layer over the input, but a regularized cell applies
    # dropout to the input first, so a dropout layer sits in between

    # without a shared state object we couldn't rewire graphs; in the
    # non-eager case we can share a tensor that is already wired to
    # something (it takes the shape of one layer's input and creates a
    # mask tensor shared across dropout instances)
    # Linear layers should have shared states as well, in this case
    # sharing the weights
    # dropout_state1 = rnn1.w.input_layers[0].layer_state
    # dropout_state2 = rnn2.w.input_layers[0].layer_state
    # dropout_state3 = rnn3.w.input_layers[0].layer_state

    # mask1, mask2, mask3 = dropout_state1.mask, dropout_state2.mask, dropout_state3

    assert tx.tensor_equal(r2, r3)
    assert not tx.tensor_equal(r2, r4)
    assert not tx.tensor_equal(r4, r5)

    assert rnn1.dropout_locked
    assert rnn2.dropout_locked

    assert hasattr(rnn1, "w")
    assert hasattr(rnn2, "w")

    w1: tx.Layer = getattr(rnn1, "w")
    w2: tx.Layer = getattr(rnn2, "w")

    assert isinstance(w1, tx.DropConnect)

    state1, state2 = w1.layer_state, w2.layer_state

    assert hasattr(state1, "weight_mask")
    assert hasattr(state2, "weight_mask")

    # dropout locked == true
    mask1 = getattr(state1, "weight_mask")
    mask2 = getattr(state2, "weight_mask")

    assert tx.tensor_equal(mask1, mask2)
Code example #17
def test_build_graph():
    x1 = tx.Input(n_units=1000, constant=False, dtype=tf.float32)
    x2 = tx.Input(init_value=tf.ones([1, 3]), dtype=tf.float32, constant=True)

    y10 = tx.Linear(x1, n_units=3)
    y11 = tx.Activation(y10)
    y1 = tx.Module(x1, y11)
    y2 = tx.Add(y1, x2)
    output = y2

    graph = Graph.build(inputs=None, outputs=[y1, y2])
    # module condenses 2 nodes so it's 4 and not 6
    assert len(graph.nodes) == 4

    @tf.function
    def simple_graph(in0):
        x1.value = in0
        return y2()

    simple_graph_2 = Graph.build(inputs=[x1, x2], outputs=y2)
    simple_graph_2 = tf.function(simple_graph_2)
    g = Graph.build(inputs=[x1, x2], outputs=y2)
    y2fn = y2.as_function()
    data = tf.ones([256, 1000])
    x1.value = data

    compiled_fn = g.as_function(ord_inputs=x1, ord_outputs=output)

    assert tx.tensor_equal(compiled_fn(data), y2fn())
    assert tx.tensor_equal(compiled_fn(data), simple_graph_2()[0])

    from timeit import timeit

    def update_run():
        x1.value = tf.random.uniform([256, 1000])
        return y2fn()

    n = 1000
    t_update_run = timeit(update_run, number=n)
    t_generated = timeit(lambda: compiled_fn(tf.random.uniform([256, 1000])),
                         number=n)
    t_compile_value_set = timeit(
        lambda: simple_graph(tf.random.uniform([256, 1000])), number=n)
    t_graph_call_tf = timeit(
        lambda: simple_graph_2(tf.random.uniform([256, 1000])), number=n)

    assert t_generated < t_update_run
    assert t_generated < t_compile_value_set
    assert t_generated < t_graph_call_tf
    assert t_update_run > t_compile_value_set

    o1 = compiled_fn(tf.random.uniform([256, 1000]))
    o2 = compiled_fn(tf.random.uniform([256, 1000]))
    assert not tx.tensor_equal(o1, o2)
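The timing asserts say the generated compiled function is the fastest path, presumably because it avoids assigning x1.value from Python on every call; the final two calls also confirm the compiled function still consumes fresh inputs rather than caching a constant result.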
Code example #18
def test_rnn_layer_config():
    x1 = tx.Input(init_value=tf.ones([2, 2]), n_units=2)
    x_config = x1.config
    x2 = x_config()
    assert tx.tensor_equal(x1(), x2())

    rnn_cell = tx.RNNCell(input_layer=x1, n_units=3)
    rnn_proto = rnn_cell.config
    rnn_cell2 = rnn_proto(x1)

    assert tx.same_shape(rnn_cell(), rnn_cell2())
    assert not tx.tensor_equal(rnn_cell(), rnn_cell2())
Code example #19
File: test_utils.py Project: tensorx/tensorx
def test_sp_variable():
    x = tx.sparse_ones([[0, 2], [1, 1], [2, 0]], dense_shape=[3, 3])
    x2 = x * 2
    x3 = tx.sparse_ones([[0, 1], [0, 2], [1, 1], [2, 0]], dense_shape=[3, 3])
    v = tx.SparseVariable(x, validate_shape=False)

    v.assign(x2)
    assert tx.tensor_equal(tf.sparse.to_dense(v.value()),
                           tf.sparse.to_dense(x2))

    v.assign(x3)
    assert tx.tensor_equal(tf.sparse.to_dense(v.value()),
                           tf.sparse.to_dense(x3))
Code example #20
def test_sparse_matrix_indices():
    x = tf.constant([[0, 1, 3], [1, 2, 3]])
    num_cols = 4
    expected_dense = [[1, 1, 0, 1], [0, 1, 1, 1]]

    sp_one_hot = tx.sparse_matrix_indices(x, num_cols, dtype=tf.int32)
    dense1 = tf.sparse.to_dense(sp_one_hot)

    assert tx.tensor_equal(dense1, expected_dense)

    sp_one_hot = tx.sparse_matrix_indices(x, num_cols)
    dense1 = tf.sparse.to_dense(sp_one_hot)

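    # without an explicit dtype the values come back as floats, so the
    # comparison with the integer `expected` matrix is expected to fail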
    assert not tx.tensor_equal(dense1, expected_dense)
Code example #21
def test_lstm_cell():
    n_inputs = 3
    n_units = 4
    batch_size = 1
    inputs = tx.Input(n_units=n_inputs)

    lstm0 = tx.LSTMCell(
        inputs,
        n_units,
        activation=tf.tanh,
        gate_activation=tf.sigmoid,
        forget_bias_init=tf.initializers.ones(),
    )

    lstm1 = LSTMCell(n_units,
                     activation='tanh',
                     recurrent_activation='sigmoid',
                     unit_forget_bias=True,
                     implementation=2)

    state0 = [s() for s in lstm0.previous_state]
    # get_initial_state from Keras returns either a tuple or a single
    # state (see `test_rnn_cell`), but the __call__ API requires an iterable
    state1 = lstm1.get_initial_state(inputs, batch_size=1)

    assert tx.tensor_equal(state1, state0)

    inputs.value = tf.ones([batch_size, n_inputs])
    res1 = lstm1(inputs, state0)
    res1_ = lstm1(inputs, state0)

    for r1, r2 in zip(res1, res1_):
        assert tx.tensor_equal(r1, r2)

    # the only difference is that keras kernels are fused together
    kernel = tf.concat([w.weights.value() for w in lstm0.layer_state.w],
                       axis=-1)
    w_i, _, _, _ = tf.split(kernel, 4, axis=1)
    assert tx.tensor_equal(w_i, lstm0.w[0].weights.value())

    recurrent_kernel = tf.concat([u.weights for u in lstm0.layer_state.u],
                                 axis=-1)
    bias = tf.concat([w.bias for w in lstm0.layer_state.w], axis=-1)

    assert tx.tensor_equal(tf.shape(kernel), tf.shape(lstm1.kernel))
    assert tx.tensor_equal(tf.shape(recurrent_kernel),
                           tf.shape(lstm1.recurrent_kernel))
    assert tx.tensor_equal(tf.shape(bias), tf.shape(lstm1.bias))

    lstm1.kernel = kernel
    lstm1.recurrent_kernel = recurrent_kernel
    lstm1.bias = bias

    res2 = lstm1(inputs, state0)
    for i in range(len(res1)):
        assert not tx.tensor_equal(res1[i], res2[i])
    res0 = lstm0()
    assert tx.tensor_equal(res0, res2[0])
Code example #22
def test_tensor_equal():
    t1 = tf.random.uniform([2, 2], dtype=tf.float32)
    t2 = tf.ones_like(t1)
    t3 = tf.random.uniform([3, 2], dtype=tf.float32)

    assert tx.tensor_equal(t1, t1)
    assert not tx.tensor_equal(t1, t2)
    assert not tx.tensor_equal(t1, t3)

    idx = tx.gumbel_top(tf.random.uniform([8, 8]), 2)
    idx = tx.matrix_indices(idx)
    sp1 = tf.SparseTensor(idx,
                          values=tf.random.uniform([tf.shape(idx)[0]]),
                          dense_shape=[8, 8])
    sp2 = tx.sparse_ones(idx, dense_shape=[8, 8])

    assert tx.tensor_equal(sp1, sp1)
    assert not tx.tensor_equal(sp1, sp2)
Code example #23
def test_dynamic_concat():
    seq1 = [[1, 2], [3, 4]]
    seq2 = [[1, 2, 3], [4, 5, 6]]

    n = 10
    m = 4

    inputs = tx.Input(seq2, shape=[None, None], dtype=tf.int32, constant=False)
    inputs2 = tx.Input(seq2, dtype=tf.int32, constant=True)

    lookup = tx.Lookup(inputs, seq_size=None, embedding_shape=[n, m])
    lookup2 = tx.Lookup(inputs2, seq_size=3, embedding_shape=[n, m])
    concat1 = lookup.as_concat()
    concat2 = lookup2.as_concat()

    assert concat1.n_units is None
    assert concat2.n_units is not None

    concat3 = tx.SeqConcat(lookup, time_major=False)
    concat4 = tx.SeqConcat(lookup, seq_size=3, time_major=False)
    assert tx.shape_equal(concat4.shape, (None, 3 * 4))

    c1, c2 = concat1(), concat3()
    assert tx.tensor_equal(c1, c2)
    assert concat3.n_units is None
    assert concat4.n_units == 3 * lookup.n_units

    inputs.value = seq1
    l1 = lookup()
    inputs.value = seq2
    l2 = lookup()

    assert np.shape(l1)[-1] == m
    assert np.shape(l2)[-1] == m
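With seq_size=None the lookup accepts variable-length sequences, so the concatenated output's n_units cannot be known statically (hence None), whereas fixing seq_size=3 makes it 3 * lookup.n_units, as the asserts on concat1 through concat4 show.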
Code example #24
def test_lookup_sequence_transform():
    vocab_size = 4
    embed_dim = 2
    seq_size = 2

    inputs = tx.Input(n_units=seq_size, dtype=tf.int32)
    input_data = np.array([[2, 0], [1, 2], [0, 2]])
    lookup = tx.Lookup(inputs,
                       seq_size=seq_size,
                       embedding_shape=[vocab_size, embed_dim],
                       add_bias=True)
    concat_lookup = lookup.as_concat()
    seq_lookup = lookup.permute_batch_time()

    assert hasattr(lookup, "seq_size")

    inputs.value = input_data

    v1 = lookup()
    v2 = concat_lookup()
    v3 = seq_lookup()

    assert np.shape(v1) == (np.shape(input_data)[0], seq_size, embed_dim)
    assert np.shape(v2) == (np.shape(input_data)[0], seq_size * embed_dim)

    assert np.shape(v3) == (seq_size, np.shape(input_data)[0], embed_dim)
    assert tx.tensor_equal(v1[:, 0], v3[0])
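permute_batch_time swaps the batch and time axes, so v3 has shape [seq_size, batch, embed_dim] and v3[0] holds the first time step of every sample, which is exactly what the last assert compares against v1[:, 0].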
Code example #25
def test_sort_by_first():
    v1 = tf.constant([[3, 1], [2, 1]])
    sorted1 = [[1, 3], [1, 2]]
    v2 = tf.constant([[1, 2], [1, 2]])
    sorted2 = [[2, 1], [2, 1]]

    s1, s2 = tx.sort_by_first(v1, v2, ascending=True)

    assert tx.tensor_equal(s1, sorted1)
    assert tx.tensor_equal(s2, sorted2)

    s1, s2 = tx.sort_by_first([2, 1, 3], [1, 2, 3])
    sorted1 = [1, 2, 3]
    sorted2 = [2, 1, 3]
    assert tx.tensor_equal(s1, sorted1)
    assert tx.tensor_equal(s2, sorted2)
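A NumPy sketch of the behavior under test, assuming sort_by_first sorts the first tensor along its last axis and applies the same per-row permutation to the second (the helper is illustrative, not the tensorx implementation):

import numpy as np

def sort_by_first_np(a, b):
    a, b = np.asarray(a), np.asarray(b)
    order = np.argsort(a, axis=-1)
    return (np.take_along_axis(a, order, axis=-1),
            np.take_along_axis(b, order, axis=-1))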
Code example #26
def test_pairs():
    tensor1 = [[0], [1]]
    tensor2 = [1, 2, 3]
    expected = [[0, 1], [1, 1], [0, 2], [1, 2], [0, 3], [1, 3]]
    result = tx.pairs(tensor1, tensor2)

    assert tx.tensor_equal(result, expected)
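The expected matrix pins down the semantics: a Cartesian product in which the first tensor's elements vary fastest. A NumPy sketch (illustrative, not the tensorx implementation):

import numpy as np

def pairs_np(a, b):
    a, b = np.ravel(a), np.ravel(b)
    # first column cycles through `a`; second repeats each element of `b`
    return np.stack([np.tile(a, len(b)), np.repeat(b, len(a))], axis=1)

# pairs_np([[0], [1]], [1, 2, 3]) reproduces `expected` above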
Code example #27
def test_grid():
    shape_2d = [4, 4]
    xys = tx.grid_2d(shape_2d)
    shape_xys = tf.shape(xys)

    assert tf.rank(xys) == 2
    assert tx.tensor_equal(shape_xys, [shape_2d[0] * shape_2d[1], 2])
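The asserts fix the contract: grid_2d enumerates every cell of the grid as one coordinate row, yielding a rank-2 tensor of shape [rows * cols, 2]. A NumPy sketch (the row ordering is an assumption):

import numpy as np

def grid_2d_np(shape):
    ys, xs = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing="ij")
    return np.stack([ys.ravel(), xs.ravel()], axis=1)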
Code example #28
def test_module_rnn():
    """ Module + RNN integration
    """
    # test wrapping a Module around an RNN: the RNN has input dependencies
    # (previous state) that might not be given in the constructor
    x1 = tx.Input(tf.ones([1, 2, 3]), n_units=3, name="x1")
    x2 = tx.Input(tf.ones([1, 2, 3]), n_units=3, name="x2")
    rnn1 = tx.RNN(x1,
                  cell_config=tx.LSTMCell.config(n_units=4),
                  n_units=4,
                  stateful=False)
    rnn2 = tx.RNN(x1,
                  cell_config=tx.LSTMCell.config(n_units=4),
                  n_units=4,
                  stateful=False)

    out = tx.Concat(rnn1, rnn2)

    # add previous state as a dependency to a module
    m = tx.Module(inputs=x1,
                  output=out,
                  dependencies=rnn1.previous_state + rnn2.previous_state)

    m2 = m.reuse_with(x2)
    var_layers = set()
    for node in m2.graph.dependency_iter():
        if isinstance(node, tx.VariableLayer):
            var_layers.add(node)

    assert var_layers == set(rnn1.previous_state + rnn2.previous_state)
    assert tx.tensor_equal(m(), m2())
Code example #29
def test_coupled_gate():
    vocab_size = 4
    n_features = 3
    seq_size = 2

    inputs = tx.Input(init_value=np.array([[2, 0], [1, 2]]),
                      n_units=seq_size,
                      dtype=tf.int32,
                      constant=True)

    features1 = tx.Lookup(inputs,
                          seq_size,
                          embedding_shape=[vocab_size,
                                           n_features]).as_concat()
    features2 = tx.Lookup(inputs,
                          seq_size,
                          embedding_shape=[vocab_size,
                                           n_features]).as_concat()
    gate_w = tx.Linear(features1, seq_size, add_bias=True)
    coupled_gate = tx.CoupledGate(features1, features2, gate_w)

    sp_features1 = tx.ToSparse(features1)
    assert tx.tensor_equal(tf.sparse.to_dense(sp_features1()), features1())

    sp_gate = tx.CoupledGate(sp_features1, features2, gate_w)
    print(sp_gate())
    print(sp_gate.shape)
    # coupled_gate2 = coupled_gate.reuse_with(sp_features1, features2)

    r1 = coupled_gate()
Code example #30
def test_mul():
    # also tests graphs with constants
    inputs = tx.Constant(tf.constant(2), dtype=tf.float64)
    inputs2 = inputs * 2
    assert tx.tensor_equal(inputs2(), inputs() * 2)

    inputs2_fn = tf.function(inputs2.__call__)
    assert inputs2_fn() == inputs2()