Example #1
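The snippets below appear to assume the following imports throughout; reading tx as the tensorx library is an inference from the API style, not something the snippets state:

import pytest
import numpy as np
import tensorflow as tf
import tensorx as tx
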
def test_wrap_shape():
    x = tx.Input(n_units=3)
    t = tx.Transpose(x, n_units=3)
    assert t.shape[-1] == 3

    w = tx.Wrap(t, wrap_fn=lambda layer: layer * 2)
    assert w.shape == [3, 3]
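For orientation, a hedged plain-TF reading of what this wrapped graph computes, assuming Wrap simply applies wrap_fn to the output of the layer it wraps:

data = tf.ones([4, 3])             # a hypothetical [batch, 3] feed for the Input layer
expected = tf.transpose(data) * 2  # Transpose, then wrap_fn's layer * 2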
Example #2
def test_linear_rank3():
    val = tf.constant([[[1], [1]], [[2], [2]]])
    x1 = tx.Input(val, dtype=tf.float32)
    x2 = tx.Transpose(x1)

    assert val.shape[1:] == x1.shape[1:]

    x1_flat = tx.Reshape(x1, [-1, 1])

    linear1 = tx.Linear(x1, n_units=2)
    linear2 = tx.Linear(x2,
                        weights_shape=[2, 1],
                        weights=linear1.weights,
                        transpose_weights=True)

    # reuse_with can't transpose the shared weights here: that would change
    # the layer definition (n_units, etc.), so a ValueError is expected
    with pytest.raises(ValueError):
        linear1.reuse_with(x2, transpose_weights=True)
        pytest.fail(
            "can't reuse with transpose weights while changing the layer definition"
        )

    linear_flat = linear1.reuse_with(x1_flat, shape=(4, 2))
    x1_tensor = x1()
    new_shape = x1_tensor.shape[:-1] + [2]

    linear_flat = tx.Reshape(linear_flat, new_shape)

    assert tx.tensor_equal(linear1(), linear_flat())
    assert tx.tensor_equal(tf.shape(linear2()), [1, 2, 1])
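A plain-TF sketch of the weight sharing above (a sketch only, assuming transpose_weights means linear2 multiplies by linear1's [1, 2] kernel transposed):

w = tf.ones([1, 2])                                 # stands in for linear1's kernel: [in=1, out=2]
x2_val = tf.transpose(tf.cast(val, tf.float32))     # default perm reverses dims: [2, 2, 1] -> [1, 2, 2]
y2 = tf.tensordot(x2_val, tf.transpose(w), axes=1)  # contracts last axis: [1, 2, 2] x [2, 1] -> [1, 2, 1]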
Example #3
def test_module_shape():
    x = tx.Input(n_units=3)
    t = tx.Transpose(x, n_units=3)
    mul = t * 2
    assert mul.shape == [3, 3]
    m = tx.Module(output=mul, inputs=x)
    assert m.n_units == 3
    m()
Example #4
def categorical_loss(labels, logits):
    # labels arrive as a batch of class ids, e.g. [[1, 2], [3, 4]] -> [1, 3, 2, 4]:
    # transpose + flatten orders the time steps to match the logits
    labels = tx.Transpose(labels)
    labels = tx.Reshape(labels, [-1])
    # vocab_size is provided by the enclosing scope
    labels = tx.dense_one_hot(labels, num_cols=vocab_size)
    loss = tx.categorical_cross_entropy(labels=labels,
                                        logits=logits)

    return tf.reduce_mean(loss)
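A quick worked check of the reordering described in that comment (plain TF, eager mode):

labels = tf.constant([[1, 2], [3, 4]])         # [batch, time]
flat = tf.reshape(tf.transpose(labels), [-1])  # transpose -> [[1, 3], [2, 4]]
assert flat.numpy().tolist() == [1, 3, 2, 4]   # time-major order, matching the logits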
Example #5
def test_transpose():
    tensor = tf.ones([3, 3])
    trans_tensor = tf.transpose(tensor)
    trans_layer = tx.Transpose(tensor, n_units=3)

    assert trans_layer.input.shape == [3, 3]
    assert trans_layer.shape == trans_tensor.shape

    tensor = tf.ones([2, 3, 4])
    perm = [2, 0, 1]
    trans_tensor = tf.transpose(tensor, perm)
    trans_layer = tx.Transpose(tensor, perm)

    assert trans_layer.input.n_units == tensor.shape[-1]
    assert trans_layer.shape == trans_tensor.shape
    assert trans_layer.n_units == tensor.shape[perm[-1]]

    inputs = tx.Input(shape=tf.TensorShape([None, 3]))
    trans = tx.Transpose(inputs)
    assert trans.shape[-1] is None
    assert trans.shape[0] == 3
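As a reminder of the perm semantics the asserts rely on: output dimension i is input dimension perm[i], so shape [2, 3, 4] with perm [2, 0, 1] becomes [4, 2, 3]:

assert tf.transpose(tf.ones([2, 3, 4]), perm=[2, 0, 1]).shape == [4, 2, 3]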
Example #6
def test_mul_shape():
    x = tx.Input(n_units=3)
    m = x * 2
    assert m.shape[0] is None
    assert m.shape[-1] == 3  # == rather than "is": identity comparison with ints is fragile

    t = tx.Transpose(x)
    assert t.shape[-1] is None
    t = tx.Transpose(x, n_units=3)
    assert t.shape == [3, 3]
    m = t * 2
    assert m.shape == [3, 3]

    x = tx.Input(n_units=3)  # [None,3]
    t = tx.Transpose(x)  # [None,None]
    assert t.shape[0] == 3
    assert t.shape[-1] is None

    m = t * 2  # [None,None]

    # TensorShape([3, None]) != TensorShape([3, None]): unknown (None) dims
    # are not assumed equal, so each dimension is checked on its own
    assert m.shape[0] == 3
    assert m.shape[-1] is None
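This is why the test checks dimensions individually: with partially-known shapes, compatibility rather than equality is the well-defined relation. A TF 2.x sketch:

s1, s2 = tf.TensorShape([3, None]), tf.TensorShape([3, None])
assert s1.is_compatible_with(s2)  # the unknown dims may match, but are not known to be equal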
Example #7
def test_wrap_transpose():
    tensor = tf.reshape(tf.range(9), [3, 3])
    t = tf.transpose(tensor)

    t_layer = tx.Transpose(t, n_units=3)
    assert t_layer.shape == (3, 3)

    mul2 = tx.Wrap(t_layer, wrap_fn=lambda layer: layer * 2)
    mul2_2 = mul2.reuse_with(tensor)

    assert tx.tensor_equal(mul2_2(), t * 2)
    assert tx.tensor_equal(mul2(tensor), t * 2)
    assert tx.tensor_equal(mul2(t), mul2())
    assert tx.tensor_equal(mul2.compute(t), mul2())
    assert tx.tensor_equal(mul2.compute(t), tf.transpose(t) * 2)
    assert tx.tensor_equal(t_layer.compute(t), tensor)
    assert tx.tensor_equal(mul2_2.compute(tensor), mul2_2())
Example #8
def test_transpose_reshape():
    x = tf.reshape(tf.range(9), [3, 3])
    x2 = tx.Reshape(tf.range(9), [3, 3])

    assert tx.tensor_equal(x2(), x)
    assert tx.tensor_equal(x2.compute(tf.range(9)), x)

    t = tf.transpose(x)
    y = tx.Transpose(t)
    assert tx.tensor_equal(y(), x)
    assert tx.tensor_equal(y.compute(x), t)

    x = tf.reshape(tf.ones([18]), [-1, 3, 2])

    x2 = tx.Reshape(tf.ones([18]), [-1, 3, 2])
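    # in both reshapes the -1 is inferred from the element count: 18 / (3 * 2) = 3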

    assert x.shape == [3, 3, 2]
    assert x.shape == x2.shape
Example #9
def test_model_var_inputs():
    # tests a train graph with extra inputs that do not need to be fed (e.g. variable state)
    n_features = 5
    embed_size = 4
    hidden_dim = 3
    seq_size = 3
    out_size = 2
    batch_size = 2

    x = tx.Input(np.random.random([batch_size, seq_size]),
                 n_units=seq_size,
                 dtype=tf.int32)
    y = tx.Input(np.random.random([batch_size, out_size]),
                 n_units=out_size,
                 dtype=tf.float32)
    lookup = tx.Lookup(x,
                       seq_size=seq_size,
                       embedding_shape=[n_features, embed_size])
    # seq = lookup.permute_batch_time()
    seq = tx.Transpose(lookup, [1, 0, 2])

    rnn1 = tx.RNN(seq, cell_config=tx.RNNCell.config(n_units=hidden_dim))
    y_ = tx.Linear(rnn1[seq_size - 1], n_units=out_size)

    # y_ = tx.Linear(tx.SeqConcat(lookup, seq_size=seq_size), n_units=out_size)

    # @tx.layer(n_units=2, dtype=tf.float32, name="loss")
    # def loss(pred, labels):
    #    return tx.mse(pred, labels)

    model = tx.Model(run_inputs=x,
                     run_outputs=y_,
                     train_inputs=[x, y],
                     train_outputs=y_,
                     train_loss=tx.MSE(y_, y))

    # model.draw("test.pdf")

    model.set_optimizer(tf.optimizers.SGD, lr=0.5)

    data1 = [[0, 1, 2], [2, 1, 0]]
    data2 = [[0., 1.], [1., 0.]]

    model.train_step(input_feed={x: data1, y: data2})
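For intuition, a rough plain-TF analogue of the train step above; this is a sketch only, forward and variables are hypothetical stand-ins, and tx.Model's actual internals may differ:

opt = tf.optimizers.SGD(learning_rate=0.5)
with tf.GradientTape() as tape:
    # forward(data1) stands in for running the y_ graph; variables for its trainable weights
    loss = tf.reduce_mean(tf.square(forward(data1) - data2))  # MSE, as in tx.MSE(y_, y)
grads = tape.gradient(loss, variables)
opt.apply_gradients(zip(grads, variables))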
Example #10
                     kernel_u,
                     merge_fn=lambda l: tf.concat(l, axis=0))

# kernel = tx.Reshape(kernel, [-1, 4 * cell_units])

tf_zero_state = tf_cell.zero_state(batch_size, dtype=tf.float32)
tf_out, tf_state = tf_cell(t1.tensor, state=tf_zero_state)

# inject my internal state into TensorFlow lstm
tf_cell._kernel = tx_kernel
tf_out, tf_state = tf_cell(t1.tensor, state=tf_zero_state)

tx_rnn = tx.RNN(seq,
                cell_proto=lambda x, **kwargs: tx_cell.reuse_with(x, **kwargs),
                stateful=False)
tx_rnn = tx.Transpose(tx_rnn, [1, 0, 2])

# time_major controls the layout dynamic_rnn preserves:
# time-major inputs produce time-major outputs; batch-major inputs produce batch-major outputs
tf_rnn, tf_state = tf.nn.dynamic_rnn(
    cell=tf_cell,
    inputs=lookup.tensor,
    sequence_length=None,
    initial_state=tf_zero_state,
    time_major=False,
)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
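
Note that this fragment is TF1-era code (zero_state, tf.nn.dynamic_rnn, tf.Session); under TF 2.x those names are reachable through the compat layer:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode so tf.Session and tf.nn.dynamic_rnn work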