Example #1
import pytest
import tensorflow as tf
import tensorx as tx


def test_linear_rank3():
    val = tf.constant([[[1], [1]], [[2], [2]]])
    x1 = tx.Input(val, dtype=tf.float32)
    x2 = tx.Transpose(x1)

    assert val.shape[1:] == x1.shape[1:]

    x1_flat = tx.Reshape(x1, [-1, 1])

    linear1 = tx.Linear(x1, n_units=2)
    linear2 = tx.Linear(x2,
                        weights_shape=[2, 1],
                        weights=linear1.weights,
                        transpose_weights=True)

    # reusing linear1 with transpose_weights=True is not allowed because it
    # would change the layer definition (n_units etc.), so it must raise
    with pytest.raises(ValueError):
        linear1.reuse_with(x2, transpose_weights=True)

    # reuse the same weights on the flattened input, then restore rank 3
    linear_flat = linear1.reuse_with(x1_flat, shape=(4, 2))
    x1_tensor = x1()
    new_shape = x1_tensor.shape[:-1] + [2]

    linear_flat = tx.Reshape(linear_flat, new_shape)

    assert tx.tensor_equal(linear1(), linear_flat())
    assert tx.tensor_equal(tf.shape(linear2()), [1, 2, 1])
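For reference, the transposed weight sharing that linear2 performs above boils down to a matmul that uses the same kernel both ways. A minimal sketch in plain TensorFlow (the shapes here are illustrative assumptions, not taken from the test):

import tensorflow as tf

w = tf.random.normal([1, 2])                # shared kernel: [input_dim, n_units]

x1 = tf.ones([4, 1])                        # flattened input with 1 feature
y1 = tf.matmul(x1, w)                       # forward projection: [4, 2]

x2 = tf.ones([4, 2])                        # input living in the output space
y2 = tf.matmul(x2, w, transpose_b=True)     # same weights, transposed: [4, 1]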
Example #2
import tensorflow as tf
import tensorx as tx


def test_reshape_shape():
    x = tf.reshape(tf.range(9), [3, 3, 1])
    x = tx.Input(x, dtype=tf.float32)
    flat = tx.Reshape(x, [-1, 1])
    assert flat.shape[0] is None
    assert flat.shape[-1] == 1

    # with a fully specified input shape, the flattened dim can be inferred
    x = tf.reshape(tf.range(9), [3, 3, 1])
    x = tx.Input(x, shape=[3, 3, 1], dtype=tf.float32)
    print(x.shape)
    flat = tx.Reshape(x, [-1, 1])
    print(flat.shape)
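The contrast this test is after can be reproduced in plain TensorFlow: a -1 target dim only resolves to a concrete static value when the input's static shape is fully known. A small sketch, independent of tensorx:

import tensorflow as tf

# fully known input shape: the -1 resolves statically to 9
print(tf.reshape(tf.range(9), [-1, 1]).shape)      # (9, 1)

# unknown leading dim: the -1 stays symbolic (None) during tracing
@tf.function(input_signature=[tf.TensorSpec([None, 3, 1])])
def flatten(t):
    out = tf.reshape(t, [-1, 1])
    print(out.shape)                               # (None, 1)
    return out

flatten.get_concrete_function()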
Example #3
            def categorical_loss(labels, logits):
                # labels arrive as a batch of class ids, e.g. [[1, 2], [3, 4]];
                # transpose + flatten yields [1, 3, 2, 4] so that the time
                # steps line up with the ordering of the logits
                labels = tx.Transpose(labels)
                labels = tx.Reshape(labels, [-1])
                labels = tx.dense_one_hot(labels, num_cols=vocab_size)
                loss = tx.categorical_cross_entropy(labels=labels,
                                                    logits=logits)

                return tf.reduce_mean(loss)
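A quick check of the reordering described in the comment, using plain TensorFlow (independent of the tensorx layers above):

import tensorflow as tf

labels = tf.constant([[1, 2], [3, 4]])            # [batch, time]
flat = tf.reshape(tf.transpose(labels), [-1])     # time-major, then flattened
print(flat.numpy())                               # [1 3 2 4]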
Example #4
import tensorflow as tf
import tensorx as tx


def test_transpose_reshape():
    x = tf.reshape(tf.range(9), [3, 3])
    x2 = tx.Reshape(tf.range(9), [3, 3])

    assert tx.tensor_equal(x2(), x)
    assert tx.tensor_equal(x2.compute(tf.range(9)), x)

    t = tf.transpose(x)
    y = tx.Transpose(t)
    assert tx.tensor_equal(y(), x)
    assert tx.tensor_equal(y.compute(x), t)

    x = tf.reshape(tf.ones([18]), [-1, 3, 2])

    x2 = tx.Reshape(tf.ones([18]), [-1, 3, 2])

    assert x.shape == [3, 3, 2]
    assert x.shape == x2.shape
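One detail implicit in the asserts above: the -1 is resolved by dividing the element count by the product of the known dims (18 / (3 * 2) = 3), and the reshape fails when the division is not exact. A small check in plain TensorFlow:

import tensorflow as tf

print(tf.reshape(tf.ones([18]), [-1, 3, 2]).shape)   # (3, 3, 2)

try:
    tf.reshape(tf.ones([18]), [-1, 4])               # 18 is not divisible by 4
except tf.errors.InvalidArgumentError:
    print("reshape failed as expected")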
Example #5
import numpy as np
import tensorflow as tf
import tensorx as tx
from tensorflow.contrib.compiler import jit

# TF1-era snippet: jit_scope enables XLA compilation for the ops built inside
# it (the tf.contrib location is assumed here, matching the tf.ConfigProto use)
jit_scope = jit.experimental_jit_scope

input_size = 10000
var_size = 500
batch_size = 20
seq_size = 30

inputs = tf.constant(np.random.randint(0, 10, size=[batch_size, seq_size]), name="inputs")
targets = tf.constant(np.random.randint(0, 10, size=[batch_size * seq_size]), name="targets")
targets = tf.one_hot(targets, input_size)

inputs = tx.TensorLayer(inputs)

with jit_scope():
    with tf.name_scope("scope1"):
        lookup = tx.Lookup(inputs, seq_size=seq_size, lookup_shape=[input_size, var_size], name="lookup")
        seq = lookup.permute_batch_time()
        seq = tx.Reshape(seq, [-1, var_size], name="flatten")
        mul1 = tx.Linear(seq, input_size, name="test_logits")
        mul2 = tx.Linear(seq,
                         n_units=input_size,
                         shared_weights=lookup.weights,
                         transpose_weights=True,
                         name="shared_embeddings")

    with tf.name_scope("scope2"):
        mul1 = mul1.reuse_with(seq)
        mul2 = mul2.reuse_with(seq)

rnd_loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=targets, logits=mul1))
rnd_loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=targets, logits=mul2))

config = tf.ConfigProto()
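For reference, a minimal plain-TensorFlow sketch of what the shared_embeddings layer (mul2) computes: output logits tied to the lookup table through a transposed matmul, with shapes matching the constants above:

import numpy as np
import tensorflow as tf

embeddings = tf.constant(np.random.randn(10000, 500), dtype=tf.float32)  # lookup table: [vocab, dim]
h = tf.constant(np.random.randn(20 * 30, 500), dtype=tf.float32)         # flattened sequence states
logits = tf.matmul(h, embeddings, transpose_b=True)                      # [600, 10000]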