Example #1
 def test_squeeze(self):
     """Test invoking Squeeze in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(5, 1, 4).astype(np.float32)
             result = layers.Squeeze()(input)
             assert result.shape == (5, 4)
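Example #1 above, and every snippet that follows, is a method from DeepChem's eager-mode layer test suite and relies on the same module-level setup that these extracts omit. A minimal sketch of the imports they assume (the exact module paths, in particular the location of the `layers` module, are an assumption and may differ between DeepChem/TensorFlow versions):

# Assumed imports for all of the test snippets in this section.
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe               # provides tfe.IsolateTest
from tensorflow.python.eager import context          # provides context.eager_mode()
import deepchem.models.tensorgraph.layers as layers  # the `layers` module under test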
Example #2
 def test_max_pool_1d(self):
     """Test invoking MaxPool1D in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(4, 6, 8).astype(np.float32)
             result = layers.MaxPool1D(strides=2)(input)
             assert result.shape == (4, 3, 8)
Example #3
 def test_reshape(self):
     """Test invoking Reshape in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(5, 10, 4).astype(np.float32)
             result = layers.Reshape((100, 2))(input)
             assert result.shape == (100, 2)
Example #4
 def test_max_pool_3d(self):
     """Test invoking MaxPool3D in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(2, 4, 6, 8, 2).astype(np.float32)
             result = layers.MaxPool3D()(input)
             assert result.shape == (2, 2, 3, 4, 2)
Example #5
 def test_cast(self):
     """Test invoking Cast in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(5, 3)
             result = layers.Cast(dtype=tf.float32)(input)
             assert result.dtype == tf.float32
Example #6
 def test_constant(self):
     """Test invoking Constant in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             value = np.random.rand(5, 4).astype(np.float32)
             result = layers.Constant(value)()
             assert np.array_equal(result, value)
Example #7
    def test_vina_free_energy(self):
        """Test invoking VinaFreeEnergy in eager mode."""
        with context.eager_mode():
            with tfe.IsolateTest():
                n_atoms = 5
                m_nbrs = 1
                ndim = 3
                nbr_cutoff = 1
                start = 0
                stop = 4
                X = np.random.rand(n_atoms, ndim).astype(np.float32)
                Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)
                layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim,
                                              nbr_cutoff, start, stop)
                result = layer(X, Z)
                assert len(layer.variables) == 6
                assert result.shape == tuple()

                # Creating a second layer should produce different results, since it has
                # different random weights.

                layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim,
                                               nbr_cutoff, start, stop)
                result2 = layer2(X, Z)
                assert not np.allclose(result, result2)

                # But evaluating the first layer again should produce the same result as before.

                result3 = layer(X, Z)
                assert np.allclose(result, result3)
Example #8
 def test_flatten(self):
     """Test invoking Flatten in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(5, 10, 4).astype(np.float32)
             result = layers.Flatten()(input)
             assert result.shape == (5, 40)
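The assertion above only pins down the output shape. Assuming Flatten keeps the leading batch dimension and collapses the remaining dimensions in row-major order (the usual tf.reshape behaviour; this is an assumption, not something the test states), the values could be checked element-wise as well:

# Hypothetical stronger check: compare against a row-major NumPy reshape.
assert np.array_equal(result, input.reshape(input.shape[0], -1))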
Example #9
    def test_alpha_share_layer(self):
        """Test invoking AlphaShareLayer in eager mode."""
        with context.eager_mode():
            with tfe.IsolateTest():
                batch_size = 10
                length = 6
                input1 = np.random.rand(batch_size, length).astype(np.float32)
                input2 = np.random.rand(batch_size, length).astype(np.float32)
                layer = layers.AlphaShareLayer()
                result = layer(input1, input2)
                assert input1.shape == result[0].shape
                assert input2.shape == result[1].shape

                # Creating a second layer should produce different results, since it has
                # different random weights.

                layer2 = layers.AlphaShareLayer()
                result2 = layer2(input1, input2)
                assert not np.allclose(result[0], result2[0])
                assert not np.allclose(result[1], result2[1])

                # But evaluating the first layer again should produce the same result as before.

                result3 = layer(input1, input2)
                assert np.allclose(result[0], result3[0])
                assert np.allclose(result[1], result3[1])
Example #10
    def test_conv_2d_transpose(self):
        """Test invoking Conv2DTranspose in eager mode."""
        with context.eager_mode():
            with tfe.IsolateTest():
                length = 4
                width = 5
                in_channels = 2
                filters = 3
                kernel_size = 2
                stride = 2
                batch_size = 10
                input = np.random.rand(batch_size, length, width,
                                       in_channels).astype(np.float32)
                layer = layers.Conv2DTranspose(filters,
                                               kernel_size=kernel_size,
                                               stride=stride)
                result = layer(input)
                assert result.shape == (batch_size, length * stride,
                                        width * stride, filters)
                assert len(layer.variables) == 2

                # Creating a second layer should produce different results, since it has
                # different random weights.

                layer2 = layers.Conv2DTranspose(filters,
                                                kernel_size=kernel_size,
                                                stride=stride)
                result2 = layer2(input)
                assert not np.allclose(result, result2)

                # But evaluating the first layer again should produce the same result as before.

                result3 = layer(input)
                assert np.allclose(result, result3)
Example #11
    def test_graph_embed_pool_layer(self):
        """Test invoking GraphEmbedPoolLayer in eager mode."""
        with context.eager_mode():
            with tfe.IsolateTest():
                V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
                adjs = np.random.uniform(size=(10, 100, 5,
                                               100)).astype(np.float32)
                layer = layers.GraphEmbedPoolLayer(num_vertices=6)
                result = layer(V, adjs)
                assert result[0].shape == (10, 6, 50)
                assert result[1].shape == (10, 6, 5, 6)

                # Creating a second layer should produce different results, since it has
                # different random weights.

                layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)
                result2 = layer2(V, adjs)
                assert not np.allclose(result[0], result2[0])
                assert not np.allclose(result[1], result2[1])

                # But evaluating the first layer again should produce the same result as before.

                result3 = layer(V, adjs)
                assert np.allclose(result[0], result3[0])
                assert np.allclose(result[1], result3[1])
Example #12
    def test_time_series_dense(self):
        """Test invoking TimeSeriesDense in eager mode."""
        with context.eager_mode():
            with tfe.IsolateTest():
                in_dim = 2
                out_dim = 3
                n_steps = 6
                batch_size = 10
                input = np.random.rand(batch_size, n_steps,
                                       in_dim).astype(np.float32)
                layer = layers.TimeSeriesDense(out_dim)
                result = layer(input)
                assert result.shape == (batch_size, n_steps, out_dim)
                assert len(layer.variables) == 2

                # Creating a second layer should produce different results, since it has
                # different random weights.

                layer2 = layers.TimeSeriesDense(out_dim)
                result2 = layer2(input)
                assert not np.allclose(result, result2)

                # But evaluating the first layer again should produce the same result as before.

                result3 = layer(input)
                assert np.allclose(result, result3)
Example #13
    def test_lstm(self):
        """Test invoking LSTM in eager mode."""
        with context.eager_mode():
            with tfe.IsolateTest():
                batch_size = 10
                n_hidden = 7
                in_channels = 4
                n_steps = 6
                input = np.random.rand(batch_size, n_steps,
                                       in_channels).astype(np.float32)
                layer = layers.LSTM(n_hidden, batch_size)
                result, state = layer(input)
                assert result.shape == (batch_size, n_steps, n_hidden)
                assert len(layer.variables) == 2

                # Creating a second layer should produce different results, since it has
                # different random weights.

                layer2 = layers.LSTM(n_hidden, batch_size)
                result2, state2 = layer2(input)
                assert not np.allclose(result, result2)

                # But evaluating the first layer again should produce the same result as before.

                result3, state3 = layer(input)
                assert np.allclose(result, result3)

                # But if we specify a different starting state, that should produce a
                # different result.

                result4, state4 = layer(input, initial_state=state3)
                assert not np.allclose(result, result4)
Example #14
 def test_sigmoid(self):
     """Test invoking Sigmoid in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(5, 10).astype(np.float32)
             result = layers.Sigmoid()(input)
             expected = tf.nn.sigmoid(input)
             assert np.allclose(result, expected)
Example #15
 def test_relu(self):
     """Test invoking ReLU in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.normal(size=(5, 10)).astype(np.float32)
             result = layers.ReLU()(input)
             expected = tf.nn.relu(input)
             assert np.allclose(result, expected)
Example #16
 def test_gather(self):
     """Test invoking Gather in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(5).astype(np.float32)
             indices = [[1], [3]]
             result = layers.Gather()(input, indices)
             assert np.array_equal(result, [input[1], input[3]])
Example #17
 def test_repeat(self):
     """Test invoking Repeat in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(5, 4).astype(np.float32)
             result = layers.Repeat(3)(input)
             assert result.shape == (5, 3, 4)
             assert np.array_equal(result[:, 0, :], result[:, 1, :])
Example #18
 def test_reduce_sum(self):
     """Test invoking ReduceSum in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input = np.random.rand(5, 10).astype(np.float32)
             result = layers.ReduceSum(axis=1)(input)
             assert result.shape == (5, )
             assert np.allclose(result, np.sum(input, axis=1))
Example #19
 def test_sluice_loss(self):
     """Test invoking SluiceLoss in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input1 = np.ones((3, 4)).astype(np.float32)
             input2 = np.ones((2, 2)).astype(np.float32)
             result = layers.SluiceLoss()(input1, input2)
             assert np.allclose(result, 40.0)
Example #20
 def test_l2_loss(self):
     """Test invoking L2Loss in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input1 = np.random.rand(5, 10).astype(np.float32)
             input2 = np.random.rand(5, 10).astype(np.float32)
             result = layers.L2Loss()(input1, input2)
             expected = np.mean((input1 - input2)**2, axis=1)
             assert np.allclose(result, expected)
Example #21
 def test_variable(self):
     """Test invoking Variable in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             value = np.random.rand(5, 4).astype(np.float32)
             layer = layers.Variable(value)
             result = layer()
             assert np.array_equal(result.numpy(), value)
             assert len(layer.variables) == 1
Example #22
 def test_weighted_error(self):
     """Test invoking WeightedError in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input1 = np.random.rand(5, 10).astype(np.float32)
             input2 = np.random.rand(5, 10).astype(np.float32)
             result = layers.WeightedError()(input1, input2)
             expected = np.sum(input1 * input2)
             assert np.allclose(result, expected)
Example #23
 def test_hinge_loss(self):
     """Test invoking HingeLoss in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             n_labels = 1
             n_logits = 1
             logits = np.random.rand(n_logits).astype(np.float32)
             labels = np.random.rand(n_labels).astype(np.float32)
             result = layers.HingeLoss()(labels, logits)
             assert result.shape == (n_labels, )
Example #24
 def test_ani_feat(self):
     """Test invoking ANIFeat in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             batch_size = 10
             max_atoms = 5
             input = np.random.rand(batch_size, max_atoms,
                                    4).astype(np.float32)
             layer = layers.ANIFeat(max_atoms=max_atoms)
             result = layer(input)  # only checks that the call runs; the output shape is not asserted
Example #25
 def test_concat(self):
     """Test invoking Concat in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input1 = np.random.rand(5, 10).astype(np.float32)
             input2 = np.random.rand(5, 4).astype(np.float32)
             result = layers.Concat()(input1, input2)
             assert result.shape == (5, 14)
             assert np.array_equal(input1, result[:, :10])
             assert np.array_equal(input2, result[:, 10:])
Example #26
 def test_stack(self):
     """Test invoking Stack in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input1 = np.random.rand(5, 4).astype(np.float32)
             input2 = np.random.rand(5, 4).astype(np.float32)
             result = layers.Stack()(input1, input2)
             assert result.shape == (5, 2, 4)
             assert np.array_equal(input1, result[:, 0, :])
             assert np.array_equal(input2, result[:, 1, :])
Example #27
 def test_reduce_square_difference(self):
     """Test invoking ReduceSquareDifference in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input1 = np.random.rand(5, 10).astype(np.float32)
             input2 = np.random.rand(5, 10).astype(np.float32)
             result = layers.ReduceSquareDifference(axis=1)(input1, input2)
             assert result.shape == (5, )
             assert np.allclose(result, np.mean((input1 - input2)**2,
                                                axis=1))
Example #28
 def test_weighted_linear_combo(self):
     """Test invoking WeightedLinearCombo in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             input1 = np.random.rand(5, 10).astype(np.float32)
             input2 = np.random.rand(5, 10).astype(np.float32)
             layer = layers.WeightedLinearCombo()
             result = layer(input1, input2)
             assert len(layer.variables) == 2
             expected = input1 * layer.variables[
                 0] + input2 * layer.variables[1]
             assert np.allclose(result, expected)
Example #29
 def test_batch_norm(self):
     """Test invoking BatchNorm in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             batch_size = 10
             n_features = 5
             input = np.random.rand(batch_size,
                                    n_features).astype(np.float32)
             layer = layers.BatchNorm()
             result = layer(input)
             assert result.shape == (batch_size, n_features)
             assert len(layer.variables) == 4
Example #30
 def test_sparse_softmax_cross_entropy(self):
     """Test invoking SparseSoftMaxCrossEntropy in eager mode."""
     with context.eager_mode():
         with tfe.IsolateTest():
             batch_size = 10
             n_features = 5
             logits = np.random.rand(batch_size,
                                     n_features).astype(np.float32)
             # Labels must be integer class indices in [0, n_features); casting
             # np.random.rand() output to int32 would make every label 0.
             labels = np.random.randint(0, n_features, batch_size).astype(np.int32)
             result = layers.SparseSoftMaxCrossEntropy()(labels, logits)
             expected = tf.nn.sparse_softmax_cross_entropy_with_logits(
                 labels=labels, logits=logits)
             assert np.allclose(result, expected)
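For reference, the quantity that tf.nn.sparse_softmax_cross_entropy_with_logits returns in this last example can also be written directly in NumPy. A minimal sketch (the helper name is ours, not part of the test suite):

import numpy as np

def sparse_softmax_cross_entropy(labels, logits):
    """Per-example cross entropy between integer class labels and unnormalized logits."""
    # Numerically stable log-softmax over the class axis.
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # Negative log-probability of the true class for each row.
    return -log_probs[np.arange(len(labels)), labels]

# Should agree with the TensorFlow op up to floating-point error:
# np.allclose(sparse_softmax_cross_entropy(labels, logits), expected)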