Example #1
    def test_linear(self):
        '''test linear'''
        inputs = tf.random_uniform(shape=[4, 5], dtype=tf.float32,
                                   maxval=1.0)  # A 2D tensor
        shape = [5, 4]
        output = cl.linear(inputs, 'test_linear0', shape)
        output_shape = [4, 4]
        self.assertAllEqual(tf.shape(output), output_shape)

        inputs = tf.random_uniform(shape=[2, 4, 5],
                                   dtype=tf.float32,
                                   maxval=1.0)  # A 3D tensor
        shape = [5, 4]
        output = cl.linear(inputs, 'test_linear1', shape)
        output_shape = [2, 4, 4]
        self.assertAllEqual(tf.shape(output), output_shape)

        # A 4D tensor [B, C, H, W]
        inputs = tf.random_uniform(shape=[2, 3, 4, 5],
                                   dtype=tf.float32,
                                   maxval=1.0)
        shape = [5, 4]
        output = cl.linear(inputs, 'test_linear2', shape)
        output_shape = [2, 3, 4, 4]
        self.assertAllEqual(tf.shape(output), output_shape)
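cl.linear is only called here, not defined; a minimal sketch of a dense layer applied to the last axis (initializer and variable names are assumptions, TF 1.x API) that would yield the shapes asserted above:

import tensorflow as tf

def linear(inputs, name, shape):
  """Dense layer on the last axis; `shape` is [in_dim, out_dim].

  tf.tensordot contracts only the last axis of `inputs` with the first axis
  of the weight matrix, so 2D, 3D and 4D inputs all keep their leading dims.
  """
  with tf.variable_scope(name):
    w = tf.get_variable('w', shape=shape,
                        initializer=tf.glorot_uniform_initializer())
    b = tf.get_variable('b', shape=[shape[1]],
                        initializer=tf.zeros_initializer())
    return tf.tensordot(inputs, w, axes=1) + b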
Example #2
    def test_tdnn(self):
        '''test tdnn'''
        # A 3D tensor [batch, in_width, in_channels]
        inputs = tf.random_uniform(shape=[2, 5, 3],
                                   dtype=tf.float32,
                                   maxval=1.0)
        in_dim = inputs.get_shape().as_list()[2]
        out_dim = 4
        context = [-2, -1, 0, 1, 2]
        output = cl.tdnn(inputs,
                         'test_tdnn0',
                         in_dim,
                         context,
                         out_dim,
                         method='splice_layer')
        out_shape = [2, 5, 4]
        self.assertAllEqual(tf.shape(output), out_shape)

        context = 2
        #output = cl.tdnn(inputs, 'test_tdnn1', in_dim, context, out_dim, method='splice_op')
        #self.assertAllEqual(tf.shape(output), out_shape)

        output = cl.tdnn(inputs,
                         'test_tdnn2',
                         in_dim,
                         context,
                         out_dim,
                         method='conv1d')
        self.assertAllEqual(tf.shape(output), out_shape)
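The conv1d branch of cl.tdnn is exercised with an integer context; a rough sketch of a TDNN realized as a 1-D convolution over time (padding choice and initializer are assumptions) that matches the [2, 5, 3] -> [2, 5, 4] check:

import tensorflow as tf

def tdnn_conv1d(inputs, name, in_dim, context, out_dim):
  """TDNN as a 1-D convolution: an integer context of 2 gives a kernel
  spanning 2 * context + 1 = 5 frames; 'SAME' padding keeps in_width."""
  with tf.variable_scope(name):
    kernel = tf.get_variable('kernel',
                             shape=[2 * context + 1, in_dim, out_dim],
                             initializer=tf.glorot_uniform_initializer())
    return tf.nn.conv1d(inputs, kernel, stride=1, padding='SAME')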
Example #3
def embedding_look_up(text_inputs, vocab_size, embedding_size):
  """Embedding layer."""
  with tf.variable_scope("embedding"):
    W = tf.get_variable(
        name='W',
        initializer=tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
    embedding_chars = tf.nn.embedding_lookup(W, text_inputs)
    embedding_chars_expanded = tf.expand_dims(embedding_chars, -1)
  return embedding_chars_expanded
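A short usage sketch for the function above (vocabulary size, sequence length and embedding size are illustrative):

text_inputs = tf.placeholder(tf.int32, shape=[None, 10])  # [batch, seq_len] token ids
embedded = embedding_look_up(text_inputs, vocab_size=5000, embedding_size=128)
# embedded has shape [batch, 10, 128, 1]; the trailing channel axis added by
# tf.expand_dims lets the result feed 2-D convolutions such as conv_pool below.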
Example #4
    def test_conv2d(self):
        '''test conv2d'''
        inputs = tf.random_uniform(shape=[2, 5, 5, 3],
                                   dtype=tf.float32,
                                   maxval=1.0)  # A 4D tensor
        filter_size = [3, 3]
        in_channels = inputs.get_shape().as_list()[3]
        out_channels = 4
        strides = [1, 1]
        output = cl.conv2d(inputs, 'test_conv2d', filter_size, in_channels,
                           out_channels, strides)
        output_shape = [2, 5, 5, 4]
        self.assertAllEqual(tf.shape(output), output_shape)
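cl.conv2d is not reproduced in this example; a minimal wrapper around tf.nn.conv2d consistent with the asserted shapes (padding choice and initializer are assumptions):

import tensorflow as tf

def conv2d(inputs, name, filter_size, in_channels, out_channels, strides):
  """2-D convolution with 'SAME' padding, so H and W are preserved:
  [2, 5, 5, 3] with 4 output channels -> [2, 5, 5, 4]."""
  with tf.variable_scope(name):
    kernel = tf.get_variable(
        'kernel',
        shape=[filter_size[0], filter_size[1], in_channels, out_channels],
        initializer=tf.glorot_uniform_initializer())
    return tf.nn.conv2d(inputs, kernel,
                        strides=[1, strides[0], strides[1], 1],
                        padding='SAME')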
Example #5
    def test_conv_pool(self):
        '''test conv pool'''
        # A 4D tensor [B, H, W, C]
        embedded_chars_expanded = tf.random_uniform(shape=[2, 7, 7, 1],
                                                    dtype=tf.float32,
                                                    maxval=1.0)
        filter_sizes = [3, 5]
        embedding_size = 3
        num_filters = 3
        sequence_length = 5
        output = cl.conv_pool(embedded_chars_expanded, filter_sizes,
                              embedding_size, num_filters, sequence_length)
        output_shape = [30, 6]
        self.assertAllEqual(tf.shape(output), output_shape)
Example #6
    def test_attention(self):
        '''test attention'''
        # A 3D tensor [B, T, D]
        inputs = tf.random_uniform(shape=[2, 100, 512],
                                   dtype=tf.float32,
                                   maxval=1.0)
        attention_size = 256
        output, alpha = cl.attention(inputs,
                                     attention_size,
                                     return_alphas=True)
        output_shape = [2, 512]
        alpha_shape = [2, 100, 1]
        self.assertAllEqual(tf.shape(output), output_shape)
        self.assertAllEqual(tf.shape(alpha), alpha_shape)
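cl.attention is only exercised, not defined, in this test; a sketch of additive attention pooling that yields the asserted [B, D] output and [B, T, 1] weights (scope and variable names are assumptions):

import tensorflow as tf

def attention(inputs, attention_size, return_alphas=False):
  """Additive attention over time: inputs [B, T, D] -> pooled output [B, D]."""
  with tf.variable_scope('attention'):
    hidden_size = inputs.get_shape().as_list()[-1]               # D
    w = tf.get_variable('att_w', [hidden_size, attention_size],
                        initializer=tf.glorot_uniform_initializer())
    b = tf.get_variable('att_b', [attention_size],
                        initializer=tf.zeros_initializer())
    u = tf.get_variable('att_u', [attention_size, 1],
                        initializer=tf.glorot_uniform_initializer())
    v = tf.tanh(tf.tensordot(inputs, w, axes=1) + b)             # [B, T, A]
    alphas = tf.nn.softmax(tf.tensordot(v, u, axes=1), axis=1)   # [B, T, 1]
    output = tf.reduce_sum(inputs * alphas, axis=1)              # [B, D]
  if return_alphas:
    return output, alphas
  return output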