def testNGramLayerOutput(self):
  """Checks NGram layer values against hand-computed n-gram counts."""
  # Two sequences over a vocabulary of size 3 (0 is typically padding).
  token_ids = tf.constant([[0, 0, 0, 0, 1], [2, 1, 2, 1, 0]], dtype=tf.int32)
  ngram_layer = ngram.NGram(3, minval=1, maxval=3)
  actual = ngram_layer(token_ids)
  # Expected counts for all 1-grams and 2-grams, computed by hand.
  expected = tf.constant(
      [[4., 1., 0., 2., 0., 0., 0., 0., 0., 0., 0., 0.],
       [1., 2., 2., 0., 0., 0., 0., 0., 0., 0., 2., 0.]],
      dtype=tf.float32)
  actual_val, expected_val = self.evaluate([actual, expected])
  self.assertAllEqual(actual_val, expected_val)
def testNGramLayerShape(self):
  """Checks that the NGram layer output has the expected static shape."""
  batch_size = 2
  length = 8
  vocab_size = 3
  minval = 1
  maxval = 4
  # Random token ids in [0, vocab_size); values don't matter, only shapes.
  random_ids = tf.random_uniform(
      [batch_size, length], minval=0, maxval=vocab_size, dtype=tf.int32)
  ngram_layer = ngram.NGram(vocab_size, minval, maxval)
  result = self.evaluate(ngram_layer(random_ids))
  # One output column per possible n-gram of each order in [minval, maxval).
  num_ngrams = sum(vocab_size**order for order in range(minval, maxval))
  self.assertEqual(result.shape, (batch_size, num_ngrams))