def test_layer_creation(self):
  vocab_size = 31
  embedding_width = 27
  test_layer = on_device_embedding.OnDeviceEmbedding(
      vocab_size=vocab_size, embedding_width=embedding_width)
  # Create a 2-dimensional input (the first dimension is implicit).
  sequence_length = 23
  input_tensor = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
  output_tensor = test_layer(input_tensor)

  # The output should be the same as the input, save that it has an extra
  # embedding_width dimension on the end.
  expected_output_shape = [None, sequence_length, embedding_width]
  self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
  self.assertEqual(output_tensor.dtype, tf.float32)
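
# Illustrative sketch, not part of the original suite: the same shape contract
# also holds for the stock tf.keras.layers.Embedding layer, which maps int ids
# of shape (batch, sequence_length) to floats of shape
# (batch, sequence_length, embedding_width). The method name and the use of
# the stock layer as a reference are assumptions made for illustration only.
def test_shape_contract_sketch_with_stock_embedding(self):
  vocab_size = 31
  embedding_width = 27
  sequence_length = 23
  reference_layer = tf.keras.layers.Embedding(
      input_dim=vocab_size, output_dim=embedding_width)
  input_tensor = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
  output_tensor = reference_layer(input_tensor)

  # Same expectation as above: an extra embedding_width axis is appended.
  self.assertEqual([None, sequence_length, embedding_width],
                   output_tensor.shape.as_list())
  self.assertEqual(tf.float32, output_tensor.dtype)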
def test_layer_invocation(self):
  vocab_size = 31
  embedding_width = 27
  test_layer = on_device_embedding.OnDeviceEmbedding(
      vocab_size=vocab_size, embedding_width=embedding_width)
  # Create a 2-dimensional input (the first dimension is implicit).
  sequence_length = 23
  input_tensor = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
  output_tensor = test_layer(input_tensor)

  # Create a model from the test layer.
  model = tf.keras.Model(input_tensor, output_tensor)

  # Invoke the model on test data. We can't validate the output data itself
  # (the NN is too complex), but this will rule out structural runtime errors.
  batch_size = 3
  input_data = np.random.randint(
      vocab_size, size=(batch_size, sequence_length))
  output = model.predict(input_data)
  self.assertEqual(tf.float32, output.dtype)
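
# Illustrative sketch, not part of the original suite: a stronger value check
# is possible if the embedding table can be read back as the layer's single
# weight and the default configuration performs a plain table lookup without
# scaling; both of these are assumptions here, not documented guarantees.
def test_invocation_matches_embedding_table_sketch(self):
  vocab_size = 31
  embedding_width = 27
  test_layer = on_device_embedding.OnDeviceEmbedding(
      vocab_size=vocab_size, embedding_width=embedding_width)
  sequence_length = 23
  input_tensor = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
  output_tensor = test_layer(input_tensor)
  model = tf.keras.Model(input_tensor, output_tensor)

  batch_size = 3
  input_data = np.random.randint(
      vocab_size, size=(batch_size, sequence_length))
  output = model.predict(input_data)

  # Assumed weight layout: the first (and only) weight is the
  # (vocab_size, embedding_width) embedding table.
  table = test_layer.get_weights()[0]
  expected = table[input_data]  # NumPy row gather -> (batch, seq, width).
  np.testing.assert_allclose(expected, output, rtol=1e-6)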