def testConv1DRank1BatchEnsemble(self, alpha_initializer, gamma_initializer):
  """Batch-ensemble forward pass equals a per-member manual computation."""
  tf.keras.backend.set_learning_phase(1)  # training time
  ensemble_size = 3
  examples_per_model = 4
  input_dim = 5
  output_dim = 5
  inputs = tf.random.normal([examples_per_model, 4, input_dim])
  # Repeat the same examples once per ensemble member along the batch axis.
  batched_inputs = tf.tile(inputs, [ensemble_size, 1, 1])
  layer = rank1_bnn_layers.Conv1DRank1(
      output_dim,
      kernel_size=2,
      padding='same',
      alpha_initializer=alpha_initializer,
      gamma_initializer=gamma_initializer,
      alpha_regularizer=None,
      gamma_regularizer=None,
      activation=None,
      ensemble_size=ensemble_size)
  output = layer(batched_inputs)
  # Recompute each member's output by hand: scale the inputs by that member's
  # alpha, run the shared convolution, then scale by gamma and add the bias.
  per_member = []
  for i in range(ensemble_size):
    member_out = layer.conv1d(inputs * layer.alpha[i]) * layer.gamma[i]
    per_member.append(member_out + layer.bias[i])
  expected = tf.concat(per_member, axis=0)
  self.assertEqual(output.shape,
                   (ensemble_size * examples_per_model, 4, output_dim))
  self.assertAllClose(output, expected)
def testConv1DRank1Model(self):
  """Conv1DRank1 composes inside a Sequential model and contributes losses."""
  inputs = np.random.rand(3, 4, 1).astype(np.float32)
  conv_layer = rank1_bnn_layers.Conv1DRank1(
      3, kernel_size=2, padding='SAME', activation=tf.nn.relu)
  model = tf.keras.Sequential([
      conv_layer,
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(2, activation=None),
  ])
  outputs = model(inputs, training=True)
  self.assertEqual(outputs.shape, (3, 2))
  # Presumably one regularization loss each for the rank-1 alpha and gamma
  # factors — the test pins the count at two.
  self.assertLen(model.losses, 2)
def testConv1DRank1AlphaGamma(self, alpha_initializer, gamma_initializer,
                              all_close, use_additive_perturbation,
                              ensemble_size):
  """Checks output determinism as a function of the alpha/gamma initializers.

  Deterministic initializers must yield identical outputs across two calls on
  the same inputs (`all_close=True`); sampling initializers must not.

  Fix: the parameterized `use_additive_perturbation` and `ensemble_size`
  arguments were accepted but never forwarded to the layer constructor, so
  those configurations were silently not exercised (note the input batch is
  already sized as `5 * ensemble_size`, which shows the intent).
  """
  tf.keras.backend.set_learning_phase(1)  # training time
  inputs = np.random.rand(5 * ensemble_size, 4, 12).astype(np.float32)
  model = rank1_bnn_layers.Conv1DRank1(
      4,
      kernel_size=2,
      alpha_initializer=alpha_initializer,
      gamma_initializer=gamma_initializer,
      use_additive_perturbation=use_additive_perturbation,
      ensemble_size=ensemble_size,
      activation=None)
  outputs1 = model(inputs)
  outputs2 = model(inputs)
  # Default 'valid' padding with kernel_size=2 shrinks sequence length 4 -> 3.
  self.assertEqual(outputs1.shape, (5 * ensemble_size, 3, 4))
  if all_close:
    self.assertAllClose(outputs1, outputs2)
  else:
    self.assertNotAllClose(outputs1, outputs2)
  # Smoke-check that the layer's config serializes without error.
  model.get_config()