def test_keras_model_multiple_inputs(self):
  """Round-trips a two-input compiled Keras model through TFF.

  Trains a `tff.learning.Model` built via `from_compiled_keras_model` for
  two batches, checks the aggregated local metrics, then assigns the
  trained weights back onto a fresh Keras model and verifies both models
  produce the same loss on the same batch.

  NOTE(review): a later test in this file reuses this exact method name;
  if both definitions live in the same TestCase class, this one is
  shadowed and never runs — confirm and rename if so.
  """
  keras_model = model_examples.build_multiple_inputs_keras_model()
  keras_model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
      loss=tf.keras.losses.MSE,
      metrics=[NumBatchesCounter(), NumExamplesCounter()])
  # Shape-only placeholder batch used to trace the model's input signature.
  placeholder_batch = collections.OrderedDict([
      ('x', [
          np.zeros([1, 1], dtype=np.float32),
          np.zeros([1, 1], dtype=np.float32)
      ]),
      ('y', np.zeros([1, 1], dtype=np.float32)),
  ])
  tff_model = keras_utils.from_compiled_keras_model(
      keras_model=keras_model, dummy_batch=placeholder_batch)

  batch_size = 2
  training_batch = {
      'x': [
          np.ones(shape=[batch_size, 1], dtype=np.float32),
          np.ones(shape=[batch_size, 1], dtype=np.float32)
      ],
      'y': np.asarray([[2.0], [2.0]]).astype(np.float32),
  }
  num_iterations = 2
  for _ in range(num_iterations):
    self.evaluate(tff_model.train_on_batch(training_batch))

  outputs = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(outputs['num_batches'], [num_iterations])
  self.assertEqual(outputs['num_examples'], [batch_size * num_iterations])
  # loss is reported as a (sum, count) pair.
  self.assertGreater(outputs['loss'][0], 0.0)
  self.assertEqual(outputs['loss'][1], batch_size * num_iterations)

  # Ensure we can assign the FL trained model weights to a new model.
  tff_weights = model_utils.ModelWeights.from_model(tff_model)
  fresh_keras_model = model_examples.build_multiple_inputs_keras_model()
  tff_weights.assign_weights_to(fresh_keras_model)
  fresh_keras_model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
      loss=tf.keras.losses.MSE,
      metrics=[NumBatchesCounter(), NumExamplesCounter()])
  loaded_model = keras_utils.from_compiled_keras_model(
      keras_model=fresh_keras_model, dummy_batch=placeholder_batch)

  # Identical weights must yield identical loss on the same batch.
  orig_model_output = tff_model.forward_pass(training_batch)
  loaded_model_output = loaded_model.forward_pass(training_batch)
  self.assertAlmostEqual(
      self.evaluate(orig_model_output.loss),
      self.evaluate(loaded_model_output.loss))
def test_keras_model_multiple_inputs(self):
  """Round-trips a two-input Keras model through `from_keras_model`.

  Runs `forward_pass` (training mode) for two steps on a
  `tff.learning.Model` built from an `input_spec`, checks the aggregated
  local metrics, then assigns the trained weights onto a fresh Keras
  model and verifies both models produce the same loss on the same batch.
  """
  # Fix: 'b' previously used shape=[1, 1], which is inconsistent with the
  # sibling specs ([None, 1]) and does not admit the batch_size=2 tensors
  # actually fed below; use an unconstrained batch dimension throughout.
  input_spec = collections.OrderedDict(
      x=collections.OrderedDict(
          a=tf.TensorSpec(shape=[None, 1], dtype=tf.float32),
          b=tf.TensorSpec(shape=[None, 1], dtype=tf.float32)),
      y=tf.TensorSpec(shape=[None, 1], dtype=tf.float32))
  model = model_examples.build_multiple_inputs_keras_model()
  tff_model = keras_utils.from_keras_model(
      keras_model=model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  batch_size = 2
  real_batch = collections.OrderedDict(
      x=collections.OrderedDict(
          a=np.ones(shape=[batch_size, 1], dtype=np.float32),
          b=np.ones(shape=[batch_size, 1], dtype=np.float32)),
      y=np.asarray([[2.0], [2.0]]).astype(np.float32))

  num_train_steps = 2
  for _ in range(num_train_steps):
    self.evaluate(tff_model.forward_pass(real_batch))

  m = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(m['num_batches'], [num_train_steps])
  self.assertEqual(m['num_examples'], [batch_size * num_train_steps])
  # loss is reported as a (sum, count) pair.
  self.assertGreater(m['loss'][0], 0.0)
  self.assertEqual(m['loss'][1], batch_size * num_train_steps)

  # Ensure we can assign the FL trained model weights to a new model.
  tff_weights = model_utils.ModelWeights.from_model(tff_model)
  keras_model = model_examples.build_multiple_inputs_keras_model()
  tff_weights.assign_weights_to(keras_model)
  loaded_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  # Identical weights must yield identical loss on the same batch.
  orig_model_output = tff_model.forward_pass(real_batch)
  loaded_model_output = loaded_model.forward_pass(real_batch)
  self.assertAlmostEqual(
      self.evaluate(orig_model_output.loss),
      self.evaluate(loaded_model_output.loss))