def test_from_compiled_keras_model_fails_on_uncompiled_model(self):
  keras_model = model_examples.build_linear_regression_keras_functional_model(
      feature_dims=1)
  with self.assertRaisesRegex(ValueError, '`keras_model` must be compiled'):
    keras_utils.from_compiled_keras_model(
        keras_model=keras_model,
        dummy_batch=_create_dummy_batch(feature_dims=1))
def test_keras_model_multiple_inputs(self):
  model = model_examples.build_multiple_inputs_keras_model()
  model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
      loss=tf.keras.losses.MSE,
      metrics=[NumBatchesCounter(), NumExamplesCounter()])
  dummy_batch = collections.OrderedDict([
      ('x', [
          np.zeros([1, 1], dtype=np.float32),
          np.zeros([1, 1], dtype=np.float32)
      ]),
      ('y', np.zeros([1, 1], dtype=np.float32)),
  ])
  tff_model = keras_utils.from_compiled_keras_model(
      keras_model=model, dummy_batch=dummy_batch)

  batch_size = 2
  batch = {
      'x': [
          np.ones(shape=[batch_size, 1], dtype=np.float32),
          np.ones(shape=[batch_size, 1], dtype=np.float32)
      ],
      'y': np.asarray([[2.0], [2.0]]).astype(np.float32),
  }

  num_iterations = 2
  for _ in range(num_iterations):
    self.evaluate(tff_model.train_on_batch(batch))

  m = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(m['num_batches'], [num_iterations])
  self.assertEqual(m['num_examples'], [batch_size * num_iterations])
  self.assertGreater(m['loss'][0], 0.0)
  self.assertEqual(m['loss'][1], batch_size * num_iterations)

  # Ensure we can assign the FL trained model weights to a new model.
  tff_weights = model_utils.ModelWeights.from_model(tff_model)
  keras_model = model_examples.build_multiple_inputs_keras_model()
  tff_weights.assign_weights_to(keras_model)
  keras_model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
      loss=tf.keras.losses.MSE,
      metrics=[NumBatchesCounter(), NumExamplesCounter()])
  loaded_model = keras_utils.from_compiled_keras_model(
      keras_model=keras_model, dummy_batch=dummy_batch)

  orig_model_output = tff_model.forward_pass(batch)
  loaded_model_output = loaded_model.forward_pass(batch)
  self.assertAlmostEqual(
      self.evaluate(orig_model_output.loss),
      self.evaluate(loaded_model_output.loss))
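# NOTE: The `NumBatchesCounter` and `NumExamplesCounter` metrics used above and
# in later tests are not defined in this excerpt. The sketch below is a
# plausible, hypothetical implementation, assuming they are simple
# `tf.keras.metrics.Sum` subclasses that accumulate one count per batch and the
# batch size per batch, which would explain the `num_batches` and
# `num_examples` entries checked in `report_local_outputs()`.
import tensorflow as tf


class NumBatchesCounter(tf.keras.metrics.Sum):
  """Counts the number of batches seen during training."""

  def __init__(self, name='num_batches', dtype=tf.int64):
    super(NumBatchesCounter, self).__init__(name, dtype)

  def update_state(self, y_true, y_pred, sample_weight=None):
    # Each call corresponds to one batch, so add 1 to the running sum.
    return super(NumBatchesCounter, self).update_state(1, sample_weight)


class NumExamplesCounter(tf.keras.metrics.Sum):
  """Counts the number of examples seen during training."""

  def __init__(self, name='num_examples', dtype=tf.int64):
    super(NumExamplesCounter, self).__init__(name, dtype)

  def update_state(self, y_true, y_pred, sample_weight=None):
    # Add the batch size (leading dimension of the predictions) per call.
    return super(NumExamplesCounter, self).update_state(
        tf.shape(y_pred)[0], sample_weight)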
def test_from_compiled_keras_model_fails_on_uncompiled_model(self):
  keras_model = model_examples.build_linear_regression_keras_functional_model(
      feature_dims=1)
  with warnings.catch_warnings(record=True) as w:
    with self.assertRaisesRegex(ValueError, '`keras_model` must be compiled'):
      keras_utils.from_compiled_keras_model(
          keras_model=keras_model,
          dummy_batch=_create_dummy_batch(feature_dims=1))
    self.assertLen(w, 1)
    self.assertTrue(issubclass(w[0].category, DeprecationWarning))
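# NOTE: `_create_dummy_batch` is referenced in several of these tests but is
# not part of this excerpt. Below is a minimal sketch of such a helper,
# assuming it builds a single zero-valued example matching the dummy batches
# constructed inline elsewhere in this file: an OrderedDict with an 'x' of
# shape [1, feature_dims] and a 'y' of shape [1, 1].
import collections

import tensorflow as tf


def _create_dummy_batch(feature_dims):
  """Returns a single zero-valued batch in the shape the model expects."""
  return collections.OrderedDict([
      ('x', tf.zeros([1, feature_dims])),
      ('y', tf.zeros([1, 1])),
  ])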
def model_fn():
  keras_model = build_keras_model_fn(feature_dims=2)
  keras_model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[])
  return keras_utils.from_compiled_keras_model(keras_model, dummy_batch)
def test_keras_model_using_batch_norm(self):
  model = model_examples.build_conv_batch_norm_keras_model()

  def loss_fn(y_true, y_pred):
    loss_per_example = tf.keras.losses.sparse_categorical_crossentropy(
        y_true=y_true, y_pred=y_pred)
    return tf.reduce_mean(loss_per_example)

  model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
      loss=loss_fn,
      metrics=[NumBatchesCounter(), NumExamplesCounter()])
  dummy_batch = collections.OrderedDict([
      ('x', np.zeros([1, 28 * 28], dtype=np.float32)),
      ('y', np.zeros([1, 1], dtype=np.int64)),
  ])
  tff_model = keras_utils.from_compiled_keras_model(
      keras_model=model, dummy_batch=dummy_batch)

  batch_size = 2
  batch = {
      'x': np.random.uniform(
          low=0.0, high=1.0, size=[batch_size, 28 * 28]).astype(np.float32),
      'y': np.random.random_integers(
          low=0, high=9, size=[batch_size, 1]).astype(np.int64),
  }

  num_iterations = 2
  for _ in range(num_iterations):
    self.evaluate(tff_model.train_on_batch(batch))

  m = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(m['num_batches'], [num_iterations])
  self.assertEqual(m['num_examples'], [batch_size * num_iterations])
  self.assertGreater(m['loss'][0], 0.0)
  self.assertEqual(m['loss'][1], batch_size * num_iterations)

  # Ensure we can assign the FL trained model weights to a new model.
  tff_weights = model_utils.ModelWeights.from_model(tff_model)
  keras_model = model_examples.build_conv_batch_norm_keras_model()
  tff_weights.assign_weights_to(keras_model)

  def assert_all_weights_close(keras_weights, tff_weights):
    for keras_w, tff_w in zip(keras_weights, six.itervalues(tff_weights)):
      self.assertAllClose(
          self.evaluate(keras_w),
          self.evaluate(tff_w),
          atol=1e-4,
          msg='Variable [{}]'.format(keras_w.name))

  assert_all_weights_close(keras_model.trainable_weights,
                           tff_weights.trainable)
  assert_all_weights_close(keras_model.non_trainable_weights,
                           tff_weights.non_trainable)
def model_fn():
  dummy_batch = collections.OrderedDict([
      ('x', tf.constant([['R']], tf.string)),
      ('y', tf.zeros([1, 1], tf.float32)),
  ])
  keras_model = model_examples.build_lookup_table_keras_model()
  return keras_utils.from_keras_model(
      keras_model,
      dummy_batch,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[])
def model_fn():
  model = tf.keras.models.Sequential([
      tf.keras.layers.Flatten(input_shape=(784,)),
      tf.keras.layers.Dense(
          10,
          kernel_initializer='zeros',
          bias_initializer='zeros',
          activation=tf.nn.softmax)
  ])
  model.compile(
      loss=tf.keras.losses.SparseCategoricalCrossentropy(),
      optimizer=tf.keras.optimizers.SGD(0.1))
  return keras_utils.from_compiled_keras_model(
      model, federated_train_data[0][0])
def test_keras_model_using_embeddings(self):
  model = model_examples.build_embedding_keras_model()

  def loss_fn(y_true, y_pred):
    loss_per_example = tf.keras.losses.sparse_categorical_crossentropy(
        y_true=y_true, y_pred=y_pred)
    return tf.reduce_mean(loss_per_example)

  model.compile(
      optimizer=tf.keras.optimizers.Adam(),
      loss=loss_fn,
      metrics=[NumBatchesCounter(), NumExamplesCounter()])
  dummy_batch = collections.OrderedDict([
      ('x', np.zeros([1])),
      ('y', np.zeros([1])),
  ])
  tff_model = keras_utils.from_compiled_keras_model(
      keras_model=model, dummy_batch=dummy_batch)

  # Create a batch with the size of the vocab. These examples will attempt to
  # train the embedding so that the model produces
  #   i -> (i / output_size) + 5
  input_vocab_size = 10
  output_vocab_size = 5
  xs = []
  ys = []
  for input_id in range(input_vocab_size):
    xs.append(input_id)
    ys.append((input_id / output_vocab_size + 5) % output_vocab_size)
  batch = {
      'x': np.expand_dims(np.array(xs, dtype=np.int64), axis=-1),
      'y': np.expand_dims(np.array(ys, dtype=np.int64), axis=-1),
  }

  prior_loss = float('inf')
  num_iterations = 3
  for _ in range(num_iterations):
    r = self.evaluate(tff_model.train_on_batch(batch))
    self.assertLess(r.loss, prior_loss)
    prior_loss = r.loss

  m = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(m['num_batches'], [num_iterations])
  self.assertEqual(m['num_examples'], [input_vocab_size * num_iterations])
  self.assertGreater(m['loss'][0], 0.0)
  self.assertEqual(m['loss'][1], input_vocab_size * num_iterations)
def model_fn():
  keras_model = tf.keras.Sequential(
      [
          tf.keras.layers.Dense(
              1,
              kernel_initializer='ones',
              bias_initializer='zeros',
              activation=None)
      ],
      name='my_model')
  keras_model.compile(
      loss='mean_squared_error',
      optimizer='sgd',
      metrics=[tf.keras.metrics.Accuracy()])
  return keras_utils.from_compiled_keras_model(
      keras_model,
      dummy_batch=collections.OrderedDict([
          ('x', np.zeros((1, 1), np.float32)),
          ('y', np.zeros((1, 1), np.float32)),
      ]))
def test_tff_model_from_compiled_keras_model(self, feature_dims, model_fn,
                                             loss_fn):
  keras_model = model_fn(feature_dims)
  # If the model is intended to be used for training, it must be compiled.
  keras_model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
      loss=loss_fn,
      metrics=[NumBatchesCounter(), NumExamplesCounter()])
  with warnings.catch_warnings(record=True) as w:
    tff_model = keras_utils.from_compiled_keras_model(
        keras_model=keras_model,
        dummy_batch=_create_dummy_batch(feature_dims))
    self.assertLen(w, 1)
    self.assertTrue(issubclass(w[0].category, DeprecationWarning))

  # Metrics should be zero, though the model wrapper internally executes the
  # forward pass once.
  self.assertSequenceEqual(
      self.evaluate(tff_model.local_variables), [0, 0, 0.0, 0.0, 0.0])

  batch = {
      'x': np.stack([
          np.zeros(feature_dims, np.float32),
          np.full(feature_dims, 5.0, np.float32),
      ]),
      'y': [[0.0], [5.0 * feature_dims]],
  }

  prior_loss = float('inf')
  num_iterations = 3
  for _ in range(num_iterations):
    output = self.evaluate(tff_model.train_on_batch(batch))
    self.assertLess(output.loss, prior_loss)
    prior_loss = output.loss

  metrics = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(metrics['num_batches'], [num_iterations])
  self.assertEqual(metrics['num_examples'], [2 * num_iterations])
  self.assertGreater(metrics['loss'][0], 0)
  self.assertEqual(metrics['loss'][1], 2 * num_iterations)
  self.assertAllGreater(
      metrics['keras_training_time_client_sum_sec'],
      [0] * len(metrics['keras_training_time_client_sum_sec']))
def _model_fn():
  return keras_utils.from_compiled_keras_model(
      keras_model=_make_keras_model(),
      dummy_batch=_create_dummy_batch(feature_dims))
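# NOTE: A `model_fn` like the ones above takes no arguments and returns a TFF
# model wrapper; it is typically handed to a federated computation builder.
# The snippet below is a minimal usage sketch, assuming TFF's public
# `tff.learning.build_federated_averaging_process` API and a
# `federated_train_data` list of client datasets; neither is defined in this
# excerpt.
import tensorflow_federated as tff

iterative_process = tff.learning.build_federated_averaging_process(_model_fn)
state = iterative_process.initialize()
for _ in range(3):
  # Each call runs one round of federated averaging over the client data.
  state, metrics = iterative_process.next(state, federated_train_data)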