def test_input_spec_batch_types_type_errors(self, input_spec, error_message):
  keras_model = model_examples.build_linear_regression_keras_functional_model(
      feature_dims=1)
  with self.assertRaisesRegex(TypeError, error_message):
    keras_utils.from_keras_model(
        keras_model=keras_model,
        input_spec=input_spec,
        loss=tf.keras.losses.MeanSquaredError())
def test_keras_model_fails_compiled(self):
  feature_dims = 3
  keras_model = model_examples.build_linear_regression_keras_functional_model(
      feature_dims)
  keras_model.compile(loss=tf.keras.losses.MeanSquaredError())
  with self.assertRaisesRegex(ValueError, 'compile'):
    keras_utils.from_keras_model(
        keras_model=keras_model,
        input_spec=_create_dummy_types(feature_dims),
        loss=tf.keras.losses.MeanSquaredError(),
        metrics=[NumBatchesCounter(), NumExamplesCounter()])
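# Several snippets in this listing reference helpers that are defined
# elsewhere in the test module and not shown here. The sketch below is an
# assumption about their shape, inferred from how they are used (an
# OrderedDict of TensorSpecs keyed x/y, and two counting Keras metrics); the
# real definitions may differ.
import collections

import tensorflow as tf


def _create_dummy_types(feature_dims):
  """Sketch: TensorSpecs matching a single (x, y) regression example."""
  return collections.OrderedDict(
      x=tf.TensorSpec(shape=[1, feature_dims], dtype=tf.float32),
      y=tf.TensorSpec(shape=[1], dtype=tf.float32))


class NumBatchesCounter(tf.keras.metrics.Sum):
  """Sketch: a metric that counts the number of batches seen."""

  def __init__(self, name='num_batches', dtype=tf.int64):
    super().__init__(name, dtype)

  def update_state(self, y_true, y_pred, sample_weight=None):
    # Add 1 per batch, regardless of batch size.
    return super().update_state(1, sample_weight)


class NumExamplesCounter(tf.keras.metrics.Sum):
  """Sketch: a metric that counts the number of examples seen."""

  def __init__(self, name='num_examples', dtype=tf.int64):
    super().__init__(name, dtype)

  def update_state(self, y_true, y_pred, sample_weight=None):
    # Add the leading (batch) dimension of the predictions.
    return super().update_state(tf.shape(y_pred)[0], sample_weight)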
def model_fn() -> model.Model:
  return keras_utils.from_keras_model(
      keras_model=_get_character_recognition_model(
          model_id, only_digits, debug_seed),
      loss=tf.keras.losses.SparseCategoricalCrossentropy(),
      input_spec=task_datasets.element_type_structure,
      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
def test_keras_model_using_embeddings(self):
  model = model_examples.build_embedding_keras_model()
  dummy_batch = collections.OrderedDict(x=np.zeros([1]), y=np.zeros([1]))
  tff_model = keras_utils.from_keras_model(
      keras_model=model,
      dummy_batch=dummy_batch,
      loss=tf.keras.losses.SparseCategoricalCrossentropy(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  # Create a batch with the size of the vocab. These examples will attempt to
  # train the embedding so that the model produces
  #   i -> (i / output_vocab_size + 5) % output_vocab_size
  input_vocab_size = 10
  output_vocab_size = 5
  xs = []
  ys = []
  for input_id in range(input_vocab_size):
    xs.append(input_id)
    ys.append((input_id / output_vocab_size + 5) % output_vocab_size)
  batch = collections.OrderedDict(
      x=np.expand_dims(np.array(xs, dtype=np.int64), axis=-1),
      y=np.expand_dims(np.array(ys, dtype=np.int64), axis=-1))

  num_train_steps = 3
  for _ in range(num_train_steps):
    batch_output = self.evaluate(tff_model.forward_pass(batch))
    self.assertGreater(batch_output.loss, 0.0)

  # The reported loss is a two-element [loss_sum, example_count] pair, so
  # that a weighted average can be computed across clients.
  m = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(m['num_batches'], [num_train_steps])
  self.assertEqual(m['num_examples'], [input_vocab_size * num_train_steps])
  self.assertGreater(m['loss'][0], 0.0)
  self.assertEqual(m['loss'][1], input_vocab_size * num_train_steps)
def model_fn():
  keras_model = build_keras_model_fn(feature_dims=2)
  return keras_utils.from_keras_model(
      keras_model,
      dummy_batch,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[])
def test_tff_model_type_spec_from_keras_model_unspecified_sequence_len(self):
  keras_model = tf.keras.Sequential([
      tf.keras.layers.Input(shape=(None,)),
      tf.keras.layers.Embedding(input_dim=10, output_dim=10),
      tf.keras.layers.LSTM(1)
  ])
  input_spec = [
      tf.TensorSpec(shape=[None, None], dtype=tf.int64),
      tf.TensorSpec(shape=[None], dtype=tf.float32)
  ]
  tff_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      loss=tf.keras.losses.MeanSquaredError(),
      input_spec=input_spec)
  self.assertIsInstance(tff_model, model_utils.EnhancedModel)
  self.assertEqual(tff_model.input_spec, input_spec)

  batch = collections.OrderedDict(x=np.ones([2, 5], np.int64), y=[0.0, 1.0])
  output = tff_model.forward_pass(batch)
  self.assertAllEqual(output.predictions.shape, [2, 1])

  # A batch with a different sequence length should be processed in a
  # similar way.
  batch = collections.OrderedDict(x=np.ones([2, 10], np.int64), y=[0.0, 1.0])
  output = tff_model.forward_pass(batch)
  self.assertAllEqual(output.predictions.shape, [2, 1])
def model_fn():
  keras_model = model_examples.build_lookup_table_keras_model()
  return keras_utils.from_keras_model(
      keras_model,
      loss=tf.keras.losses.MeanSquaredError(),
      input_spec=ds.element_spec,
      metrics=[NumExamplesCounter()])
def test_tff_model_from_keras_model_with_custom_loss_with_integer_label(self):

  class _CustomLossRequiringLabelBeInteger(tf.keras.losses.Loss):

    def __init__(self):
      super().__init__(name='custom_loss_requiring_label_be_integer')

    def call(self, y_true, y_pred):
      # Note that this TF function requires that the label `y_true` be of an
      # integer dtype; a TypeError is thrown if `y_true` isn't int32 or int64.
      return tf.nn.sparse_softmax_cross_entropy_with_logits(y_true, y_pred)

  keras_model = tf.keras.Sequential(
      [tf.keras.Input(shape=(2,)),
       tf.keras.layers.Dense(units=10)])
  input_spec = [
      tf.TensorSpec(shape=[1, 2], dtype=tf.float32),
      tf.TensorSpec(shape=[1], dtype=tf.int64)
  ]
  tff_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      loss=_CustomLossRequiringLabelBeInteger(),
      input_spec=input_spec)

  batch = collections.OrderedDict(
      x=tf.convert_to_tensor(np.ones((1, 2)), dtype=tf.float32),
      y=tf.convert_to_tensor([0], dtype=tf.int64))
  # Expect this call to .forward_pass to succeed (no errors raised).
  tff_model.forward_pass(batch)
def _model_fn():
  return keras_utils.from_keras_model(
      keras_model=_make_keras_model(),
      input_spec=_create_dummy_types(feature_dims),
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])
def model_fn():
  keras_model = build_keras_model_fn(feature_dims=2)
  return keras_utils.from_keras_model(
      keras_model,
      loss=tf.keras.losses.MeanSquaredError(),
      input_spec=ds.element_spec,
      metrics=[NumExamplesCounter()])
def model_fn():
  # Note: we don't compile with an optimizer here; FedSGD does not use it.
  keras_model = build_keras_model_fn(feature_dims=2)
  return keras_utils.from_keras_model(
      keras_model,
      input_spec=ds1.element_spec,
      loss=tf.keras.losses.MeanSquaredError())
def test_keras_model_multiple_inputs(self):
  input_spec = collections.OrderedDict(
      x=collections.OrderedDict(
          a=tf.TensorSpec(shape=[None, 1], dtype=tf.float32),
          b=tf.TensorSpec(shape=[1, 1], dtype=tf.float32)),
      y=tf.TensorSpec(shape=[None, 1], dtype=tf.float32))
  model = model_examples.build_multiple_inputs_keras_model()
  tff_model = keras_utils.from_keras_model(
      keras_model=model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  batch_size = 2
  real_batch = collections.OrderedDict(
      x=collections.OrderedDict(
          a=np.ones(shape=[batch_size, 1], dtype=np.float32),
          b=np.ones(shape=[batch_size, 1], dtype=np.float32)),
      y=np.asarray([[2.0], [2.0]]).astype(np.float32))

  num_train_steps = 2
  for _ in range(num_train_steps):
    self.evaluate(tff_model.forward_pass(real_batch))

  m = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(m['num_batches'], [num_train_steps])
  self.assertEqual(m['num_examples'], [batch_size * num_train_steps])
  self.assertGreater(m['loss'][0], 0.0)
  self.assertEqual(m['loss'][1], batch_size * num_train_steps)

  # Ensure we can assign the FL trained model weights to a new model.
  tff_weights = model_utils.ModelWeights.from_model(tff_model)
  keras_model = model_examples.build_multiple_inputs_keras_model()
  tff_weights.assign_weights_to(keras_model)
  loaded_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  orig_model_output = tff_model.forward_pass(real_batch)
  loaded_model_output = loaded_model.forward_pass(real_batch)
  self.assertAlmostEqual(
      self.evaluate(orig_model_output.loss),
      self.evaluate(loaded_model_output.loss))
def tff_model_builder():
  x_type = tf.TensorSpec(shape=(None, 1), dtype=tf.float32)
  input_spec = (x_type, x_type)
  keras_model = keras_model_builder()
  loss = tf.keras.losses.MeanSquaredError()
  return keras_utils.from_keras_model(keras_model, loss, input_spec=input_spec)
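# Because `input_spec` above is a 2-tuple rather than an OrderedDict, batches
# fed to the resulting model's `forward_pass` are positional (input, label)
# pairs. A hypothetical usage sketch (the batch shapes are chosen here to
# match `x_type`; nothing below is part of the original snippet):
model = tff_model_builder()
batch = (tf.ones([4, 1], tf.float32), tf.zeros([4, 1], tf.float32))
output = model.forward_pass(batch)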
def test_keras_model_preprocessing(self):
  self.skipTest('b/171254807')
  model = model_examples.build_preprocessing_lookup_keras_model()
  input_spec = collections.OrderedDict(
      x=tf.TensorSpec(shape=[None, 1], dtype=tf.string),
      y=tf.TensorSpec(shape=[None, 1], dtype=tf.float32))
  tff_model = keras_utils.from_keras_model(
      keras_model=model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  batch_size = 3
  batch = collections.OrderedDict(
      x=tf.constant([['A'], ['B'], ['A']], dtype=tf.string),
      y=tf.constant([[0], [1], [1]], dtype=tf.float32))

  num_train_steps = 2
  for _ in range(num_train_steps):
    self.evaluate(tff_model.forward_pass(batch))

  metrics = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(metrics['num_batches'], [num_train_steps])
  self.assertEqual(metrics['num_examples'], [batch_size * num_train_steps])
  self.assertGreater(metrics['loss'][0], 0.0)
  self.assertEqual(metrics['loss'][1], batch_size * num_train_steps)

  # Ensure we can assign the FL trained model weights to a new model.
  tff_weights = model_utils.ModelWeights.from_model(tff_model)
  keras_model = model_examples.build_lookup_table_keras_model()
  tff_weights.assign_weights_to(keras_model)
  loaded_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  orig_model_output = tff_model.forward_pass(batch)
  loaded_model_output = loaded_model.forward_pass(batch)
  self.assertAlmostEqual(
      self.evaluate(orig_model_output.loss),
      self.evaluate(loaded_model_output.loss))
def model_fn():
  keras_model = model_examples.build_linear_regression_keras_functional_model(
      feature_dims=2)
  loss_fn = tf.keras.losses.MeanSquaredError()
  input_spec = dataset.element_spec
  return keras_utils.from_keras_model(
      keras_model, loss=loss_fn, input_spec=input_spec)
def test_input_spec_batch_types(self, input_spec):
  keras_model = model_examples.build_linear_regression_keras_functional_model(
      feature_dims=1)
  tff_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError())
  self.assertIsInstance(tff_model, model_utils.EnhancedModel)
def model_fn():
  inputs = tf.keras.Input(shape=(2,))  # feature dim = 2
  outputs = tf.keras.layers.Dense(1)(inputs)
  keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
  return keras_utils.from_keras_model(
      keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError())
def model_fn() -> model.Model:
  return keras_utils.from_keras_model(
      keras_model=word_prediction_models.create_recurrent_model(
          vocab_size=extended_vocab_size),
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      input_spec=task_datasets.element_type_structure,
      metrics=metrics_builder())
def model_fn():
  dummy_batch = collections.OrderedDict(
      x=tf.constant([['R']], tf.string), y=tf.zeros([1, 1], tf.float32))
  keras_model = model_examples.build_lookup_table_keras_model()
  return keras_utils.from_keras_model(
      keras_model,
      dummy_batch,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[])
def model_fn() -> model.Model:
  return keras_utils.from_keras_model(
      keras_model=emnist_models.create_autoencoder_model(),
      loss=tf.keras.losses.MeanSquaredError(),
      input_spec=task_datasets.element_type_structure,
      metrics=[
          tf.keras.metrics.MeanSquaredError(),
          tf.keras.metrics.MeanAbsoluteError()
      ])
def model_fn():
  keras_model = model_examples.build_linear_regression_keras_functional_model()
  return keras_utils.from_keras_model(
      keras_model,
      loss=tf.keras.losses.MeanSquaredError(),
      input_spec=collections.OrderedDict(
          # TensorSpec dtype defaults to tf.float32 when unspecified.
          x=tf.TensorSpec(shape=[None, 2]),
          y=tf.TensorSpec(shape=[None, 1])))
def model_fn():
  inputs = tf.keras.Input(shape=(2,))  # feature dim = 2
  outputs = tf.keras.layers.Dense(1)(inputs)
  keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
  dummy_batch = collections.OrderedDict([
      ('x', np.zeros([1, 2], dtype=np.float32)),
      ('y', np.zeros([1, 1], dtype=np.float32))
  ])
  return keras_utils.from_keras_model(keras_model, dummy_batch,
                                      tf.keras.losses.MeanSquaredError())
def model_fn() -> model.Model:
  return keras_utils.from_keras_model(
      keras_model=char_prediction_models.create_recurrent_model(
          vocab_size=VOCAB_LENGTH, sequence_length=sequence_length),
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      input_spec=task_datasets.element_type_structure,
      metrics=[
          keras_metrics.NumTokensCounter(masked_tokens=[pad_token]),
          keras_metrics.MaskedCategoricalAccuracy(masked_tokens=[pad_token])
      ])
def test_input_spec_python_container(self, input_spec):
  keras_model = model_examples.build_linear_regression_keras_functional_model(
      feature_dims=1)
  tff_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError())
  self.assertIsInstance(tff_model, model_utils.EnhancedModel)
  tf.nest.map_structure(lambda x: self.assertIsInstance(x, tf.TensorSpec),
                        tff_model.input_spec)
def model_fn() -> model.Model:
  return keras_utils.from_keras_model(
      keras_model=_build_logistic_regression_model(
          input_size=word_vocab_size, output_size=tag_vocab_size),
      loss=tf.keras.losses.BinaryCrossentropy(
          from_logits=False, reduction=tf.keras.losses.Reduction.SUM),
      input_spec=task_datasets.element_type_structure,
      metrics=[
          tf.keras.metrics.Precision(name='precision'),
          tf.keras.metrics.Recall(top_k=5, name='recall_at_5'),
      ])
def test_keras_model_lookup_table(self):
  model = model_examples.build_lookup_table_keras_model()
  dummy_batch = collections.OrderedDict(
      x=tf.constant([['G']], dtype=tf.string),
      y=tf.zeros([1, 1], dtype=tf.float32))
  tff_model = keras_utils.from_keras_model(
      keras_model=model,
      dummy_batch=dummy_batch,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  batch_size = 3
  batch = collections.OrderedDict(
      x=tf.constant([['G'], ['B'], ['R']], dtype=tf.string),
      y=tf.constant([[1.0], [2.0], [3.0]], dtype=tf.float32))

  num_train_steps = 2
  for _ in range(num_train_steps):
    self.evaluate(tff_model.forward_pass(batch))

  metrics = self.evaluate(tff_model.report_local_outputs())
  self.assertEqual(metrics['num_batches'], [num_train_steps])
  self.assertEqual(metrics['num_examples'], [batch_size * num_train_steps])
  self.assertGreater(metrics['loss'][0], 0.0)
  self.assertEqual(metrics['loss'][1], batch_size * num_train_steps)

  # Ensure we can assign the FL trained model weights to a new model.
  tff_weights = model_utils.ModelWeights.from_model(tff_model)
  keras_model = model_examples.build_lookup_table_keras_model()
  tff_weights.assign_weights_to(keras_model)
  loaded_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      dummy_batch=dummy_batch,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[NumBatchesCounter(), NumExamplesCounter()])

  orig_model_output = tff_model.forward_pass(batch)
  loaded_model_output = loaded_model.forward_pass(batch)
  self.assertAlmostEqual(
      self.evaluate(orig_model_output.loss),
      self.evaluate(loaded_model_output.loss))
def model_fn():
  model = tf.keras.models.Sequential([
      tf.keras.layers.Flatten(input_shape=(784,)),
      tf.keras.layers.Dense(
          10,
          kernel_initializer='zeros',
          bias_initializer='zeros',
          activation=tf.nn.softmax)
  ])
  return keras_utils.from_keras_model(
      model,
      dummy_batch=federated_train_data[0][0],
      loss=tf.keras.losses.SparseCategoricalCrossentropy())
def model_fn():
  inputs = tf.keras.Input(shape=(2,))  # feature dim = 2
  outputs = tf.keras.layers.Dense(1)(inputs)
  keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
  input_spec = collections.OrderedDict([
      ('x', tf.TensorSpec([None, 2], dtype=tf.float32)),
      ('y', tf.TensorSpec([None, 1], dtype=tf.float32))
  ])
  return keras_utils.from_keras_model(
      keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError())
def test_keras_model_and_optimizer(self):
  # Expect TFF to compile the keras model if given an optimizer.
  keras_model = model_examples.build_linear_regression_keras_functional_model(
      feature_dims=1)
  tff_model = keras_utils.from_keras_model(
      keras_model=keras_model,
      dummy_batch=_create_dummy_batch(1),
      loss=tf.keras.losses.MeanSquaredError(),
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01))
  self.assertIsInstance(tff_model, model_utils.EnhancedTrainableModel)
  # pylint: disable=protected-access
  self.assertTrue(hasattr(tff_model._model._keras_model, 'optimizer'))
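# Note: the snippets that pass `dummy_batch` reflect an older TFF API; later
# releases replaced it with `input_spec`, as the other snippets show. When
# migrating, a dummy batch can be converted to an equivalent spec. A minimal
# sketch, not the library's own migration helper:
input_spec = tf.nest.map_structure(
    lambda t: tf.TensorSpec.from_tensor(tf.convert_to_tensor(t)), dummy_batch)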
def model_fn():
  model = tf.keras.models.Sequential([
      tf.keras.layers.Flatten(input_shape=(784,)),
      tf.keras.layers.Dense(
          10,
          kernel_initializer='zeros',
          bias_initializer='zeros',
          activation=tf.nn.softmax)
  ])
  return keras_utils.from_keras_model(
      model,
      input_spec=input_spec,
      loss=tf.keras.losses.SparseCategoricalCrossentropy())
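# For context, a `model_fn` like the ones above is typically handed to a TFF
# iterative process. A minimal sketch, assuming the tff.learning API from the
# same era as these snippets and a hypothetical `federated_train_data` list
# of client `tf.data.Dataset`s:
import tensorflow_federated as tff

iterative_process = tff.learning.build_federated_averaging_process(
    model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02))
state = iterative_process.initialize()
state, metrics = iterative_process.next(state, federated_train_data)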