def test_broadcast_process_from_model(self):
  """Checks the model-based encoded broadcast builder yields a MeasuredProcess."""
  model_fn = model_examples.LinearRegression
  process = encoding_utils.build_encoded_broadcast_process_from_model(
      model_fn, _test_encoder_fn('simple'))
  self.assertIsInstance(process, measured_process.MeasuredProcess)
def test_iterative_process_with_encoding_process(self):
  """Builds an iterative process wired with encoded aggregation and broadcast."""
  model_fn = model_examples.LinearRegression
  # Encoded mean for client-to-server delta aggregation.
  aggregation = encoding_utils.build_encoded_mean_process_from_model(
      model_fn, _test_encoder_fn('gather'))
  # Encoded broadcast for server-to-client model delivery.
  broadcast = encoding_utils.build_encoded_broadcast_process_from_model(
      model_fn, _test_encoder_fn('simple'))
  iterative_process = optimizer_utils.build_model_delta_optimizer_process(
      model_fn=model_fn,
      model_to_client_delta_fn=DummyClientDeltaFn,
      server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
      aggregation_process=aggregation,
      broadcast_process=broadcast)
  self._verify_iterative_process(iterative_process)
def test_federated_evaluation_quantized_aggressively(self):
  """Evaluates through a very lossy (2-bit) quantizing broadcaster.

  With only 2 quantization bits, encoding should corrupt enough of the
  broadcast model weights that fewer than all 8 matching (index, value)
  pairs survive — unlike the conservative quantization test.
  """
  # Set up a uniform quantization encoder as the broadcaster.
  broadcaster = (
      encoding_utils.build_encoded_broadcast_process_from_model(
          TestModelQuant, _build_simple_quant_encoder(2)))
  # Use the shared type-assertion helpers, consistent with
  # test_federated_evaluation_quantized_conservatively.
  type_test_utils.assert_types_equivalent(
      broadcaster.next.type_signature,
      _build_expected_broadcaster_next_signature())
  evaluate = federated_evaluation.build_federated_evaluation(
      TestModelQuant, broadcast_process=broadcaster)
  # Confirm that the type signature matches what is expected.
  type_test_utils.assert_types_identical(
      evaluate.type_signature,
      _build_expected_test_quant_model_eval_signature())

  def _temp_dict(temps):
    return {'temp': np.array(temps, dtype=np.float32)}

  result = evaluate(
      collections.OrderedDict(
          trainable=[[5.0, 10.0, 5.0, 7.0]], non_trainable=[]),
      [
          [
              _temp_dict([1.0, 10.0, 2.0, 7.0]),
              _temp_dict([6.0, 11.0, 5.0, 8.0])
          ],
          [_temp_dict([9.0, 12.0, 13.0, 7.0])],
          [
              _temp_dict([1.0, 22.0, 23.0, 24.0]),
              _temp_dict([5.0, 10.0, 5.0, 7.0])
          ],
      ])
  # This very aggressive quantization should be so lossy that some of the
  # data is changed during encoding so the number that are equal between
  # the original and the final result should not be 8 as it is in the
  # conservative quantization test above.
  # NOTE: assertContainsSubset(expected_subset, actual_set) takes the
  # expected keys first; the original reversed the arguments, which only
  # verified `result` had no *extra* keys rather than the required ones.
  self.assertContainsSubset(['eval', 'stat'], result.keys())
  self.assertContainsSubset(['num_same'], result['eval'].keys())
  self.assertLess(result['eval']['num_same'], 8.0)
  self.assertContainsSubset(['num_examples'], result['stat'].keys())
  self.assertEqual(result['stat']['num_examples'], 20)
def test_federated_evaluation_quantized_conservatively(self):
  """Evaluates through a mild (12-bit) quantizing broadcaster."""
  # Set up a uniform quantization encoder as the broadcaster.
  broadcaster = (
      encoding_utils.build_encoded_broadcast_process_from_model(
          TestModelQuant, _build_simple_quant_encoder(12)))
  type_test_utils.assert_types_equivalent(
      broadcaster.next.type_signature,
      _build_expected_broadcaster_next_signature())
  evaluate = federated_evaluation.build_federated_evaluation(
      TestModelQuant, broadcast_process=broadcaster)
  # Confirm that the type signature matches what is expected.
  type_test_utils.assert_types_identical(
      evaluate.type_signature,
      _build_expected_test_quant_model_eval_signature())

  def _temp_dict(temps):
    return {'temp': np.array(temps, dtype=np.float32)}

  initial_weights = collections.OrderedDict(
      trainable=[[5.0, 10.0, 5.0, 7.0]], non_trainable=[])
  client_data = [
      [
          _temp_dict([1.0, 10.0, 2.0, 7.0]),
          _temp_dict([6.0, 11.0, 5.0, 8.0]),
      ],
      [_temp_dict([9.0, 12.0, 13.0, 7.0])],
      [
          _temp_dict([1.0, 22.0, 23.0, 24.0]),
          _temp_dict([5.0, 10.0, 5.0, 7.0]),
      ],
  ]
  result = evaluate(initial_weights, client_data)
  # This conservative quantization should not be too lossy.
  # When comparing the data examples to trainable, there are 8 times
  # where the index and value match.
  expected = collections.OrderedDict(
      eval=collections.OrderedDict(num_same=8.0))
  self.assertEqual(result, expected)