def basic_federated_select_args(self):
  """Builds the standard argument tuple shared by the `federated_select` tests.

  Returns:
    A tuple `(client_keys, max_key, server_val, select_fn)`:
      client_keys: a CLIENTS-placed value of three random int32 keys.
      max_key: a SERVER-placed int equal to the number of server values.
      server_val: a SERVER-placed list of three strings.
      select_fn: a computation `<state, int32> -> string` that gathers the
        keyed element out of the server state.
  """
  values = ['first', 'second', 'third']
  server_val = intrinsics.federated_value(values, placements.SERVER)
  max_key_py = len(values)
  max_key = intrinsics.federated_value(max_key_py, placements.SERVER)

  # Keys are sampled uniformly in [0, max_key_py) so every key is valid.
  def get_three_random_keys_fn():
    return tf.random.uniform(
        shape=[3], minval=0, maxval=max_key_py, dtype=tf.int32)

  get_three_random_keys_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      get_three_random_keys_fn, None)
  get_three_random_keys = computation_impl.ConcreteComputation(
      get_three_random_keys_proto, context_stack_impl.context_stack)
  client_keys = intrinsics.federated_eval(get_three_random_keys,
                                          placements.CLIENTS)

  state_type = server_val.type_signature.member

  # `arg` is a <state, key> struct; convert the state back into its Python
  # container form before gathering the requested element.
  def _select_fn(arg):
    state = type_conversions.type_to_py_container(arg[0], state_type)
    key = arg[1]
    return tf.gather(state, key)

  select_fn_type = computation_types.StructType([state_type, tf.int32])
  select_fn_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _select_fn, select_fn_type)
  select_fn = computation_impl.ConcreteComputation(
      select_fn_proto, context_stack_impl.context_stack)
  return (client_keys, max_key, server_val, select_fn)
def test_federated_aggregate_with_unknown_dimension(self): Accumulator = collections.namedtuple('Accumulator', ['samples']) # pylint: disable=invalid-name accumulator_type = computation_types.to_type( Accumulator(samples=computation_types.TensorType(dtype=tf.int32, shape=[None]))) x = _mock_data_of_type(computation_types.at_clients(tf.int32)) def initialize_fn(): return Accumulator(samples=tf.zeros(shape=[0], dtype=tf.int32)) initialize_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn( initialize_fn, None) initialize = computation_impl.ConcreteComputation( initialize_proto, context_stack_impl.context_stack) zero = initialize() # The operator to use during the first stage simply adds an element to the # tensor, increasing its size. def _accumulate(arg): return Accumulator(samples=tf.concat( [arg[0].samples, tf.expand_dims(arg[1], axis=0)], axis=0)) accumulate_type = computation_types.StructType( [accumulator_type, tf.int32]) accumulate_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn( _accumulate, accumulate_type) accumulate = computation_impl.ConcreteComputation( accumulate_proto, context_stack_impl.context_stack) # The operator to use during the second stage simply adds total and count. def _merge(arg): return Accumulator( samples=tf.concat([arg[0].samples, arg[1].samples], axis=0)) merge_type = computation_types.StructType( [accumulator_type, accumulator_type]) merge_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn( _merge, merge_type) merge = computation_impl.ConcreteComputation( merge_proto, context_stack_impl.context_stack) # The operator to use during the final stage simply computes the ratio. report_proto, _ = tensorflow_computation_factory.create_identity( accumulator_type) report = computation_impl.ConcreteComputation( report_proto, context_stack_impl.context_stack) value = intrinsics.federated_aggregate(x, zero, accumulate, merge, report) self.assert_value(value, '<samples=int32[?]>@SERVER')
def test_federated_aggregate_with_client_int(self):
  """Aggregates client int32s into a server float32 mean via total/count."""
  # The representation used during the aggregation process will be a named
  # tuple with 2 elements - the integer 'total' that represents the sum of
  # elements encountered, and the integer element 'count'.
  Accumulator = collections.namedtuple('Accumulator', 'total count')  # pylint: disable=invalid-name
  accumulator_type = computation_types.to_type(
      Accumulator(
          total=computation_types.TensorType(dtype=tf.int32),
          count=computation_types.TensorType(dtype=tf.int32)))

  x = _mock_data_of_type(computation_types.at_clients(tf.int32))
  zero = Accumulator(0, 0)

  # The operator to use during the first stage simply adds an element to the
  # total and updates the count.
  def _accumulate(arg):
    return Accumulator(arg[0].total + arg[1], arg[0].count + 1)

  accumulate_type = computation_types.StructType([accumulator_type, tf.int32])
  accumulate_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _accumulate, accumulate_type)
  accumulate = computation_impl.ConcreteComputation(
      accumulate_proto, context_stack_impl.context_stack)

  # The operator to use during the second stage simply adds total and count.
  def _merge(arg):
    return Accumulator(arg[0].total + arg[1].total,
                       arg[0].count + arg[1].count)

  merge_type = computation_types.StructType([accumulator_type, accumulator_type])
  merge_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _merge, merge_type)
  merge = computation_impl.ConcreteComputation(
      merge_proto, context_stack_impl.context_stack)

  # The operator to use during the final stage simply computes the ratio.
  def _report(arg):
    return tf.cast(arg.total, tf.float32) / tf.cast(arg.count, tf.float32)

  report_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _report, accumulator_type)
  report = computation_impl.ConcreteComputation(
      report_proto, context_stack_impl.context_stack)

  value = intrinsics.federated_aggregate(x, zero, accumulate, merge, report)
  self.assert_value(value, 'float32@SERVER')
def _wrap_sequence_as_value(elements, element_type, context_stack):
  """Wraps `elements` as a TFF sequence with elements of type `element_type`.

  Args:
    elements: Python list to be wrapped as a TFF sequence value.
    element_type: An instance of `Type` giving the type of each element of
      the sequence.
    context_stack: The context stack to use.

  Returns:
    An instance of `tff.Value`.

  Raises:
    TypeError: If `elements` and `element_type` are of incompatible types.
  """
  # TODO(b/113116813): Add support for other representations of sequences.
  py_typecheck.check_type(elements, list)
  py_typecheck.check_type(context_stack, context_stack_base.ContextStack)

  # Every element must be assignable to `element_type`; fail fast otherwise.
  for item in elements:
    item_type = type_conversions.infer_type(item)
    if not element_type.is_assignable_from(item_type):
      raise TypeError(
          'Expected all sequence elements to be {}, found {}.'.format(
              element_type, item_type))

  # No-arg factory that materializes the elements as a dataset inside the
  # default graph when the computation is traced.
  def _make_dataset():
    return tensorflow_utils.make_data_set_from_elements(
        tf.compat.v1.get_default_graph(), elements, element_type)

  proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      fn=_make_dataset, parameter_type=None)
  return _wrap_computation_as_value(proto, context_stack)
def test_federated_select_autozips_server_val(self, federated_select):
  """A dict of server values is implicitly zipped into one select state."""
  client_keys, max_key, server_val, select_fn = (
      self.basic_federated_select_args())
  # Only the keys and max_key from the standard setup are reused; the server
  # value and select_fn are replaced by dict-shaped variants below.
  del server_val, select_fn

  values = ['first', 'second', 'third']
  server_val_element = intrinsics.federated_value(values, placements.SERVER)
  # Two independently-placed server values; federated_select must auto-zip
  # them into a single <e1, e2> state.
  server_val_dict = collections.OrderedDict(
      e1=server_val_element, e2=server_val_element)

  state_type = computation_types.StructType([
      ('e1', server_val_element.type_signature.member),
      ('e2', server_val_element.type_signature.member),
  ])

  # Selects the keyed element from each of the two zipped server values.
  def _select_fn_dict(arg):
    state = type_conversions.type_to_py_container(arg[0], state_type)
    key = arg[1]
    return (tf.gather(state['e1'], key), tf.gather(state['e2'], key))

  select_fn_dict_type = computation_types.StructType([state_type, tf.int32])
  select_fn_dict_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _select_fn_dict, select_fn_dict_type)
  select_fn_dict = computation_impl.ConcreteComputation(
      select_fn_dict_proto, context_stack_impl.context_stack)

  result = federated_select(client_keys, max_key, server_val_dict,
                            select_fn_dict)
  self.assert_value(result, '{<string,string>*}@CLIENTS')
def test_returns_computation(self, py_fn, type_signature, arg,
                             expected_result):
  """Wrapping `py_fn` yields a `pb.Computation` that evaluates as expected."""
  computation_proto, _ = (
      tensorflow_computation_factory.create_computation_for_py_fn(
          py_fn, type_signature))
  self.assertIsInstance(computation_proto, pb.Computation)
  result = test_utils.run_tensorflow(computation_proto, arg)
  self.assertEqual(result, expected_result)
def _create_computation_greater_than_10_with_unused_parameter(
) -> computation_base.Computation:
  """Returns a `<int32,int32> -> bool` computation testing `arg[0] > 10`.

  The second struct element is deliberately never read.
  """
  parameter_type = computation_types.StructType([
      computation_types.TensorType(tf.int32),
      computation_types.TensorType(tf.int32),
  ])

  # Only the first element participates in the predicate.
  def _first_greater_than_10(arg):
    return arg[0] > 10

  proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _first_greater_than_10, parameter_type)
  return computation_impl.ConcreteComputation(
      proto, context_stack_impl.context_stack)
def _wrap_constant_as_value(const) -> Value:
  """Wraps the given Python constant as a `tff.Value`.

  Args:
    const: Python constant convertible to Tensor via `tf.constant`.

  Returns:
    An instance of `tff.Value`.
  """
  # A no-arg computation whose body materializes the constant.
  def _constant_fn():
    return tf.constant(const)

  proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      fn=_constant_fn, parameter_type=None)
  return _wrap_computation_as_value(proto)
def test_infers_accumulate_return_as_merge_arg_merge_return_as_report_arg(
    self):
  """Type inference threads accumulate's output through merge into report."""
  # All intermediate values are int64 vectors of unknown length.
  type_spec = computation_types.TensorType(dtype=tf.int64, shape=[None])
  x = _mock_data_of_type(computation_types.at_clients(tf.int64))

  # The initial accumulator is an empty int64 vector.
  def initialize_fn():
    return tf.constant([], dtype=tf.int64, shape=[0])

  initialize_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      initialize_fn, None)
  initialize = computation_impl.ConcreteComputation(
      initialize_proto, context_stack_impl.context_stack)
  zero = initialize()

  # Appends one element to the accumulated vector.
  def _accumulate(arg):
    return tf.concat([arg[0], [arg[1]]], 0)

  accumulate_type = computation_types.StructType([type_spec, tf.int64])
  accumulate_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _accumulate, accumulate_type)
  accumulate = computation_impl.ConcreteComputation(
      accumulate_proto, context_stack_impl.context_stack)

  # Concatenates two partial accumulators.
  def _merge(arg):
    return tf.concat([arg[0], arg[1]], 0)

  merge_type = computation_types.StructType([type_spec, type_spec])
  merge_proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _merge, merge_type)
  merge = computation_impl.ConcreteComputation(
      merge_proto, context_stack_impl.context_stack)

  # The report stage is the identity on the merged accumulator.
  report_proto, _ = tensorflow_computation_factory.create_identity(type_spec)
  report = computation_impl.ConcreteComputation(
      report_proto, context_stack_impl.context_stack)

  value = intrinsics.federated_aggregate(x, zero, accumulate, merge, report)
  self.assert_value(value, 'int64[?]@SERVER')
def _wrap_constant_as_value(const, context_stack):
  """Wraps the given Python constant as a `tff.Value`.

  Args:
    const: Python constant to be converted to TFF value. Anything convertible
      to Tensor via `tf.constant` can be passed in.
    context_stack: The context stack to use.

  Returns:
    An instance of `value_base.Value`.
  """
  # A no-arg computation whose body materializes the constant.
  def _constant_fn():
    return tf.constant(const)

  proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      fn=_constant_fn, parameter_type=None)
  return _wrap_computation_as_value(proto, context_stack)
def test_federated_select_keys_must_be_int32(self, federated_select):
  """`federated_select` rejects client keys whose dtype is not int32."""
  client_keys, max_key, server_val, select_fn = (
      self.basic_federated_select_args())
  del client_keys  # Replaced below with int64 keys.

  # Same shape and range as the standard keys, but the wrong dtype (int64).
  def _three_int64_keys():
    return tf.random.uniform(shape=[3], minval=0, maxval=3, dtype=tf.int64)

  keys_proto, _ = (
      tensorflow_computation_factory.create_computation_for_py_fn(
          _three_int64_keys, None))
  keys_comp = computation_impl.ConcreteComputation(
      keys_proto, context_stack_impl.context_stack)
  bad_client_keys = intrinsics.federated_eval(keys_comp, placements.CLIENTS)

  with self.assertRaises(TypeError):
    federated_select(bad_client_keys, max_key, server_val, select_fn)
def test_federated_select_fn_must_take_int32_keys(self, federated_select):
  """`federated_select` rejects a select_fn whose key parameter is int64."""
  client_keys, max_key, server_val, select_fn = (
      self.basic_federated_select_args())
  del select_fn  # Replaced below with an int64-keyed variant.
  server_state_type = server_val.type_signature.member

  # Identical body to the standard select_fn; only the declared key dtype
  # differs (int64 instead of int32).
  def _select_with_int64_key(arg):
    container = type_conversions.type_to_py_container(
        arg[0], server_state_type)
    return tf.gather(container, arg[1])

  int64_keyed_type = computation_types.StructType(
      [server_state_type, tf.int64])
  bad_proto, _ = (
      tensorflow_computation_factory.create_computation_for_py_fn(
          _select_with_int64_key, int64_keyed_type))
  bad_select_fn = computation_impl.ConcreteComputation(
      bad_proto, context_stack_impl.context_stack)

  with self.assertRaises(TypeError):
    federated_select(client_keys, max_key, server_val, bad_select_fn)
def _create_compiled_computation(py_fn, parameter_type):
  """Builds a `CompiledComputation` wrapping `py_fn` with `parameter_type`."""
  computation_proto, computation_type = (
      tensorflow_computation_factory.create_computation_for_py_fn(
          py_fn, parameter_type))
  return building_blocks.CompiledComputation(
      computation_proto, type_signature=computation_type)
def test_raises_type_error_with_none(self, py_fn, type_signature):
  """Invalid (parameterized) arguments cause `TypeError` to be raised."""
  # Callable form of assertRaises: invokes the factory with the bad args.
  self.assertRaises(
      TypeError,
      tensorflow_computation_factory.create_computation_for_py_fn,
      py_fn,
      type_signature)
def _create_computation_random() -> computation_base.Computation:
  """Returns a no-arg computation producing one sample from `tf.random.normal`."""
  def _sample():
    return tf.random.normal([])

  proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _sample, None)
  return computation_impl.ConcreteComputation(
      proto, context_stack_impl.context_stack)
def _create_computation_reduce() -> computation_base.Computation:
  """Returns a computation `int32* -> int32` that sums a sequence."""
  sequence_type = computation_types.SequenceType(tf.int32)

  # Reduce the incoming dataset with int32 zero and elementwise addition.
  def _sum_dataset(ds):
    return ds.reduce(np.int32(0), lambda x, y: x + y)

  proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _sum_dataset, sequence_type)
  return computation_impl.ConcreteComputation(
      proto, context_stack_impl.context_stack)
def _create_computation_greater_than_10() -> computation_base.Computation:
  """Returns a computation `int32 -> bool` testing whether the input exceeds 10."""
  def _greater_than_10(x):
    return x > 10

  proto, _ = tensorflow_computation_factory.create_computation_for_py_fn(
      _greater_than_10, computation_types.TensorType(tf.int32))
  return computation_impl.ConcreteComputation(
      proto, context_stack_impl.context_stack)