def federated_mean(self, value, weight):
  """Implements `federated_mean` as defined in `api/intrinsics.py`."""
  # TODO(b/113112108): Possibly relax the constraints on numeric types, and
  # inject implicit casts where appropriate. For instance, we might want to
  # allow `tf.int32` values as the input, and automatically cast them to
  # `tf.float32` before invoking the average, thus producing a floating-point
  # result.

  # TODO(b/120439632): Possibly allow the weight to be either structured or
  # non-scalar, e.g., for the case of averaging a convolutional layer, when
  # we would want to use a different weight for every filter, and where it
  # might be cumbersome for users to have to manually slice and assemble a
  # variable.

  # Coerce `value` into a federated value placed at CLIENTS; anything else is
  # rejected with a descriptive error.
  value = value_utils.ensure_federated_value(
      value_impl.to_value(value, None, self._context_stack),
      placement_literals.CLIENTS, 'value to be averaged')
  if not type_analysis.is_average_compatible(value.type_signature):
    raise TypeError(
        'The value type {} is not compatible with the average operator.'
        .format(value.type_signature))

  if weight is not None:
    # The weight must be a scalar integer or floating-point tensor placed at
    # CLIENTS; validate each of those properties in turn.
    weight = value_utils.ensure_federated_value(
        value_impl.to_value(weight, None, self._context_stack),
        placement_literals.CLIENTS, 'weight to use in averaging')
    member_type = weight.type_signature.member
    py_typecheck.check_type(member_type, computation_types.TensorType)
    if member_type.shape.ndims != 0:
      raise TypeError(
          'The weight type {} is not a federated scalar.'.format(
              weight.type_signature))
    if not (member_type.dtype.is_integer or member_type.dtype.is_floating):
      raise TypeError(
          'The weight type {} is not a federated integer or floating-point '
          'tensor.'.format(weight.type_signature))

  # Unwrap the underlying building blocks and assemble the federated_mean
  # intrinsic call; an absent weight yields the unweighted mean.
  value_comp = value_impl.ValueImpl.get_comp(value)
  weight_comp = (
      value_impl.ValueImpl.get_comp(weight) if weight is not None else None)
  comp = building_block_factory.create_federated_mean(value_comp, weight_comp)
  comp = self._bind_comp_as_reference(comp)
  return value_impl.ValueImpl(comp, self._context_stack)
def test_returns_false(self, type_spec): self.assertFalse(type_analysis.is_average_compatible(type_spec))
def federated_mean(value, weight=None):
  """Computes a `tff.SERVER` mean of `value` placed on `tff.CLIENTS`.

  For values `v_1, ..., v_k`, and weights `w_1, ..., w_k`, this means
  `sum_{i=1}^k (w_i * v_i) / sum_{i=1}^k w_i`.

  Args:
    value: The value of which the mean is to be computed. Must be of a TFF
      federated type placed at `tff.CLIENTS`. The value may be structured,
      e.g., its member constituents can be named tuples. The tensor types that
      the value is composed of must be floating-point or complex.
    weight: An optional weight, a TFF federated integer or floating-point
      tensor value, also placed at `tff.CLIENTS`.

  Returns:
    A representation at the `tff.SERVER` of the mean of the member
    constituents of `value`, optionally weighted with `weight` if specified
    (otherwise, the member constituents contributed by all clients are equally
    weighted).

  Raises:
    TypeError: If `value` is not a federated TFF value placed at
      `tff.CLIENTS`, or if `weight` is not a federated integer or a
      floating-point tensor with the matching placement.
  """
  # TODO(b/113112108): Possibly relax the constraints on numeric types, and
  # inject implicit casts where appropriate. For instance, we might want to
  # allow `tf.int32` values as the input, and automatically cast them to
  # `tf.float32` before invoking the average, thus producing a floating-point
  # result.

  # TODO(b/120439632): Possibly allow the weight to be either structured or
  # non-scalar, e.g., for the case of averaging a convolutional layer, when
  # we would want to use a different weight for every filter, and where it
  # might be cumbersome for users to have to manually slice and assemble a
  # variable.

  # Coerce `value` into a federated value placed at CLIENTS and confirm that
  # its member type supports averaging.
  value = value_utils.ensure_federated_value(
      value_impl.to_value(value, None), placements.CLIENTS,
      'value to be averaged')
  if not type_analysis.is_average_compatible(value.type_signature):
    raise TypeError(
        'The value type {} is not compatible with the average operator.'.
        format(value.type_signature))

  if weight is not None:
    # The weight must be a scalar integer or floating-point tensor placed at
    # CLIENTS; validate each of those properties in turn.
    weight = value_utils.ensure_federated_value(
        value_impl.to_value(weight, None), placements.CLIENTS,
        'weight to use in averaging')
    member_type = weight.type_signature.member
    py_typecheck.check_type(member_type, computation_types.TensorType)
    if member_type.shape.ndims != 0:
      raise TypeError(
          'The weight type {} is not a federated scalar.'.format(
              weight.type_signature))
    if not (member_type.dtype.is_integer or member_type.dtype.is_floating):
      raise TypeError(
          'The weight type {} is not a federated integer or floating-point '
          'tensor.'.format(weight.type_signature))

  # Assemble the federated_mean intrinsic call; an absent weight yields the
  # unweighted mean.
  weight_comp = None if weight is None else weight.comp
  comp = building_block_factory.create_federated_mean(value.comp, weight_comp)
  comp = _bind_comp_as_reference(comp)
  return value_impl.Value(comp)