Example #1
    def test_canonical_form_with_learning_structure_does_not_change_execution_of_iterative_process(
            self):
        if tf.config.list_logical_devices('GPU'):
            self.skipTest(
                'b/137602785: bring GPU test back after the fix for `wrap_function`'
            )
        ip_1 = construct_example_training_comp()
        cf = tff.backends.mapreduce.get_canonical_form_for_iterative_process(
            ip_1)
        ip_2 = tff.backends.mapreduce.get_iterative_process_for_canonical_form(
            cf)

        ip_1.initialize.type_signature.check_equivalent_to(
            ip_2.initialize.type_signature)
        # The `next` functions' type signatures may not be equal, since we may
        # have appended an empty tuple as client side-channel outputs if none
        # existed.
        ip_1.next.type_signature.parameter.check_equivalent_to(
            ip_2.next.type_signature.parameter)
        ip_1.next.type_signature.result.check_equivalent_to(
            ip_2.next.type_signature.result)

        sample_batch = collections.OrderedDict(
            x=np.array([[1., 1.]], dtype=np.float32),
            y=np.array([[0]], dtype=np.int32),
        )
        client_data = [sample_batch]
        state_1 = ip_1.initialize()
        server_state_1, server_output_1 = ip_1.next(state_1, [client_data])
        server_state_1 = structure.from_container(server_state_1,
                                                  recursive=True)
        server_output_1 = structure.from_container(server_output_1,
                                                   recursive=True)
        server_state_1_arrays = structure.flatten(server_state_1)
        server_output_1_arrays = structure.flatten(server_output_1)
        state_2 = ip_2.initialize()
        server_state_2, server_output_2 = ip_2.next(state_2, [client_data])
        server_state_2_arrays = structure.flatten(server_state_2)
        server_output_2_arrays = structure.flatten(server_output_2)

        self.assertEmpty(server_state_1.model_broadcast_state)
        # Note that we cannot simply use assertEqual because the values may differ
        # due to floating point issues.
        self.assertTrue(
            structure.is_same_structure(server_state_1, server_state_2))
        self.assertTrue(
            structure.is_same_structure(server_output_1, server_output_2))
        self.assertAllClose(server_state_1_arrays, server_state_2_arrays)
        self.assertAllClose(server_output_1_arrays[:2],
                            server_output_2_arrays[:2])
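The flatten-and-compare pattern above is what makes the pointwise assertAllClose possible. As a minimal sketch (assuming the public `tff.structure` module used in the test), `from_container` converts a nested container into a `Struct` and `flatten` returns its leaves in field order:

import collections

import tensorflow_federated as tff

structure = tff.structure

# Hypothetical nested server state, used only to illustrate the two helpers.
nested = collections.OrderedDict(
    model=collections.OrderedDict(weight=1.0, bias=0.5),
    round_num=1,
)
as_struct = structure.from_container(nested, recursive=True)
leaves = structure.flatten(as_struct)
# `leaves` is a flat Python list of the leaf values in field order,
# [1.0, 0.5, 1], ready for a pointwise assertAllClose against another
# flattened Struct of the same structure.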
Example #2
    async def test_returns_value(self, coro, type_signature, expected_value):
        actual_value = native_platform._create_structure_of_coro_references(
            coro=coro, type_signature=type_signature)

        if (isinstance(actual_value, structure.Struct)
                and isinstance(expected_value, structure.Struct)):
            self.assertTrue(
                structure.is_same_structure(actual_value, expected_value))
            actual_value = structure.flatten(actual_value)
            expected_value = structure.flatten(expected_value)
            for a, b in zip(actual_value, expected_value):
                a = await a.get_value()
                b = await b.get_value()
                self.assertEqual(a, b)
        else:
            actual_value = await actual_value.get_value()
            expected_value = await expected_value.get_value()
            self.assertEqual(actual_value, expected_value)
Example #3
 def test_is_same_structure_check_types(self):
   self.assertTrue(
       structure.is_same_structure(
           structure.Struct([('a', 10)]), structure.Struct([('a', 20)])))
   self.assertTrue(
       structure.is_same_structure(
           structure.Struct([
               ('a', 10),
               ('b', structure.Struct([('z', 5)])),
           ]),
           structure.Struct([
               ('a', 20),
               ('b', structure.Struct([('z', 50)])),
           ])))
   self.assertFalse(
       structure.is_same_structure(
           structure.Struct([('x', {
               'y': 4
           })]), structure.Struct([('x', {
               'y': 5,
               'z': 6
           })])))
   self.assertTrue(
       structure.is_same_structure(
           structure.Struct([('x', {
               'y': 5
           })]), structure.Struct([('x', {
               'y': 6
           })])))
   with self.assertRaises(TypeError):
     structure.is_same_structure(
          {'x': 5.0},  # not a Struct
         structure.Struct([('x', 5.0)]))
Example #4
 def fake_secure_sum(value, bitwidth):
     # TODO(b/165856119): update parameter validation to reflect
      # `federated_secure_sum` if it becomes possible to broadcast `bitwidth`.
     value_type = value.type_signature.member
     if value_type.is_struct():
         bitwidth_struct = structure.from_container(bitwidth)
         if not structure.is_same_structure(value_type, bitwidth_struct):
             raise TypeError(
                 'value and bitwidth must have the same structure.\n'
                 'value: {v}\nbitwidth:{b}'.format(
                     v=value.type_signature.member,
                     b=bitwidth.type_signature))
     return federated_aggregations.intrinsics.federated_sum(value)
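Note that the check above compares only structure (field names and nesting), never the leaf values, so any bitwidth container that mirrors the value's structure passes. A minimal sketch with hypothetical values, assuming the public `tff.structure` module:

import tensorflow_federated as tff

structure = tff.structure

value_like = structure.Struct.named(a=0, b=structure.Struct.named(c=0))
bitwidth_ok = structure.Struct.named(a=8, b=structure.Struct.named(c=8))
bitwidth_bad = structure.Struct.named(a=8)

structure.is_same_structure(value_like, bitwidth_ok)   # True: same names, same nesting.
structure.is_same_structure(value_like, bitwidth_bad)  # False: field `b` is missing.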
Example #5
 def test_is_same_structure_check_types(self):
   self.assertTrue(
       structure.is_same_structure(
           structure.Struct.named(a=10), structure.Struct.named(a=20)))
   self.assertTrue(
       structure.is_same_structure(
           structure.Struct.named(
               a=10,
               b=structure.Struct.named(z=5),
           ), structure.Struct.named(a=20, b=structure.Struct.named(z=50))))
   self.assertFalse(
       structure.is_same_structure(
           structure.Struct.named(x=dict(y=4)),
           structure.Struct.named(x=dict(y=5, z=6))))
   self.assertTrue(
       structure.is_same_structure(
           structure.Struct.named(x=dict(y=5)),
           structure.Struct.named(x=dict(y=6))))
   with self.assertRaises(TypeError):
     structure.is_same_structure(
         {'x': 5.0},  # not a Struct
         structure.Struct.named(x=5.0))
    async def compute_federated_secure_sum(
        self, arg: federated_resolving_strategy.FederatedResolvingStrategyValue
    ) -> federated_resolving_strategy.FederatedResolvingStrategyValue:
        logging.warning(
            'The implementation of the `tff.federated_secure_sum` intrinsic '
            'provided by the `tff.backends.test` runtime uses no cryptography.'
        )
        py_typecheck.check_type(arg.internal_representation, structure.Struct)
        py_typecheck.check_len(arg.internal_representation, 2)
        summands, bitwidth = await asyncio.gather(
            self.ingest_value(arg.internal_representation[0],
                              arg.type_signature[0]).compute(),
            self.ingest_value(arg.internal_representation[1],
                              arg.type_signature[1]).compute())
        summands_type = arg.type_signature[0].member
        if not type_analysis.is_structure_of_integers(summands_type):
            raise TypeError(
                'Cannot compute `federated_secure_sum` on summands that are not '
                'TensorType or StructType of TensorType. Got {t}'.format(
                    t=repr(summands_type)))
        if (summands_type.is_struct()
                and not structure.is_same_structure(summands_type, bitwidth)):
            raise TypeError(
                'Cannot compute `federated_secure_sum` if summands and bitwidth are '
                'not the same structure. Got summands={s}, bitwidth={b}'.
                format(s=repr(summands_type), b=repr(bitwidth.type_signature)))

        num_additional_bits = await self._compute_extra_bits_for_secagg()
        # Clamp to 64 bits, otherwise we can't represent the mask in TensorFlow.
        extended_bitwidth = _map_numpy_or_structure(
            bitwidth, fn=lambda b: min(b.numpy() + num_additional_bits, 64))
        logging.debug('Emulated secure sum effective bitwidth: %s',
                      extended_bitwidth)
        # Now we need to cast the summands into the integral type that is large
        # enough to represent the sum and the mask.
        summation_type_spec = _compute_summation_type_for_bitwidth(
            extended_bitwidth, summands_type)
        # `summands` is a list of all clients' summands. We map
        # `_extract_numpy_arrays` over the list, applying it pointwise to each
        # client's summands.
        summand_tensors = tf.nest.map_structure(_extract_numpy_arrays,
                                                summands)
        # Dtype conversion trick: pull the summand values out, and push them back
        # into the executor using the new dtypes decided based on bitwidth.
        casted_summands = await self._executor.create_value(
            summand_tensors, computation_types.at_clients(summation_type_spec))
        # To emulate SecAgg without the random masks, we must mask the summands to
        # the effective bitwidth. This isn't strictly necessary because we also
        # mask the sum result and modulus operator is distributive, but this more
        # accurately reflects the system.
        mask = await self._embed_tf_secure_sum_mask_value(
            summation_type_spec, extended_bitwidth)
        masked_summands = await self._compute_modulus(casted_summands, mask)
        logging.debug('Computed masked modular summands as: %s',
                      await masked_summands.compute())
        # Then perform the sum and the modulo operation (using powers-of-2
        # bitmasking) on the sum, using the computed effective bitwidth.
        sum_result = await self.compute_federated_sum(masked_summands)
        modular_sums = await self._compute_modulus(sum_result, mask)
        # Dtype conversion trick again: pull the modular sum values out, and
        # push them back into the executor using the dtype from the summands.
        modular_sum_values = _extract_numpy_arrays(
            await modular_sums.compute())
        logging.debug('Computed modular sums as: %s', modular_sum_values)
        return await self._executor.create_value(
            modular_sum_values, computation_types.at_server(summands_type))
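The modular arithmetic being emulated above can be sketched in plain NumPy (the client values, bitwidth, and three-client setup below are hypothetical):

import numpy as np

client_summands = np.array([11, 250, 300], dtype=np.int64)  # one summand per client
bitwidth = 8
num_additional_bits = 2  # e.g. enough headroom to sum values from 3-4 clients
effective_bitwidth = min(bitwidth + num_additional_bits, 64)
mask = 2 ** effective_bitwidth  # the power-of-2 modulus applied above

masked_summands = client_summands % mask      # mask each client's summand
modular_sum = masked_summands.sum() % mask    # sum, then mask the sum
# modular_sum == 561 here; sums exceeding the effective bitwidth wrap around,
# mimicking fixed-width secure aggregation without any cryptography.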
def create_binary_operator_with_upcast(
        type_signature: computation_types.StructType,
        operator: Callable[[Any, Any], Any]) -> ComputationProtoAndType:
    """Creates TF computation upcasting its argument and applying `operator`.

    Args:
      type_signature: A `computation_types.StructType` with two elements, both
        only containing structs or tensors in their type tree. The first and
        second element must match in structure, or the second element may be a
        single tensor type that is broadcasted (upcast) to the leaves of the
        structure of the first type.
      operator: Callable defining the operator.

    Returns:
      Same as `create_binary_operator()`.
    """
    py_typecheck.check_type(type_signature, computation_types.StructType)
    py_typecheck.check_callable(operator)
    type_analysis.check_tensorflow_compatible_type(type_signature)
    if not type_signature.is_struct() or len(type_signature) != 2:
        raise TypeError(
            'To apply a binary operator, we must by definition have an '
            'argument which is a `StructType` with 2 elements; '
            'asked to create a binary operator for type: {t}'.format(
                t=type_signature))
    if type_analysis.contains(type_signature, lambda t: t.is_sequence()):
        raise TypeError('Applying binary operators in TensorFlow is only '
                        'supported on Tensors and StructTypes; you '
                        'passed {t} which contains a SequenceType.'.format(
                            t=type_signature))

    def _pack_into_type(to_pack, type_spec):
        """Pack Tensor value `to_pack` into the nested structure `type_spec`."""
        if type_spec.is_struct():
            elem_iter = structure.iter_elements(type_spec)
            return structure.Struct([(elem_name,
                                      _pack_into_type(to_pack, elem_type))
                                     for elem_name, elem_type in elem_iter])
        elif type_spec.is_tensor():
            return tf.broadcast_to(to_pack, type_spec.shape)

    with tf.Graph().as_default() as graph:
        first_arg, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
            'x', type_signature[0], graph)
        operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
            'y', type_signature[1], graph)

        if type_signature[0].is_struct() and type_signature[1].is_struct():
            # If both the first and second arguments are structs with the same
            # structure, simply re-use operand_2_value as the second argument;
            # `structure.map_structure` below will map the binary operator
            # pointwise to the leaves of the structure.
            if structure.is_same_structure(type_signature[0],
                                           type_signature[1]):
                second_arg = operand_2_value
            else:
                raise TypeError(
                    'Cannot upcast one structure to a different structure. '
                    '{x} -> {y}'.format(x=type_signature[1],
                                        y=type_signature[0]))
        elif type_signature[0].is_equivalent_to(type_signature[1]):
            second_arg = operand_2_value
        else:
            second_arg = _pack_into_type(operand_2_value, type_signature[0])

        if type_signature[0].is_tensor():
            result_value = operator(first_arg, second_arg)
        elif type_signature[0].is_struct():
            result_value = structure.map_structure(operator, first_arg,
                                                   second_arg)
        else:
            raise TypeError(
                'Encountered unexpected type {t}; can only handle Tensor '
                'and StructTypes.'.format(t=type_signature[0]))

    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        result_value, graph)

    type_signature = computation_types.FunctionType(type_signature,
                                                    result_type)
    parameter_binding = pb.TensorFlow.Binding(
        struct=pb.TensorFlow.StructBinding(
            element=[operand_1_binding, operand_2_binding]))
    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=parameter_binding,
                               result=result_binding)
    return _tensorflow_comp(tensorflow, type_signature)
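A usage sketch for the helper above (the argument type is hypothetical, and the two-tuple unpacking assumes `ComputationProtoAndType` is a `(proto, type)` pair, as the name suggests): when the second element is a single tensor type, it is upcast into every leaf of the first element's structure before the operator is applied pointwise.

import tensorflow as tf

# Assumes `computation_types` and `create_binary_operator_with_upcast` from the
# module above are in scope; the concrete type is purely illustrative.
arg_type = computation_types.StructType([
    computation_types.StructType([('a', tf.float32), ('b', tf.float32)]),
    computation_types.TensorType(tf.float32),
])
proto, type_spec = create_binary_operator_with_upcast(arg_type, tf.multiply)
# `type_spec` is a FunctionType whose result keeps the first element's
# structure; semantically, <<a=2.0, b=3.0>, 10.0> maps to <a=20.0, b=30.0>.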