Example #1
#   a = generic_partial_reduce(x, zero, accumulate, INTERMEDIATE_AGGREGATORS)
#   b = generic_reduce(a, zero, merge, SERVER)
#   c = generic_map(report, b)
#   return c
#
# Actual implementations might vary.
#
# Type signature: <{T}@CLIENTS,U,(<U,T>->U),(<U,U>->U),(U->R)> -> R@SERVER
FEDERATED_AGGREGATE = IntrinsicDef(
    'FEDERATED_AGGREGATE', 'federated_aggregate',
    computation_types.FunctionType(parameter=[
        type_constructors.at_clients(computation_types.AbstractType('T')),
        computation_types.AbstractType('U'),
        type_constructors.reduction_op(computation_types.AbstractType('U'),
                                       computation_types.AbstractType('T')),
        type_constructors.binary_op(computation_types.AbstractType('U')),
        computation_types.FunctionType(computation_types.AbstractType('U'),
                                       computation_types.AbstractType('R'))
    ],
                                   result=type_constructors.at_server(
                                       computation_types.AbstractType('R'))))

# Applies a given function to a value on the server.
#
# Type signature: <(T->U),T@SERVER> -> U@SERVER
FEDERATED_APPLY = IntrinsicDef(
    'FEDERATED_APPLY', 'federated_apply',
    computation_types.FunctionType(parameter=[
        computation_types.FunctionType(computation_types.AbstractType('T'),
                                       computation_types.AbstractType('U')),
        type_constructors.at_server(computation_types.AbstractType('T')),
    ],
                                   result=type_constructors.at_server(
                                       computation_types.AbstractType('U'))))
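A quick sanity check (a sketch, assuming the TFF internal modules used above are importable) is to render an IntrinsicDef's type signature and compare it with the comment:

# Expected to read close to '(<(T -> U),T@SERVER> -> U@SERVER)':
print(FEDERATED_APPLY.type_signature)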
Example #2
def create_dummy_intrinsic_def_federated_value_at_clients():
    value = intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS
    type_signature = computation_types.FunctionType(
        tf.float32, computation_types.at_clients(tf.float32, all_equal=True))
    return value, type_signature
Example #3
def create_dummy_computation_lambda_empty():
    """Returns a lambda computation and type `( -> <>)`."""
    value = computation_factory.create_lambda_empty_struct()
    type_signature = computation_types.FunctionType(None, [])
    return value, type_signature
Example #4
 def test_intrinsic_class_fails_named_tuple_type_with_names(self):
     with self.assertRaises(TypeError):
         _ = building_blocks.Intrinsic(
             intrinsic_defs.GENERIC_PLUS.uri,
             computation_types.FunctionType([('a', tf.int32),
                                             ('b', tf.int32)], tf.int32))
Example #5
def create_dummy_intrinsic_def_federated_eval_at_clients():
    value = intrinsic_defs.FEDERATED_EVAL_AT_CLIENTS
    type_signature = computation_types.FunctionType(
        computation_types.FunctionType(None, tf.float32),
        computation_types.at_clients(tf.float32))
    return value, type_signature
Example #6
async def compute_intrinsic_federated_weighted_mean(
    executor: executor_base.Executor, arg: executor_value_base.ExecutorValue
) -> executor_value_base.ExecutorValue:
  """Computes a federated weighted mean on the given `executor`.

  Args:
    executor: The executor to use.
    arg: The argument to be embedded in `executor`.

  Returns:
    The result embedded in `executor`.
  """
  type_analysis.check_valid_federated_weighted_mean_argument_tuple_type(
      arg.type_signature)
  zip1_type = computation_types.FunctionType(
      computation_types.StructType([
          type_factory.at_clients(arg.type_signature[0].member),
          type_factory.at_clients(arg.type_signature[1].member)
      ]),
      type_factory.at_clients(
          computation_types.StructType(
              [arg.type_signature[0].member, arg.type_signature[1].member])))

  multiply_blk = building_block_factory.create_tensorflow_binary_operator_with_upcast(
      zip1_type.result.member, tf.multiply)

  map_type = computation_types.FunctionType(
      computation_types.StructType(
          [multiply_blk.type_signature, zip1_type.result]),
      type_factory.at_clients(multiply_blk.type_signature.result))

  sum1_type = computation_types.FunctionType(
      type_factory.at_clients(map_type.result.member),
      type_factory.at_server(map_type.result.member))

  sum2_type = computation_types.FunctionType(
      type_factory.at_clients(arg.type_signature[1].member),
      type_factory.at_server(arg.type_signature[1].member))

  zip2_type = computation_types.FunctionType(
      computation_types.StructType([sum1_type.result, sum2_type.result]),
      type_factory.at_server(
          computation_types.StructType(
              [sum1_type.result.member, sum2_type.result.member])))

  divide_blk = building_block_factory.create_tensorflow_binary_operator_with_upcast(
      zip2_type.result.member, tf.divide)

  async def _compute_multiply_fn():
    return await executor.create_value(multiply_blk.proto,
                                       multiply_blk.type_signature)

  async def _compute_multiply_arg():
    zip1_comp = create_intrinsic_comp(intrinsic_defs.FEDERATED_ZIP_AT_CLIENTS,
                                      zip1_type)
    zip_fn = await executor.create_value(zip1_comp, zip1_type)
    return await executor.create_call(zip_fn, arg)

  async def _compute_product_fn():
    map_comp = create_intrinsic_comp(intrinsic_defs.FEDERATED_MAP, map_type)
    return await executor.create_value(map_comp, map_type)

  async def _compute_product_arg():
    multiply_fn, multiply_arg = await asyncio.gather(_compute_multiply_fn(),
                                                     _compute_multiply_arg())
    return await executor.create_struct((multiply_fn, multiply_arg))

  async def _compute_products():
    product_fn, product_arg = await asyncio.gather(_compute_product_fn(),
                                                   _compute_product_arg())
    return await executor.create_call(product_fn, product_arg)

  async def _compute_total_weight():
    sum2_comp = create_intrinsic_comp(intrinsic_defs.FEDERATED_SUM, sum2_type)
    sum2_fn, sum2_arg = await asyncio.gather(
        executor.create_value(sum2_comp, sum2_type),
        executor.create_selection(arg, index=1))
    return await executor.create_call(sum2_fn, sum2_arg)

  async def _compute_sum_of_products():
    sum1_comp = create_intrinsic_comp(intrinsic_defs.FEDERATED_SUM, sum1_type)
    sum1_fn, products = await asyncio.gather(
        executor.create_value(sum1_comp, sum1_type), _compute_products())
    return await executor.create_call(sum1_fn, products)

  async def _compute_zip2_fn():
    zip2_comp = create_intrinsic_comp(intrinsic_defs.FEDERATED_ZIP_AT_SERVER,
                                      zip2_type)
    return await executor.create_value(zip2_comp, zip2_type)

  async def _compute_zip2_arg():
    sum_of_products, total_weight = await asyncio.gather(
        _compute_sum_of_products(), _compute_total_weight())
    return await executor.create_struct([sum_of_products, total_weight])

  async def _compute_divide_fn():
    return await executor.create_value(divide_blk.proto,
                                       divide_blk.type_signature)

  async def _compute_divide_arg():
    zip_fn, zip_arg = await asyncio.gather(_compute_zip2_fn(),
                                           _compute_zip2_arg())
    return await executor.create_call(zip_fn, zip_arg)

  async def _compute_apply_fn():
    apply_type = computation_types.FunctionType(
        computation_types.StructType(
            [divide_blk.type_signature, zip2_type.result]),
        type_factory.at_server(divide_blk.type_signature.result))
    apply_comp = create_intrinsic_comp(intrinsic_defs.FEDERATED_APPLY,
                                       apply_type)
    return await executor.create_value(apply_comp, apply_type)

  async def _compute_apply_arg():
    divide_fn, divide_arg = await asyncio.gather(_compute_divide_fn(),
                                                 _compute_divide_arg())
    return await executor.create_struct([divide_fn, divide_arg])

  async def _compute_divided():
    apply_fn, apply_arg = await asyncio.gather(_compute_apply_fn(),
                                               _compute_apply_arg())
    return await executor.create_call(apply_fn, apply_arg)

  return await _compute_divided()
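The helpers above fan out independent sub-steps with `asyncio.gather` before each dependent `create_call`. A minimal standalone sketch of that pattern (every name below is an illustrative stand-in, not part of the snippet above):

import asyncio

async def _fetch_fn():
  await asyncio.sleep(0.01)  # stand-in for executor.create_value(...)
  return 'fn'

async def _fetch_arg():
  await asyncio.sleep(0.01)  # stand-in for executor.create_struct(...)
  return 'arg'

async def _call():
  # Independent dependencies are awaited concurrently, then combined.
  fn, arg = await asyncio.gather(_fetch_fn(), _fetch_arg())
  return (fn, arg)  # stand-in for executor.create_call(fn, arg)

print(asyncio.run(_call()))  # prints ('fn', 'arg')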
Example #7
 def test_serialize_deserialize_function_types(self):
     self._serialize_deserialize_roundtrip_test([
         computation_types.FunctionType(tf.int32, tf.bool),
         computation_types.FunctionType(None, tf.bool),
     ])
Example #8
 def test_repr(self):
     self.assertEqual(
         repr(computation_types.FunctionType(tf.int32, tf.bool)),
         'FunctionType(TensorType(tf.int32), TensorType(tf.bool))')
     self.assertEqual(repr(computation_types.FunctionType(None, tf.bool)),
                      'FunctionType(None, TensorType(tf.bool))')
Example #9
 def test_str(self):
     self.assertEqual(
         str(computation_types.FunctionType(tf.int32, tf.bool)),
         '(int32 -> bool)')
     self.assertEqual(str(computation_types.FunctionType(None, tf.bool)),
                      '( -> bool)')
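The same string convention extends to structured parameters; a small sketch in the spirit of the tests above (the exact rendering is approximate):

t = computation_types.FunctionType([tf.int32, tf.int32], tf.float32)
# str(t) renders roughly as '(<int32,int32> -> float32)'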
Example #10
def create_binary_operator_with_upcast(
    type_signature: computation_types.NamedTupleType,
    operator: Callable[[Any, Any], Any]) -> pb.Computation:
  """Creates TF computation upcasting its argument and applying `operator`.

  Args:
    type_signature: A `computation_types.NamedTupleType` with two elements, both
      of the same type or the second able to be upcast to the first, as
      explained in `apply_binary_operator_with_upcast`, and both containing only
      tuples and tensors in their type tree.
    operator: Callable defining the operator.

  Returns:
    A `building_blocks.CompiledComputation` encapsulating a function which
    upcasts the second element of its argument and applies the binary
    operator.
  """
  py_typecheck.check_type(type_signature, computation_types.NamedTupleType)
  py_typecheck.check_callable(operator)
  type_analysis.check_tensorflow_compatible_type(type_signature)
  if not type_signature.is_tuple() or len(type_signature) != 2:
    raise TypeError('To apply a binary operator, we must by definition have an '
                    'argument which is a `NamedTupleType` with 2 elements; '
                    'asked to create a binary operator for type: {t}'.format(
                        t=type_signature))
  if type_analysis.contains(type_signature, lambda t: t.is_sequence()):
    raise TypeError(
        'Applying binary operators in TensorFlow is only '
        'supported on Tensors and NamedTupleTypes; you '
        'passed {t} which contains a SequenceType.'.format(t=type_signature))

  def _pack_into_type(to_pack, type_spec):
    """Pack Tensor value `to_pack` into the nested structure `type_spec`."""
    if type_spec.is_tuple():
      elem_iter = anonymous_tuple.iter_elements(type_spec)
      return anonymous_tuple.AnonymousTuple([
          (elem_name, _pack_into_type(to_pack, elem_type))
          for elem_name, elem_type in elem_iter
      ])
    elif type_spec.is_tensor():
      return tf.broadcast_to(to_pack, type_spec.shape)

  with tf.Graph().as_default() as graph:
    first_arg, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', type_signature[0], graph)
    operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
        'y', type_signature[1], graph)
    if type_signature[0].is_equivalent_to(type_signature[1]):
      second_arg = operand_2_value
    else:
      second_arg = _pack_into_type(operand_2_value, type_signature[0])

    if type_signature[0].is_tensor():
      result_value = operator(first_arg, second_arg)
    elif type_signature[0].is_tuple():
      result_value = anonymous_tuple.map_structure(operator, first_arg,
                                                   second_arg)
    else:
      raise TypeError('Encountered unexpected type {t}; can only handle Tensor '
                      'and NamedTupleTypes.'.format(t=type_signature[0]))

  result_type, result_binding = tensorflow_utils.capture_result_from_graph(
      result_value, graph)

  type_signature = computation_types.FunctionType(type_signature, result_type)
  parameter_binding = pb.TensorFlow.Binding(
      tuple=pb.TensorFlow.NamedTupleBinding(
          element=[operand_1_binding, operand_2_binding]))
  tensorflow = pb.TensorFlow(
      graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
      parameter=parameter_binding,
      result=result_binding)
  return pb.Computation(
      type=type_serialization.serialize_type(type_signature),
      tensorflow=tensorflow)
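A hedged usage sketch (assuming the imports visible in the snippet above, and that `computation_types.to_type` accepts this Python shorthand): the second element is a scalar that the computation broadcasts into both leaves of the first element before dividing.

arg_type = computation_types.to_type(
    [[('a', tf.float32), ('b', tf.float32)], tf.float32])
divide_comp = create_binary_operator_with_upcast(arg_type, tf.divide)
# divide_comp is a `pb.Computation` whose type reads roughly as
# '(<<a=float32,b=float32>,float32> -> <a=float32,b=float32>)'.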
Example #11
 def __init__(self, name, parameter_type):
   self._name = name
   super(TestFunction, self).__init__(
       computation_types.FunctionType(parameter_type, tf.string),
       context_stack_impl.context_stack)
Example #12
def check_and_pack_after_aggregate_type_signature(type_spec,
                                                  previously_packed_types):
  """Checks types inferred from `after_aggregate` and packs in `previously_packed_types`.

  After splitting the `next` portion of a `tff.utils.IterativeProcess` all the
  way down, `after_aggregate` should have the type signature
  `<<<s1,c1>,c2>,s3> -> <s6,s7,c6>`. This function validates every element of
  the above and, in addition, extracts and packs the types of `s4`, `s5`,
  `update` and `c3`.

  Args:
    type_spec: The `type_signature` attribute of the `after_aggregate` portion
      of the `tff.utils.IterativeProcess` from which we are looking to extract
      an instance of `canonical_form.CanonicalForm`.
    previously_packed_types: Dict containing the information from `next`,
      `before_broadcast` and `before_aggregate` in the iterative process we are
      parsing.

  Returns:
    A `dict` packing the types which can be inferred from `type_spec`.

  Raises:
    TypeError: If `type_signature` is incompatible with
    `previously_packed_types`.
  """
  should_raise = False
  if not (type_spec.parameter[0][0][0] == previously_packed_types['s1_type'] and
          type_spec.parameter[0][0][1] == previously_packed_types['c1_type'] and
          type_spec.parameter[0][1] == previously_packed_types['c2_type'] and
          type_spec.parameter[1] == previously_packed_types['s3_type']):
    should_raise = True
  if not (type_spec.result[0] == previously_packed_types['s6_type'] and
          type_spec.result[1] == previously_packed_types['s7_type']):
    should_raise = True
  if (len(type_spec.result) == 3 and
      type_spec.result[2] != previously_packed_types['c6_type']):
    should_raise = True
  if should_raise:
    # TODO(b/121290421): These error messages, and indeed the 'track boolean and
    # raise once' logic of these methods as well, is intended to be provisional
    # and revisited when we've seen the compilation pipeline fail more clearly,
    # or maybe preferably iteratively improved as new failure modes are
    # encountered.
    raise TypeError(
        'Encountered a type error while checking `after_aggregate`; '
        'expected a type signature of the form '
        '`<<<s1,c1>,c2>,s3> -> <s6,s7,c6>`, where s1 matches {}, '
        'c1 matches {}, c2 matches {}, s3 matches {}, s6 matches '
        '{}, s7 matches {}, c6 matches {}, as defined in '
        '`canonical_form.CanonicalForm`. Encountered a type signature '
        '{}.'.format(previously_packed_types['s1_type'],
                     previously_packed_types['c1_type'],
                     previously_packed_types['c2_type'],
                     previously_packed_types['s3_type'],
                     previously_packed_types['s6_type'],
                     previously_packed_types['s7_type'],
                     previously_packed_types['c6_type'], type_spec))
  s4_type = computation_types.FederatedType([
      previously_packed_types['s1_type'].member,
      previously_packed_types['s3_type'].member
  ], placements.SERVER)
  s5_type = computation_types.FederatedType([
      previously_packed_types['s6_type'].member,
      previously_packed_types['s7_type'].member
  ], placements.SERVER)
  newly_determined_types = {}
  newly_determined_types['s4_type'] = s4_type
  newly_determined_types['s5_type'] = s5_type
  newly_determined_types['update_type'] = computation_types.FunctionType(
      s4_type.member, s5_type.member)
  c3_type = computation_types.FederatedType([
      previously_packed_types['c1_type'].member,
      previously_packed_types['c2_type'].member
  ], placements.CLIENTS)
  newly_determined_types['c3_type'] = c3_type
  return dict(
      itertools.chain(
          six.iteritems(previously_packed_types),
          six.iteritems(newly_determined_types)))
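As an aside, the final merge above is simply a union of the two dicts; on Python 3 a behaviorally equivalent form would be:

  return {**previously_packed_types, **newly_determined_types}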
Example #13
def check_and_pack_before_aggregate_type_signature(type_spec,
                                                   previously_packed_types):
  """Checks types inferred from `before_aggregate` and packs in `previously_packed_types`.

  After splitting the `after_broadcast` portion of a
  `tff.utils.IterativeProcess` into `before_aggregate` and `after_aggregate`,
  `before_aggregate` should have type signature
  `<<s1,c1>,c2> -> <c5,zero,accumulate,merge,report>`. This
  function validates `c1`, `s1` and `c2` against the existing entries in
  `previously_packed_types`, then packs `c5`, `zero`, `accumulate`, `merge` and
  `report`.

  Args:
    type_spec: The `type_signature` attribute of the `before_aggregate` portion
      of the `tff.utils.IterativeProcess` from which we are looking to extract
      an instance of `canonical_form.CanonicalForm`.
    previously_packed_types: Dict containing the information from `next` and
      `before_broadcast` in the iterative process we are parsing.

  Returns:
    A `dict` packing the types which can be inferred from `type_spec`.

  Raises:
    TypeError: If `type_signature` is incompatible with
    `previously_packed_types`.
  """
  should_raise = False
  if not (isinstance(type_spec, computation_types.FunctionType) and
          isinstance(type_spec.parameter, computation_types.NamedTupleType)):
    should_raise = True
  if not (isinstance(type_spec.parameter[0], computation_types.NamedTupleType)
          and len(type_spec.parameter[0]) == 2 and
          type_spec.parameter[0][0] == previously_packed_types['s1_type'] and
          type_spec.parameter[0][1] == previously_packed_types['c1_type']):
    should_raise = True
  if not (
      isinstance(type_spec.parameter[1], computation_types.FederatedType) and
      type_spec.parameter[1].placement == placements.CLIENTS and
      type_spec.parameter[1].member == previously_packed_types['s2_type'].member
  ):
    should_raise = True
  if not (isinstance(type_spec.result, computation_types.NamedTupleType) and
          len(type_spec.result) == 5 and
          isinstance(type_spec.result[0], computation_types.FederatedType) and
          type_spec.result[0].placement == placements.CLIENTS and
          type_utils.is_tensorflow_compatible_type(type_spec.result[1]) and
          type_spec.result[2] == computation_types.FunctionType(
              [type_spec.result[1], type_spec.result[0].member],
              type_spec.result[1]) and
          type_spec.result[3] == computation_types.FunctionType(
              [type_spec.result[1], type_spec.result[1]], type_spec.result[1])
          and type_spec.result[4].parameter == type_spec.result[1] and
          type_utils.is_tensorflow_compatible_type(type_spec.result[4].result)):
    should_raise = True
  if should_raise:
    # TODO(b/121290421): These error messages, and indeed the 'track boolean and
    # raise once' logic of these methods as well, is intended to be provisional
    # and revisited when we've seen the compilation pipeline fail more clearly,
    # or maybe preferably iteratively improved as new failure modes are
    # encountered.
    raise TypeError(
        'Encountered a type error while checking '
        '`before_aggregate`. Expected a type signature of the '
        'form `<<s1,c1>,c2> -> <c5,zero,accumulate,merge,report>`, '
        'where `s1` matches {}, `c1` matches {}, and `c2` matches '
        'the result of broadcasting {}, as defined in '
        '`canonical_form.CanonicalForm`. Found type signature {}.'.format(
            previously_packed_types['s1_type'],
            previously_packed_types['c1_type'],
            previously_packed_types['s2_type'], type_spec))
  newly_determined_types = {}
  c2_type = type_spec.parameter[1]
  newly_determined_types['c2_type'] = c2_type
  c3_type = computation_types.FederatedType(
      [previously_packed_types['c1_type'].member, c2_type.member],
      placements.CLIENTS)
  newly_determined_types['c3_type'] = c3_type
  c5_type = type_spec.result[0]
  zero_type = computation_types.FunctionType(None, type_spec.result[1])
  accumulate_type = type_spec.result[2]
  merge_type = type_spec.result[3]
  report_type = type_spec.result[4]
  newly_determined_types['c5_type'] = c5_type
  newly_determined_types['zero_type'] = zero_type
  newly_determined_types['accumulate_type'] = accumulate_type
  newly_determined_types['merge_type'] = merge_type
  newly_determined_types['report_type'] = report_type
  newly_determined_types['s3_type'] = computation_types.FederatedType(
      report_type.result, placements.SERVER)
  c4_type = computation_types.FederatedType([
      newly_determined_types['c5_type'].member,
      previously_packed_types['c6_type'].member
  ], placements.CLIENTS)
  newly_determined_types['c4_type'] = c4_type
  newly_determined_types['work_type'] = computation_types.FunctionType(
      c3_type.member, c4_type.member)
  return dict(
      itertools.chain(
          six.iteritems(previously_packed_types),
          six.iteritems(newly_determined_types)))
Example #14
#   a = generic_partial_reduce(x, zero, accumulate, INTERMEDIATE_AGGREGATORS)
#   b = generic_reduce(a, zero, merge, SERVER)
#   c = generic_map(report, b)
#   return c
#
# Actual implementations might vary.
#
# Type signature: <{T}@CLIENTS,U,(<U,T>->U),(<U,U>->U),(U->R)> -> R@SERVER
FEDERATED_AGGREGATE = IntrinsicDef(
    'FEDERATED_AGGREGATE', 'federated_aggregate',
    computation_types.FunctionType(parameter=[
        type_factory.at_clients(computation_types.AbstractType('T')),
        computation_types.AbstractType('U'),
        type_factory.reduction_op(computation_types.AbstractType('U'),
                                  computation_types.AbstractType('T')),
        type_factory.binary_op(computation_types.AbstractType('U')),
        computation_types.FunctionType(computation_types.AbstractType('U'),
                                       computation_types.AbstractType('R'))
    ],
                                   result=type_factory.at_server(
                                       computation_types.AbstractType('R'))))

# Applies a given function to a value on the server.
#
# Type signature: <(T->U),T@SERVER> -> U@SERVER
FEDERATED_APPLY = IntrinsicDef(
    'FEDERATED_APPLY', 'federated_apply',
    computation_types.FunctionType(parameter=[
        computation_types.FunctionType(computation_types.AbstractType('T'),
                                       computation_types.AbstractType('U')),
        type_factory.at_server(computation_types.AbstractType('T')),
    ],
                                   result=type_factory.at_server(
                                       computation_types.AbstractType('U'))))
Example #15
def create_binary_operator(operator, operand_type) -> pb.Computation:
    """Returns a tensorflow computation representing the binary `operator`.

  The returned computation has the type signature `(<T,T> -> U)`, where `T` is
  `operand_type` and `U` is the result of applying the `operator` to a tuple of
  type `<T,T>`.

  Note: If `operand_type` is a `computation_types.NamedTupleType`, then
  `operator` will be applied pointwise. This places the burden on callers of
  this function to construct the correct values to pass into the returned
  function. For example, to divide `[2, 2]` by `2`, first `2` must be packed
  into the data structure `[x, x]`, before the division operator of the
  appropriate type is called.

  Args:
    operator: A callable taking two arguments representing the operation to
      encode. For example: `tf.math.add`, `tf.math.multiply`, and
      `tf.math.divide`.
    operand_type: The type of the argument to the constructed binary operator; a
      type convertible to an instance of `computation_types.Type` via
      `computation_types.to_type`, containing only types compatible with the
      TFF generic operators (named tuples and tensors).

  Raises:
    TypeError: If the constraints of `operand_type` are violated or `operator`
      is not callable.
  """
    operand_type = computation_types.to_type(operand_type)
    if not type_utils.is_generic_op_compatible_type(operand_type):
        raise TypeError(
            'The type {} contains a type other than `computation_types.TensorType` '
            'and `computation_types.NamedTupleType`; this is disallowed in the '
            'generic operators.'.format(operand_type))
    py_typecheck.check_callable(operator)
    with tf.Graph().as_default() as graph:
        operand_1_value, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
            'x', operand_type, graph)
        operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
            'y', operand_type, graph)

        if isinstance(operand_type, computation_types.TensorType):
            result_value = operator(operand_1_value, operand_2_value)
        elif isinstance(operand_type, computation_types.NamedTupleType):
            result_value = anonymous_tuple.map_structure(
                operator, operand_1_value, operand_2_value)
        else:
            raise TypeError(
                'Operand type {} cannot be used in generic operations. The whitelist '
                'in `type_utils.is_generic_op_compatible_type` has allowed it to '
                'pass, and should be updated.'.format(operand_type))
        result_type, result_binding = tensorflow_utils.capture_result_from_graph(
            result_value, graph)

    type_signature = computation_types.FunctionType(
        [operand_type, operand_type], result_type)
    parameter_binding = pb.TensorFlow.Binding(
        tuple=pb.TensorFlow.NamedTupleBinding(
            element=[operand_1_binding, operand_2_binding]))
    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=parameter_binding,
                               result=result_binding)
    return pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        tensorflow=tensorflow)
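A hedged usage sketch built on the docstring's own `[2, 2] / 2` example (imports as in the snippet above): because the returned computation has type `(<T,T> -> U)`, the caller must pre-pack the scalar into a matching tuple.

operand_type = computation_types.to_type([tf.float32, tf.float32])
divide_comp = create_binary_operator(tf.divide, operand_type)
# The serialized type signature reads along the lines of
# '(<<float32,float32>,<float32,float32>> -> <float32,float32>)', so the
# caller passes <[2., 2.], [2., 2.]> rather than <[2., 2.], 2.>.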
Example #16
 def test_parameter_and_result(self):
     t = computation_types.FunctionType(tf.int32, tf.bool)
     self.assertEqual(str(t.parameter), 'int32')
     self.assertEqual(str(t.result), 'bool')
Example #17
def create_constant(scalar_value, type_spec) -> pb.Computation:
    """Returns a tensorflow computation returning a constant `scalar_value`.

  The returned computation has the type signature `( -> T)`, where `T` is
  `type_spec`.

  `scalar_value` must be a scalar, and cannot be a float if any of the tensor
  leaves of `type_spec` contain an integer data type. `type_spec` must contain
  only named tuples and tensor types, but these can be arbitrarily nested.

  Args:
    scalar_value: A scalar value to place in all the tensor leaves of
      `type_spec`.
    type_spec: A type convertible to an instance of `computation_types.Type`
      via `computation_types.to_type` and whose resulting type tree can only
      contain named tuples and tensors.

  Raises:
    TypeError: If the constraints of `type_spec` are violated.
  """
    type_spec = computation_types.to_type(type_spec)

    if not type_utils.is_generic_op_compatible_type(type_spec):
        raise TypeError(
            'Type spec {} cannot be constructed as a TensorFlow constant in TFF; '
            ' only nested tuples and tensors are permitted.'.format(type_spec))
    inferred_scalar_value_type = type_utils.infer_type(scalar_value)
    if (not isinstance(inferred_scalar_value_type,
                       computation_types.TensorType)
            or inferred_scalar_value_type.shape != tf.TensorShape(())):
        raise TypeError(
            'Must pass a scalar value to `create_tensorflow_constant`; encountered '
            'a value {}'.format(scalar_value))
    tensor_dtypes_in_type_spec = []

    def _pack_dtypes(type_signature):
        """Appends dtype of `type_signature` to nonlocal variable."""
        if isinstance(type_signature, computation_types.TensorType):
            tensor_dtypes_in_type_spec.append(type_signature.dtype)
        return type_signature, False

    type_transformations.transform_type_postorder(type_spec, _pack_dtypes)

    if (any(x.is_integer for x in tensor_dtypes_in_type_spec)
            and not inferred_scalar_value_type.dtype.is_integer):
        raise TypeError(
            'Only integers can be used as scalar values if our desired constant '
            'type spec contains any integer tensors; passed scalar {} of dtype {} '
            'for type spec {}.'.format(scalar_value,
                                       inferred_scalar_value_type.dtype,
                                       type_spec))

    def _create_result_tensor(type_spec, scalar_value):
        """Packs `scalar_value` into `type_spec` recursively."""
        if isinstance(type_spec, computation_types.TensorType):
            type_spec.shape.assert_is_fully_defined()
            result = tf.constant(scalar_value,
                                 dtype=type_spec.dtype,
                                 shape=type_spec.shape)
        else:
            elements = []
            for _, type_element in anonymous_tuple.iter_elements(type_spec):
                elements.append(
                    _create_result_tensor(type_element, scalar_value))
            result = elements
        return result

    with tf.Graph().as_default() as graph:
        result = _create_result_tensor(type_spec, scalar_value)
        _, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)

    type_signature = computation_types.FunctionType(None, type_spec)
    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=None,
                               result=result_binding)
    return pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        tensorflow=tensorflow)
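A hedged usage sketch (same imports as above): packing the scalar 10 into a two-tensor tuple type yields a no-argument computation.

ten_comp = create_constant(10, computation_types.to_type([tf.int32, tf.int32]))
# ten_comp has a type signature of roughly '( -> <int32,int32>)' and
# produces <10,10> when invoked.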
Example #18
def serialize_py_fn_as_tf_computation(target, parameter_type, context_stack):
    """Serializes the 'target' as a TF computation with a given parameter type.

  See also `serialize_tf2_as_tf_computation` for TensorFlow 2
  serialization.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function. In the future, we will add here
      support for serializing the various kinds of non-eager and eager
      functions, and eventually aim at full support for and compliance with TF
      2.0. This function is currently required to declare either zero parameters
      if `parameter_type` is `None`, or exactly one parameter if it's not
      `None`.  The nested structure of this parameter must correspond to the
      structure of the 'parameter_type'. In the future, we may support targets
      with multiple args/keyword args (to be documented in the API and
      referenced from here).
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    context_stack: The context stack to use.

  Returns:
    A tuple of (`pb.Computation`, `tff.Type`), where the computation contains
    the instance with the `pb.TensorFlow` variant set, and the type is an
    instance of `tff.Type`, potentially including Python container annotations,
    for use by TensorFlow computation wrappers.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
    # TODO(b/113112108): Support a greater variety of target type signatures,
    # with keyword args or multiple args corresponding to elements of a tuple.
    # Document all accepted forms with examples in the API, and point to there
    # from here.

    py_typecheck.check_type(target, types.FunctionType)
    py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
    parameter_type = computation_types.to_type(parameter_type)
    argspec = inspect.getargspec(target)  # pylint: disable=deprecated-method

    with tf.Graph().as_default() as graph:
        args = []
        if parameter_type:
            if len(argspec.args) != 1:
                raise ValueError(
                    'Expected the target to declare exactly one parameter, '
                    'found {}.'.format(repr(argspec.args)))
            parameter_name = argspec.args[0]
            parameter_value, parameter_binding = graph_utils.stamp_parameter_in_graph(
                parameter_name, parameter_type, graph)
            args.append(parameter_value)
        else:
            if argspec.args:
                raise ValueError(
                    'Expected the target to declare no parameters, found {}.'.
                    format(repr(argspec.args)))
            parameter_binding = None
        context = tf_computation_context.TensorFlowComputationContext(graph)
        with context_stack.install(context):
            result = target(*args)

            # TODO(b/122081673): This needs to change for TF 2.0. We may also
            # want to allow the person creating a tff.tf_computation to specify
            # a different initializer; e.g., if it is known that certain
            # variables will be assigned immediately to arguments of the function,
            # then it is wasteful to initialize them before this.
            #
            # The following is a bit of a work around: the collections below may
            # contain variables more than once, hence we throw into a set. TFF needs
            # to ensure all variables are initialized, but not all variables are
            # always in the collections we expect. tff.learning._KerasModel tries to
            # pull Keras variables (that may or may not be in GLOBAL_VARIABLES) into
            # TFF_MODEL_VARIABLES for now.
            all_variables = set(tf.global_variables() + tf.local_variables() +
                                tf.get_collection(graph_keys.GraphKeys.
                                                  VARS_FOR_TFF_TO_INITIALIZE))
            if all_variables:
                # Use a readable but not-too-long name for the init_op.
                name = 'init_op_for_' + '_'.join(
                    [v.name.replace(':0', '') for v in all_variables])
                if len(name) > 50:
                    name = 'init_op_for_{}_variables'.format(
                        len(all_variables))
                with tf.control_dependencies(context.init_ops):
                    # Before running the main new init op, run any initializers for sub-
                    # computations from context.init_ops. Variables from import_graph_def
                    # will not make it into the global collections, and so will not be
                    # initialized without this code path.
                    init_op_name = tf.initializers.variables(all_variables,
                                                             name=name).name
            elif context.init_ops:
                init_op_name = tf.group(*context.init_ops,
                                        name='subcomputation_init_ops').name
            else:
                init_op_name = None

        result_type, result_binding = graph_utils.capture_result_from_graph(
            result, graph)

    annotated_type = computation_types.FunctionType(parameter_type,
                                                    result_type)

    return pb.Computation(type=pb.Type(function=pb.FunctionType(
        parameter=type_serialization.serialize_type(parameter_type),
        result=type_serialization.serialize_type(result_type))),
                          tensorflow=pb.TensorFlow(
                              graph_def=serialization_utils.pack_graph_def(
                                  graph.as_graph_def()),
                              parameter=parameter_binding,
                              result=result_binding,
                              initialize_op=init_op_name)), annotated_type
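A hedged usage sketch (assuming `context_stack_impl` from the surrounding TFF internals supplies the default context stack, as elsewhere in these snippets):

comp_proto, annotated_type = serialize_py_fn_as_tf_computation(
    lambda x: tf.add(x, 1), tf.int32, context_stack_impl.context_stack)
# `annotated_type` should be the functional TFF type (int32 -> int32), and
# `comp_proto` a `pb.Computation` with its `tensorflow` field populated.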
Example #19
 def test_raises_reference_to_functional_type(self):
     function_type = computation_types.FunctionType(tf.int32, tf.int32)
     ref = building_blocks.Reference('x', function_type)
     with self.assertRaisesRegex(ValueError, 'of functional type passed'):
         mapreduce_transformations.consolidate_and_extract_local_processing(
             ref)
Example #20
def serialize_tf2_as_tf_computation(target, parameter_type, unpack=None):
    """Serializes the 'target' as a TF computation with a given parameter type.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function or `tf.function`, with arguments
      matching the 'parameter_type'.
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    unpack: Whether to always unpack the parameter_type. Necessary for support
      of polymorphic tf2_computations.

  Returns:
    The constructed `pb.Computation` instance with the `pb.TensorFlow` variant
      set.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
    py_typecheck.check_callable(target)
    parameter_type = computation_types.to_type(parameter_type)
    argspec = function_utils.get_argspec(target)
    if argspec.args and not parameter_type:
        raise ValueError(
            'Expected the target to declare no parameters, found {}.'.format(
                repr(argspec.args)))

    # In the codepath for TF V1 based serialization (tff.tf_computation),
    # we get the "wrapped" function to serialize. Here, target is the
    # raw function to be wrapped; however, we still need to know if
    # the parameter_type should be unpacked into multiple args and kwargs
    # in order to construct the TensorSpecs to be passed in the call
    # to get_concrete_fn below.
    unpack = function_utils.infer_unpack_needed(target, parameter_type, unpack)
    arg_typespecs, kwarg_typespecs, parameter_binding = (
        graph_utils.get_tf_typespec_and_binding(parameter_type,
                                                arg_names=argspec.args,
                                                unpack=unpack))

    # Pseudo-global to be appended to once when target_poly below is traced.
    type_and_binding_slot = []

    # N.B. To serialize a tf.function or eager python code,
    # the return type must be a flat list, tuple, or dict. However, the
    # tff.tf_computation must be able to handle structured inputs and outputs.
    # Thus, we intercept the result of calling the original target fn, introspect
    # its structure to create a result_type and bindings, and then return a
    # flat dict output. It is this new "unpacked" tf.function that we will
    # serialize using tf.saved_model.save.
    #
    # TODO(b/117428091): The return type limitation is primarily a limitation of
    # SignatureDefs  and therefore of the signatures argument to
    # tf.saved_model.save. tf.functions attached to objects and loaded back with
    # tf.saved_model.load can take/return nests; this might offer a better
    # approach to the one taken here.

    @tf.function(autograph=False)
    def target_poly(*args, **kwargs):
        result = target(*args, **kwargs)
        result_dict, result_type, result_binding = (
            graph_utils.get_tf2_result_dict_and_binding(result))
        assert not type_and_binding_slot
        # A "side channel" python output.
        type_and_binding_slot.append((result_type, result_binding))
        return result_dict

    # Triggers tracing so that type_and_binding_slot is filled.
    cc_fn = target_poly.get_concrete_function(*arg_typespecs,
                                              **kwarg_typespecs)
    assert len(type_and_binding_slot) == 1
    result_type, result_binding = type_and_binding_slot[0]

    # N.B. Note that cc_fn does *not* accept the same args and kwargs as the
    # Python target_poly; instead, it must be called with **kwargs based on the
    # unique names embedded in the TensorSpecs inside arg_typespecs and
    # kwarg_typespecs. The (preliminary) parameter_binding tracks the mapping
    # between these tensor names and the components of the (possibly nested) TFF
    # input type. When cc_fn is serialized, concrete tensors for each input are
    # introduced, and the call finalize_binding(parameter_binding,
    # sigs['serving_default'].inputs) updates the bindings to reference these
    # concrete tensors.

    # Associate vars with unique names and explicitly attach to the Checkpoint:
    var_dict = {
        'var{:02d}'.format(i): v
        for i, v in enumerate(cc_fn.graph.variables)
    }
    saveable = tf.train.Checkpoint(fn=target_poly, **var_dict)

    try:
        # TODO(b/122081673): All we really need is the  meta graph def, we could
        # probably just load that directly, e.g., using parse_saved_model from
        # tensorflow/python/saved_model/loader_impl.py, but I'm not sure we want to
        # depend on that presumably non-public symbol. Perhaps TF can expose a way
        # to just get the MetaGraphDef directly without saving to a tempfile? This
        # looks like a small change to v2.saved_model.save().
        outdir = tempfile.mkdtemp('savedmodel')
        tf.saved_model.save(saveable, outdir, signatures=cc_fn)

        graph = tf.Graph()
        with tf.Session(graph=graph) as sess:
            mgd = tf.saved_model.loader.load(
                sess,
                tags=[tf.saved_model.tag_constants.SERVING],
                export_dir=outdir)
    finally:
        shutil.rmtree(outdir)
    sigs = mgd.signature_def

    # TODO(b/123102455): Figure out how to support the init_op. The meta graph def
    # contains sigs['__saved_model_init_op'].outputs['__saved_model_init_op']. It
    # probably won't do what we want, because it will want to read from
    # Checkpoints, not just run Variable initializers (?). The right solution may
    # be to grab the target_poly.get_initialization_function(), and save a sig for
    # that.

    # Now, traverse the signature from the MetaGraphDef to find the actual
    # tensor names and write them into the bindings.
    finalize_binding(parameter_binding, sigs['serving_default'].inputs)
    finalize_binding(result_binding, sigs['serving_default'].outputs)

    annotated_type = computation_types.FunctionType(parameter_type,
                                                    result_type)

    return pb.Computation(type=pb.Type(function=pb.FunctionType(
        parameter=type_serialization.serialize_type(parameter_type),
        result=type_serialization.serialize_type(result_type))),
                          tensorflow=pb.TensorFlow(
                              graph_def=serialization_utils.pack_graph_def(
                                  mgd.graph_def),
                              parameter=parameter_binding,
                              result=result_binding)), annotated_type
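The "pseudo-global side channel filled during tracing" trick above can be shown in isolation; a minimal standalone sketch (assuming TensorFlow 2, and independent of TFF's actual wrappers):

import tensorflow as tf

slot = []  # filled exactly once, when the function is traced

@tf.function(autograph=False)
def traced_fn(x):
  slot.append(x.dtype)  # Python side effect runs at trace time only
  return x + 1

cc_fn = traced_fn.get_concrete_function(tf.TensorSpec([], tf.int32))
assert slot == [tf.int32]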
Example #21
 def test_intrinsic_class_fails_bad_type(self):
     with self.assertRaises(TypeError):
         _ = building_blocks.Intrinsic(
             intrinsic_defs.GENERIC_PLUS.uri,
             computation_types.FunctionType([tf.int32, tf.int32],
                                            tf.float32))
Example #22
def _get_type_info(initialize_tree, before_broadcast, after_broadcast,
                   before_aggregate, after_aggregate):
    """Returns type information for an `tff.utils.IterativeProcess`.

  This function is intended to be used by
  `get_canonical_form_for_iterative_process` to create the expected type
  signatures when compiling a given `tff.utils.IterativeProcess` into a
  `tff.backends.mapreduce.CanonicalForm` and returns a `collections.OrderedDict`
  whose keys and order match the explicit and intermediate components of
  `tff.backends.mapreduce.CanonicalForm` defined here:

  ```
  s1 = arg[0]
  c1 = arg[1]
  s2 = intrinsics.federated_map(cf.prepare, s1)
  c2 = intrinsics.federated_broadcast(s2)
  c3 = intrinsics.federated_zip([c1, c2])
  c4 = intrinsics.federated_map(cf.work, c3)
  c5 = c4[0]
  c6 = c5[0]
  c7 = c5[1]
  c8 = c4[1]
  s3 = intrinsics.federated_aggregate(c6,
                                      cf.zero(),
                                      cf.accumulate,
                                      cf.merge,
                                      cf.report)
  s4 = intrinsics.federated_secure_sum(c7, cf.bitwidth())
  s5 = intrinsics.federated_zip([s3, s4])
  s6 = intrinsics.federated_zip([s1, s5])
  s7 = intrinsics.federated_map(cf.update, s6)
  s8 = s7[0]
  s9 = s7[1]
  ```

  Note that the type signatures for the `initialize` and `next` components of a
  `tff.utils.IterativeProcess` are:

  initialize: `( -> s1)`
  next:       `(<s1,c1> -> <s8,s9,c8>)`

  However, the `next` component of a `tff.utils.IterativeProcess` has been
  split into a before and after broadcast and a before and after aggregate with
  the given semantics:

  ```
  (arg -> after(<arg, intrinsic(before(arg))>))
  ```

  as a result, the type signatures for the components split from the `next`
  component of a `tff.utils.IterativeProcess` are:

  before_broadcast:  `(<s1,c1> -> s2)`
  after_broadcast:   `(<<s1,c1>,c2> -> <s8,s9,c8>)`
  before_aggregate:  `(<<s1,c1>,c2> -> <<c6,zero,accumulate,merge,report>,c7>)`
  after_aggregate:   `(<<<s1,c1>,c2>,<s3,s4>> -> <s8,s9,c8>)`

  Args:
    initialize_tree: An instance of `building_blocks.ComputationBuildingBlock`
      representing the `initialize` component of a `tff.utils.IterativeProcess`.
    before_broadcast: The first result of splitting the `next` component of a
      `tff.utils.IterativeProcess` on broadcast.
    after_broadcast: The second result of splitting the `next` component of a
      `tff.utils.IterativeProcess` on broadcast.
    before_aggregate: The first result of splitting the `next` component of a
      `tff.utils.IterativeProcess` on aggregate.
    after_aggregate: The second result of splitting the `next` component of a
      `tff.utils.IterativeProcess` on aggregate.

  Raises:
    transformations.CanonicalFormCompilationError: If the arguments are of the
      wrong types.
  """

    # The type signature of `initialize` is: `( -> s1)`.
    init_tree_ty = initialize_tree.type_signature
    _check_type_is_no_arg_fn(init_tree_ty)
    _check_type(init_tree_ty.result, computation_types.FederatedType)
    _check_placement(init_tree_ty.result, placements.SERVER)
    # The named components of canonical form have no placement, so we must
    # remove the placement on the return type of initialize_tree
    initialize_type = computation_types.FunctionType(
        initialize_tree.type_signature.parameter,
        initialize_tree.type_signature.result.member)

    # The type signature of `before_broadcast` is: `(<s1,c1> -> s2)`.
    _check_type(before_broadcast.type_signature,
                computation_types.FunctionType)
    _check_type(before_broadcast.type_signature.parameter,
                computation_types.NamedTupleType)
    _check_len(before_broadcast.type_signature.parameter, 2)
    s1_type = before_broadcast.type_signature.parameter[0]
    _check_type(s1_type, computation_types.FederatedType)
    _check_placement(s1_type, placements.SERVER)
    c1_type = before_broadcast.type_signature.parameter[1]
    _check_type(c1_type, computation_types.FederatedType)
    _check_placement(c1_type, placements.CLIENTS)
    s2_type = before_broadcast.type_signature.result
    _check_type(s2_type, computation_types.FederatedType)
    _check_placement(s2_type, placements.SERVER)

    prepare_type = computation_types.FunctionType(s1_type.member,
                                                  s2_type.member)

    # The type signature of `after_broadcast` is: `(<<s1,c1>,c2> -> <s8,s9,c8>)`.
    _check_type(after_broadcast.type_signature, computation_types.FunctionType)
    _check_type(after_broadcast.type_signature.parameter,
                computation_types.NamedTupleType)
    _check_len(after_broadcast.type_signature.parameter, 2)
    _check_type(after_broadcast.type_signature.parameter[0],
                computation_types.NamedTupleType)
    _check_len(after_broadcast.type_signature.parameter[0], 2)
    _check_type_equal(after_broadcast.type_signature.parameter[0][0], s1_type)
    _check_type_equal(after_broadcast.type_signature.parameter[0][1], c1_type)
    c2_type = after_broadcast.type_signature.parameter[1]
    _check_type(c2_type, computation_types.FederatedType)
    _check_placement(c2_type, placements.CLIENTS)
    _check_type(after_broadcast.type_signature.result,
                computation_types.NamedTupleType)
    _check_len(after_broadcast.type_signature.result, 3)
    s8_type = after_broadcast.type_signature.result[0]
    _check_type(s8_type, computation_types.FederatedType)
    _check_placement(s8_type, placements.SERVER)
    s9_type = after_broadcast.type_signature.result[1]
    _check_type(s9_type, computation_types.FederatedType)
    _check_placement(s9_type, placements.SERVER)
    c8_type = after_broadcast.type_signature.result[2]
    _check_type(c8_type, computation_types.FederatedType)
    _check_placement(c8_type, placements.CLIENTS)

    # The type signature of `before_aggregate` is:
    # `(<<s1,c1>,c2> -> <<c6,zero,accumulate,merge,report>,<c7,bitwidth>>)`.
    _check_type(before_aggregate.type_signature,
                computation_types.FunctionType)
    _check_type(before_aggregate.type_signature.parameter,
                computation_types.NamedTupleType)
    _check_len(before_aggregate.type_signature.parameter, 2)
    _check_type(before_aggregate.type_signature.parameter[0],
                computation_types.NamedTupleType)
    _check_len(before_aggregate.type_signature.parameter[0], 2)
    _check_type_equal(before_aggregate.type_signature.parameter[0][0], s1_type)
    _check_type_equal(before_aggregate.type_signature.parameter[0][1], c1_type)
    _check_type_equal(before_aggregate.type_signature.parameter[1], c2_type)
    _check_type(before_aggregate.type_signature.result,
                computation_types.NamedTupleType)
    _check_len(before_aggregate.type_signature.result, 2)
    _check_len(before_aggregate.type_signature.result[0], 5)
    c6_type = before_aggregate.type_signature.result[0][0]
    _check_type(c6_type, computation_types.FederatedType)
    _check_placement(c6_type, placements.CLIENTS)
    zero_type = computation_types.FunctionType(
        None, before_aggregate.type_signature.result[0][1])
    type_utils.check_tensorflow_compatible_type(zero_type.result)
    accumulate_type = before_aggregate.type_signature.result[0][2]
    _check_type(accumulate_type, computation_types.FunctionType)
    merge_type = before_aggregate.type_signature.result[0][3]
    _check_type(merge_type, computation_types.FunctionType)
    report_type = before_aggregate.type_signature.result[0][4]
    _check_type(report_type, computation_types.FunctionType)
    _check_type(before_aggregate.type_signature.result[1],
                computation_types.NamedTupleType)
    _check_len(before_aggregate.type_signature.result[1], 2)
    c7_type = before_aggregate.type_signature.result[1][0]
    _check_type(c7_type, computation_types.FederatedType)
    _check_placement(c7_type, placements.CLIENTS)
    bitwidth_type = computation_types.FunctionType(
        None, before_aggregate.type_signature.result[1][1])
    type_utils.check_tensorflow_compatible_type(bitwidth_type.result)

    c3_type = computation_types.FederatedType([c1_type.member, c2_type.member],
                                              placements.CLIENTS)
    c5_type = computation_types.FederatedType([c6_type.member, c7_type.member],
                                              placements.CLIENTS)
    c4_type = computation_types.FederatedType([c5_type.member, c8_type.member],
                                              placements.CLIENTS)

    # The type signature of `after_aggregate` is:
    # `(<<<s1,c1>,c2>,<s3,s4>> -> <s8,s9,c8>)`.
    _check_type(after_aggregate.type_signature, computation_types.FunctionType)
    _check_type(after_aggregate.type_signature.parameter,
                computation_types.NamedTupleType)
    _check_len(after_aggregate.type_signature.parameter, 2)
    _check_type(after_aggregate.type_signature.parameter[0],
                computation_types.NamedTupleType)
    _check_len(after_aggregate.type_signature.parameter[0], 2)
    _check_type(after_aggregate.type_signature.parameter[0][0],
                computation_types.NamedTupleType)
    _check_len(after_aggregate.type_signature.parameter[0][0], 2)
    _check_type_equal(after_aggregate.type_signature.parameter[0][0][0],
                      s1_type)
    _check_type_equal(after_aggregate.type_signature.parameter[0][0][1],
                      c1_type)
    _check_type_equal(after_aggregate.type_signature.parameter[0][1], c2_type)
    _check_len(after_aggregate.type_signature.parameter[1], 2)
    s3_type = after_aggregate.type_signature.parameter[1][0]
    _check_type(s3_type, computation_types.FederatedType)
    _check_placement(s3_type, placements.SERVER)
    s4_type = after_aggregate.type_signature.parameter[1][1]
    _check_type(s4_type, computation_types.FederatedType)
    _check_placement(s4_type, placements.SERVER)
    _check_type_equal(after_aggregate.type_signature.result[0], s8_type)
    _check_type_equal(after_aggregate.type_signature.result[1], s9_type)
    _check_type_equal(after_aggregate.type_signature.result[2], c8_type)

    work_type = computation_types.FunctionType(c3_type.member, c4_type.member)

    s5_type = computation_types.FederatedType([s3_type.member, s4_type.member],
                                              placements.SERVER)
    s6_type = computation_types.FederatedType([s1_type.member, s5_type.member],
                                              placements.SERVER)
    s7_type = computation_types.FederatedType([s8_type.member, s9_type.member],
                                              placements.SERVER)
    update_type = computation_types.FunctionType(s6_type.member,
                                                 s7_type.member)

    return collections.OrderedDict(
        initialize_type=initialize_type,
        s1_type=s1_type,
        c1_type=c1_type,
        prepare_type=prepare_type,
        s2_type=s2_type,
        c2_type=c2_type,
        c3_type=c3_type,
        work_type=work_type,
        c4_type=c4_type,
        c5_type=c5_type,
        c6_type=c6_type,
        c7_type=c7_type,
        c8_type=c8_type,
        zero_type=zero_type,
        accumulate_type=accumulate_type,
        merge_type=merge_type,
        report_type=report_type,
        s3_type=s3_type,
        bitwidth_type=bitwidth_type,
        s4_type=s4_type,
        s5_type=s5_type,
        s6_type=s6_type,
        update_type=update_type,
        s7_type=s7_type,
        s8_type=s8_type,
        s9_type=s9_type,
    )
Example #23
def create_dummy_intrinsic_def_federated_broadcast():
    value = intrinsic_defs.FEDERATED_BROADCAST
    type_signature = computation_types.FunctionType(
        computation_types.at_server(tf.float32),
        computation_types.at_clients(tf.float32, all_equal=True))
    return value, type_signature
Example #24
def _concretize_abstract_types(abstract_type_spec, concrete_type_spec):
    """Recursive helper function to construct concrete type spec."""
    if isinstance(abstract_type_spec, computation_types.AbstractType):
        bound_type = bound_abstract_types.get(str(abstract_type_spec.label))
        if bound_type:
            return bound_type
        else:
            bound_abstract_types[str(
                abstract_type_spec.label)] = concrete_type_spec
            return concrete_type_spec
    elif isinstance(abstract_type_spec, computation_types.TensorType):
        return abstract_type_spec
    elif isinstance(abstract_type_spec, computation_types.NamedTupleType):
        if not isinstance(concrete_type_spec,
                          computation_types.NamedTupleType):
            raise TypeError(type_error_string)
        abstract_elements = anonymous_tuple.to_elements(abstract_type_spec)
        concrete_elements = anonymous_tuple.to_elements(concrete_type_spec)
        if len(abstract_elements) != len(concrete_elements):
            raise TypeError(type_error_string)
        concretized_tuple_elements = []
        for k in range(len(abstract_elements)):
            if abstract_elements[k][0] != concrete_elements[k][0]:
                raise TypeError(type_error_string)
            concretized_tuple_elements.append(
                (abstract_elements[k][0],
                 _concretize_abstract_types(abstract_elements[k][1],
                                            concrete_elements[k][1])))
        return computation_types.NamedTupleType(concretized_tuple_elements)
    elif isinstance(abstract_type_spec, computation_types.SequenceType):
        if not isinstance(concrete_type_spec, computation_types.SequenceType):
            raise TypeError(type_error_string)
        return computation_types.SequenceType(
            _concretize_abstract_types(abstract_type_spec.element,
                                       concrete_type_spec.element))
    elif isinstance(abstract_type_spec, computation_types.FunctionType):
        if not isinstance(concrete_type_spec, computation_types.FunctionType):
            raise TypeError(type_error_string)
        concretized_param = _concretize_abstract_types(
            abstract_type_spec.parameter, concrete_type_spec.parameter)
        concretized_result = _concretize_abstract_types(
            abstract_type_spec.result, concrete_type_spec.result)
        return computation_types.FunctionType(concretized_param,
                                              concretized_result)
    elif isinstance(abstract_type_spec, computation_types.PlacementType):
        if not isinstance(concrete_type_spec, computation_types.PlacementType):
            raise TypeError(type_error_string)
        return abstract_type_spec
    elif isinstance(abstract_type_spec, computation_types.FederatedType):
        if not isinstance(concrete_type_spec, computation_types.FederatedType):
            raise TypeError(type_error_string)
        new_member = _concretize_abstract_types(abstract_type_spec.member,
                                                concrete_type_spec.member)
        return computation_types.FederatedType(new_member,
                                               abstract_type_spec.placement,
                                               abstract_type_spec.all_equal)
    elif abstract_type_spec is None:
        if concrete_type_spec is not None:
            raise TypeError(type_error_string)
        return None
    else:
        raise TypeError(
            'Unexpected abstract typespec {}.'.format(abstract_type_spec))
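
The helper above reads `bound_abstract_types` and `type_error_string` from an enclosing scope that is not shown in this snippet. The sketch below is only an assumption about how it might be driven: it supplies those two names as module-level stand-ins and binds the abstract label 'T' consistently across a function type. The import paths are assumptions as well.

# Hedged usage sketch; the two module-level names stand in for the enclosing
# wrapper that normally owns them.
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

bound_abstract_types = {}  # abstract label (e.g. 'T') -> concrete type bound so far
type_error_string = 'Abstract and concrete type specs do not match.'

abstract = computation_types.FunctionType(computation_types.AbstractType('T'),
                                          computation_types.AbstractType('T'))
concrete = computation_types.FunctionType(tf.int32, tf.int32)

# Both occurrences of 'T' are bound to the same concrete tensor type, int32.
concretized = _concretize_abstract_types(abstract, concrete)
print(concretized)  # expected to print roughly: (int32 -> int32)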
Example #25
def create_dummy_intrinsic_def_federated_sum():
    value = intrinsic_defs.FEDERATED_SUM
    type_signature = computation_types.FunctionType(
        computation_types.at_clients(tf.float32),
        computation_types.at_server(tf.float32))
    return value, type_signature
Example #26
    def test_construction(self, weighted):
        aggregation_factory = (mean_factory.MeanFactory()
                               if weighted else sum_factory.SumFactory())

        iterative_process = optimizer_utils.build_model_delta_optimizer_process(
            model_fn=model_examples.LinearRegression,
            model_to_client_delta_fn=DummyClientDeltaFn,
            server_optimizer_fn=tf.keras.optimizers.SGD,
            model_update_aggregation_factory=aggregation_factory)

        if weighted:
            aggregate_state = collections.OrderedDict(value_sum_process=(),
                                                      weight_sum_process=())
            aggregate_metrics = collections.OrderedDict(mean_value=(),
                                                        mean_weight=())
        else:
            aggregate_state = ()
            aggregate_metrics = ()

        server_state_type = computation_types.FederatedType(
            optimizer_utils.ServerState(model=model_utils.ModelWeights(
                trainable=[
                    computation_types.TensorType(tf.float32, [2, 1]),
                    computation_types.TensorType(tf.float32)
                ],
                non_trainable=[computation_types.TensorType(tf.float32)]),
                                        optimizer_state=[tf.int64],
                                        delta_aggregate_state=aggregate_state,
                                        model_broadcast_state=()),
            placements.SERVER)

        self.assertEqual(
            str(
                computation_types.FunctionType(parameter=None,
                                               result=server_state_type)),
            str(iterative_process.initialize.type_signature))

        dataset_type = computation_types.FederatedType(
            computation_types.SequenceType(
                collections.OrderedDict(
                    x=computation_types.TensorType(tf.float32, [None, 2]),
                    y=computation_types.TensorType(tf.float32, [None, 1]))),
            placements.CLIENTS)

        metrics_type = computation_types.FederatedType(
            collections.OrderedDict(
                broadcast=(),
                aggregation=aggregate_metrics,
                train=collections.OrderedDict(
                    loss=computation_types.TensorType(tf.float32),
                    num_examples=computation_types.TensorType(tf.int32)),
                stat=collections.OrderedDict(
                    num_examples=computation_types.TensorType(tf.float32))),
            placements.SERVER)

        self.assertEqual(
            str(
                computation_types.FunctionType(
                    parameter=collections.OrderedDict(
                        server_state=server_state_type,
                        federated_dataset=dataset_type,
                    ),
                    result=(server_state_type, metrics_type))),
            str(iterative_process.next.type_signature))
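
After the type assertions, a process like this is normally driven by the usual initialize/next loop. The sketch below is illustrative only: the synthetic client datasets are shaped to match `dataset_type` above, and it assumes it runs in the same scope as `iterative_process`.

# Illustrative driver loop (sketch), assuming the `iterative_process` built above.
import collections
import tensorflow as tf

def _synthetic_client_data():
    # Matches dataset_type: x float32 [None, 2], y float32 [None, 1].
    return tf.data.Dataset.from_tensor_slices(
        collections.OrderedDict(x=[[1.0, 2.0], [3.0, 4.0]],
                                y=[[5.0], [6.0]])).batch(2)

federated_dataset = [_synthetic_client_data(), _synthetic_client_data()]

state = iterative_process.initialize()
for _ in range(2):
    # `next` returns the updated server state and the per-round metrics.
    state, metrics = iterative_process.next(state, federated_dataset)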
Example #27
def create_dummy_intrinsic_def_federated_value_at_server():
    value = intrinsic_defs.FEDERATED_VALUE_AT_SERVER
    type_signature = computation_types.FunctionType(
        tf.float32, computation_types.at_server(tf.float32))
    return value, type_signature
Example #28
def _make_sequence_reduce_type(element_type, accumulator_type):
    return computation_types.FunctionType(parameter=[
        computation_types.SequenceType(element_type), accumulator_type,
        type_factory.reduction_op(accumulator_type, element_type)
    ],
                                          result=accumulator_type)
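
A small usage sketch (hedged): for an int32 sequence reduced into an int32 accumulator, the factory above is expected to produce the signature shown in the comment.

# Usage sketch; assumes the same imports as the snippet above (tf, type_factory).
import tensorflow as tf

reduce_type = _make_sequence_reduce_type(tf.int32, tf.int32)
# Expected compact form, roughly: (<int32*,int32,(<int32,int32> -> int32)> -> int32)
print(reduce_type)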
Example #29
def create_dummy_computation_lambda_identity():
    """Returns a lambda computation and type `(float32 -> float32)`."""
    tensor_type = computation_types.TensorType(tf.float32)
    value = computation_factory.create_lambda_identity(tensor_type)
    type_signature = computation_types.FunctionType(tensor_type, tensor_type)
    return value, type_signature
Example #30
def create_constant(
        value, type_spec: computation_types.Type) -> ComputationProtoAndType:
    """Returns a tensorflow computation returning a constant `value`.

  The returned computation has the type signature `( -> T)`, where `T` is
  `type_spec`.

  `value` must be a value convertible to a tensor or a structure of values, such
  that the dtype and shapes match `type_spec`. `type_spec` must contain only
  named tuples and tensor types, but these can be arbitrarily nested.

  Args:
    value: A value to embed as a constant in the tensorflow graph.
    type_spec: A `computation_types.Type` to use as the argument to the
      constructed binary operator; must contain only named tuples and tensor
      types.

  Raises:
    TypeError: If the constraints of `type_spec` are violated.
  """
    if not type_analysis.is_generic_op_compatible_type(type_spec):
        raise TypeError(
            'Type spec {} cannot be constructed as a TensorFlow constant in TFF; '
            'only nested tuples and tensors are permitted.'.format(type_spec))
    inferred_value_type = type_conversions.infer_type(value)
    if (inferred_value_type.is_struct()
            and not type_spec.is_assignable_from(inferred_value_type)):
        raise TypeError(
            'Must pass only a tensor or a structure of tensor values to '
            '`create_constant`; encountered a value {v} with inferred '
            'type {t!r}, but needed {s!r}'.format(v=value,
                                                  t=inferred_value_type,
                                                  s=type_spec))
    if inferred_value_type.is_struct():
        value = structure.from_container(value, recursive=True)
    tensor_dtypes_in_type_spec = []

    def _pack_dtypes(type_signature):
        """Appends dtype of `type_signature` to nonlocal variable."""
        if type_signature.is_tensor():
            tensor_dtypes_in_type_spec.append(type_signature.dtype)
        return type_signature, False

    type_transformations.transform_type_postorder(type_spec, _pack_dtypes)

    if (any(x.is_integer for x in tensor_dtypes_in_type_spec)
            and (inferred_value_type.is_tensor()
                 and not inferred_value_type.dtype.is_integer)):
        raise TypeError(
            'Only integers can be used as scalar values if our desired constant '
            'type spec contains any integer tensors; passed scalar {} of dtype {} '
            'for type spec {}.'.format(value, inferred_value_type.dtype,
                                       type_spec))

    result_type = type_spec

    def _create_result_tensor(type_spec, value):
        """Packs `value` into `type_spec` recursively."""
        if type_spec.is_tensor():
            type_spec.shape.assert_is_fully_defined()
            result = tf.constant(value,
                                 dtype=type_spec.dtype,
                                 shape=type_spec.shape)
        else:
            elements = []
            if inferred_value_type.is_struct():
                # Copy the leaf values according to the type_spec structure.
                for (name, elem_type), value in zip(
                        structure.iter_elements(type_spec), value):
                    elements.append(
                        (name, _create_result_tensor(elem_type, value)))
            else:
                # "Broadcast" the value to each level of the type_spec structure.
                for _, elem_type in structure.iter_elements(type_spec):
                    elements.append(
                        (None, _create_result_tensor(elem_type, value)))
            result = structure.Struct(elements)
        return result

    with tf.Graph().as_default() as graph:
        result = _create_result_tensor(result_type, value)
        _, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)

    type_signature = computation_types.FunctionType(None, result_type)
    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=None,
                               result=result_binding)
    return _tensorflow_comp(tensorflow, type_signature)
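
A hedged usage sketch: `create_constant` returns a `(proto, type_signature)` pair, and a scalar value is broadcast into every tensor leaf of a structured `type_spec`. The `StructType` constructor and the import path below are assumptions consistent with the `is_struct()` API used above.

# Usage sketch (hedged); import paths may differ by TFF version.
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

# A scalar constant of type ( -> float32).
scalar_proto, scalar_type = create_constant(
    10.0, computation_types.TensorType(tf.float32))

# The scalar 1 is "broadcast" into each tensor leaf, yielding ( -> <int32,int32>).
struct_proto, struct_type = create_constant(
    1, computation_types.StructType([tf.int32, tf.int32]))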