Code Example #1
def create_broadcast_scalar_to_shape(scalar_type: tf.DType,
                                     shape: tf.TensorShape) -> pb.Computation:
    """Returns a tensorflow computation returning the result of `tf.broadcast_to`.

  The returned computation has the type signature `(T -> U)`, where
  `T` is `scalar_type` and `U` is a `tff.TensorType` with dtype `scalar_type`
  and shape `shape`.

  Args:
    scalar_type: A `tf.DType`, the type of the scalar to broadcast.
    shape: A `tf.TensorShape` to broadcast to. Must be fully defined.

  Raises:
    TypeError: If `scalar_type` is not a `tf.DType` or if `shape` is not a
      `tf.TensorShape`.
    ValueError: If `shape` is not fully defined.
  """
    py_typecheck.check_type(scalar_type, tf.DType)
    py_typecheck.check_type(shape, tf.TensorShape)
    shape.assert_is_fully_defined()
    parameter_type = computation_types.TensorType(scalar_type, shape=())

    with tf.Graph().as_default() as graph:
        parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
            'x', parameter_type, graph)
        result = tf.broadcast_to(parameter_value, shape)
        result_type, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)

    type_signature = computation_types.FunctionType(parameter_type,
                                                    result_type)
    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=parameter_binding,
                               result=result_binding)
    return pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        tensorflow=tensorflow)
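
For context, a minimal standalone sketch of the `tf.broadcast_to` behavior this computation wraps (plain eager TensorFlow, not a TFF computation):

import tensorflow as tf

x = tf.constant(5.0)                  # the scalar parameter `T`
y = tf.broadcast_to(x, shape=(2, 3))  # `shape` must be fully defined
print(y.numpy())
# [[5. 5. 5.]
#  [5. 5. 5.]]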
Code Example #2
def _serialize_sequence_value(
        value: Union[type_conversions.TF_DATASET_REPRESENTATION_TYPES],
        type_spec: computation_types.SequenceType) -> _SerializeReturnType:
    """Serializes a `tf.data.Dataset` value into `executor_pb2.Value`.

  Args:
    value: A `tf.data.Dataset`, or equivalent.
    type_spec: A `computation_types.Type` specifying the TFF sequence type of
      `value`.

  Returns:
    A tuple `(value_proto, type_spec)` in which `value_proto` is an instance
    of `executor_pb2.Value` with the serialized content of `value`, and
    `type_spec` is the type of the serialized value.
  """
    if not isinstance(value, type_conversions.TF_DATASET_REPRESENTATION_TYPES):
        raise TypeError(
            'Cannot serialize Python type {!s} as TFF type {!s}.'.format(
                py_typecheck.type_string(type(value)),
                type_spec if type_spec is not None else 'unknown'))

    value_type = computation_types.SequenceType(
        computation_types.to_type(value.element_spec))
    if not type_spec.is_assignable_from(value_type):
        raise TypeError(
            'Cannot serialize dataset with elements of type {!s} as TFF type {!s}.'
            .format(value_type,
                    type_spec if type_spec is not None else 'unknown'))

    # TFF must store the type spec here because TF will lose the ordering of
    # the names for a `tf.data.Dataset` that returns elements of
    # `collections.Mapping` type. This allows TFF to preserve and restore the
    # key ordering upon deserialization.
    element_type = computation_types.to_type(value.element_spec)
    return executor_pb2.Value(sequence=executor_pb2.Value.Sequence(
        zipped_saved_model=_serialize_dataset(value),
        element_type=type_serialization.serialize_type(
            element_type))), type_spec
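
The ordering concern in the comment above can be seen directly from `element_spec`; a small sketch (the dataset contents are illustrative):

import collections
import tensorflow as tf

# An OrderedDict preserves the key order 'b', 'a'; this is the ordering
# TFF records via `element_type`, since a plain SavedModel round trip may
# not preserve it for Mapping-typed elements.
ds = tf.data.Dataset.from_tensor_slices(
    collections.OrderedDict(b=[1.0, 2.0], a=[3.0, 4.0]))
print(ds.element_spec)  # keys appear as 'b', then 'a'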
Code Example #3
  def test_partitioned_call_nodes(self):

    @tf.function
    def test():
      return tf.constant(1)

    with tf.Graph().as_default() as graph:
      result_type, result_binding = tensorflow_utils.capture_result_from_graph(
          test(), graph)

    function_type = computation_types.FunctionType(None, result_type)
    serialized_function_type = type_serialization.serialize_type(function_type)
    proto = pb.Computation(
        type=serialized_function_type,
        tensorflow=pb.TensorFlow(
            graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
            parameter=None,
            result=result_binding))

    self.assertCallOpsGrapplerNotDisabled(proto)
    transformed_proto = tensorflow_computation_transformations.disable_grappler_for_partitioned_calls(
        proto)
    self.assertCallOpsGrapplerDisabled(transformed_proto)
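
As a rough illustration of why the test above expects call ops at all: calling a `tf.function` while building a graph typically stamps a `PartitionedCall` (or `StatefulPartitionedCall`) node, which is what the transformation rewrites. A sketch:

import tensorflow as tf

@tf.function
def test():
    return tf.constant(1)

with tf.Graph().as_default() as graph:
    test()

# Expected to include 'PartitionedCall' or 'StatefulPartitionedCall'.
print(sorted({node.op for node in graph.as_graph_def().node}))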
Code Example #4
    def test_something(self):
        # TODO(b/113112108): Revise these tests after a more complete implementation
        # is in place.

        # At the moment, this should succeed, as both the computation body and the
        # type are well-formed.
        computation_impl.ComputationImpl(
            pb.Computation(
                **{
                    'type':
                    type_serialization.serialize_type(
                        computation_types.FunctionType(tf.int32, tf.int32)),
                    'intrinsic':
                    pb.Intrinsic(uri='whatever')
                }), context_stack_impl.context_stack)

        # This should fail, as the proto is not well-formed.
        self.assertRaises(TypeError, computation_impl.ComputationImpl,
                          pb.Computation(), context_stack_impl.context_stack)

        # This should fail, as "10" is not an instance of pb.Computation.
        self.assertRaises(TypeError, computation_impl.ComputationImpl, 10,
                          context_stack_impl.context_stack)
Code Example #5
def create_dummy_computation_tensorflow_tuple():
    """Returns a tensorflow computation and type.

  `( -> <('a', float32), ('b', float32), ('c', float32)>)`
  """
    value = 10.0

    with tf.Graph().as_default() as graph:
        names = ['a', 'b', 'c']
        result = anonymous_tuple.AnonymousTuple(
            (n, tf.constant(value)) for n in names)
        result_type, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)

    type_signature = computation_types.FunctionType(None, result_type)
    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=None,
                               result=result_binding)
    value = pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        tensorflow=tensorflow)
    return value, type_signature
Code Example #6
  def test_deserialize_federated_value_with_unset_member_type(self):
    x = 10
    x_type = computation_types.to_type(tf.int32)
    member_proto, _ = executor_serialization.serialize_value(x, x_type)
    fully_specified_type_at_clients = type_serialization.serialize_type(
        computation_types.at_clients(tf.int32))

    unspecified_member_federated_type = computation_pb2.FederatedType(
        placement=fully_specified_type_at_clients.federated.placement,
        all_equal=fully_specified_type_at_clients.federated.all_equal)

    federated_proto = executor_pb2.Value.Federated(
        type=unspecified_member_federated_type, value=[member_proto])
    federated_value_proto = executor_pb2.Value(federated=federated_proto)

    self.assertIsInstance(member_proto, executor_pb2.Value)
    self.assertIsInstance(federated_value_proto, executor_pb2.Value)

    deserialized_federated_value, deserialized_type_spec = executor_serialization.deserialize_value(
        federated_value_proto)
    self.assert_types_identical(deserialized_type_spec,
                                computation_types.at_clients(tf.int32))
    self.assertEqual(deserialized_federated_value, [10])
Code Example #7
    def test_valid_ops(self):
        @tf.function
        def test():
            return tf.constant(1)

        with tf.Graph().as_default() as graph:
            result_type, result_binding = tensorflow_utils.capture_result_from_graph(
                test(), graph)

        function_type = computation_types.FunctionType(None, result_type)
        serialized_function_type = type_serialization.serialize_type(
            function_type)
        proto = computation_pb2.Computation(
            type=serialized_function_type,
            tensorflow=computation_pb2.TensorFlow(
                graph_def=serialization_utils.pack_graph_def(
                    graph.as_graph_def()),
                parameter=None,
                result=result_binding))

        disallowed_op_names = frozenset(['ShardedFilename'])
        tensorflow_computation_transformations.check_no_disallowed_ops(
            proto, disallowed_op_names)
Code Example #8
  def test_gets_all_explicit_placement(self):

    with tf.Graph().as_default() as g:
      with tf.device('/cpu:0'):
        a = tf.constant(0)
        b = tf.constant(1)
        c = a + b

    _, result_binding = tensorflow_utils.capture_result_from_graph(c, g)

    packed_graph_def = serialization_utils.pack_graph_def(g.as_graph_def())
    function_type = computation_types.FunctionType(None, tf.int32)
    proto = pb.Computation(
        type=type_serialization.serialize_type(function_type),
        tensorflow=pb.TensorFlow(
            graph_def=packed_graph_def, parameter=None, result=result_binding))
    building_block = building_blocks.ComputationBuildingBlock.from_proto(proto)
    device_placements = building_block_analysis.get_device_placement_in(
        building_block)
    all_device_placements = list(device_placements.keys())
    self.assertLen(all_device_placements, 1)
    self.assertIn('CPU', all_device_placements[0])
    self.assertGreater(device_placements[all_device_placements[0]], 0)
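
The placement information the analysis tallies lives on each `NodeDef` in the `GraphDef`; a minimal sketch of where it comes from:

import tensorflow as tf

with tf.Graph().as_default() as g:
    with tf.device('/cpu:0'):
        c = tf.constant(0) + tf.constant(1)

# Each NodeDef carries the requested device string, e.g. '/device:CPU:0';
# nodes created outside a tf.device scope carry an empty string.
for node in g.as_graph_def().node:
    print(node.name, repr(node.device))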
Code Example #9
def create_xla_tff_computation(xla_computation, tensor_indexes, type_spec):
    """Creates an XLA TFF computation.

  Args:
    xla_computation: An instance of `xla_client.XlaComputation`.
    tensor_indexes: The list of tensor indexes to use in the parameter binding,
      in the order matching the order of flattened parameter in `type_spec`.
    type_spec: The TFF type of the computation to be constructed.

  Returns:
    An instance of `pb.Computation`.

  Raises:
    ValueError: if the arguments are invalid or incompatible with each other,
      e.g., because the TFF types mismatch.
  """
    py_typecheck.check_type(xla_computation, xla_client.XlaComputation)
    py_typecheck.check_type(tensor_indexes, list)
    py_typecheck.check_type(type_spec, computation_types.FunctionType)
    parameter_binding = _make_xla_binding_for_type(tensor_indexes,
                                                   type_spec.parameter)
    result_binding = _make_xla_binding_for_type(
        list(range(len(structure.flatten(type_spec.result)))),
        type_spec.result)
    reconstructed_type = xla_computation_and_bindings_to_tff_type(
        xla_computation, parameter_binding, result_binding)
    py_typecheck.check_type(reconstructed_type, computation_types.FunctionType)
    expected_type = _remove_struct_element_names_from_tff_type(type_spec)
    if not reconstructed_type.is_equivalent_to(expected_type):
        raise ValueError(
            'The TFF type of the XLA computation {} does not match the expected '
            'TFF type {}.'.format(str(reconstructed_type), str(expected_type)))
    return pb.Computation(type=type_serialization.serialize_type(type_spec),
                          xla=pb.Xla(
                              hlo_module=pack_xla_computation(xla_computation),
                              parameter=parameter_binding,
                              result=result_binding))
Code Example #10
File: serialization.py  Project: tensorflow/federated
def _make_concrete_flat_output_fn(fn, *args, **kwargs):
    """Create a concrete function that has flattened output.

  TensorFlow SavedModel format requires flat structures of outputs, and
  cannot serialize custom Python classes (e.g. the BatchOutput attrs
  classes). Here we wrap the method in a `tf.function` that flattens its
  output. Then we repack the flattened output when loading the SavedModel.

  Args:
    fn: Function to wrap in `tf.function` decorator and concretize with
      arguments in `*args` and `**kwargs` for adding to a `tf.Module`.
    *args: Positional arguments to `tf.function.get_concrete_function`.
    **kwargs: Keyword arguments to `tf.function.get_concrete_function`.

  Returns:
    A 2-tuple of a concrete `tf.function` instance and a `tff.Type` protocol
    buffer message documenting the result structure returned by the concrete
    function.
  """
    # Save the un-flattened type spec for deserialization later. Wrap in a lambda
    # because `tf.function` doesn't know how to deal with `functools.partial`
    # types
    # that we may have created earlier.
    structured_fn = lambda *args, **kwargs: fn(*args, **kwargs)  # pylint: disable=unnecessary-lambda
    concrete_fn = tf.function(structured_fn).get_concrete_function(
        *args, **kwargs)
    tensor_types = tf.nest.map_structure(computation_types.TensorType,
                                         concrete_fn.output_dtypes,
                                         concrete_fn.output_shapes)
    result_type_spec = type_serialization.serialize_type(
        computation_types.to_type(tensor_types))

    def flattened_output(*args, **kwargs):
        return tf.nest.flatten(fn(*args, **kwargs))

    flat_concrete_fn = tf.function(flattened_output).get_concrete_function(
        *args, **kwargs)
    return flat_concrete_fn, result_type_spec
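
A minimal sketch of the flatten-then-concretize pattern used above, with a hypothetical structured function `fn`:

import tensorflow as tf

def fn():
    # A structured (dict) output that SavedModel signatures cannot take as-is.
    return {'a': tf.constant(1.0), 'b': tf.constant(2.0)}

flat_fn = tf.function(
    lambda: tf.nest.flatten(fn())).get_concrete_function()
print(flat_fn())  # a flat list of tensors, serializable in a SavedModel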
Code Example #11
    def test_counts_correct_number_of_ops_with_function(self):
        @tf.function
        def add_one(x):
            return x + 1

        with tf.Graph().as_default() as graph:
            parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
                'x', tf.int32, graph)
            result = add_one(add_one(parameter_value))

        result_type, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)
        type_signature = computation_types.FunctionType(tf.int32, result_type)
        tensorflow = pb.TensorFlow(
            graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
            parameter=parameter_binding,
            result=result_binding)
        proto = pb.Computation(
            type=type_serialization.serialize_type(type_signature),
            tensorflow=tensorflow)
        building_block = building_blocks.ComputationBuildingBlock.from_proto(
            proto)

        tf_ops_in_graph = building_block_analysis.count_tensorflow_ops_in(
            building_block)

        # Expect 7 ops:
        #    Inside the tf.function:
        #      - one constant
        #      - one addition
        #      - one identity on the result
        #    Inside the tff_computation:
        #      - one placeholder (for the argument)
        #      - two partitioned calls
        #      - one identity on the tff_computation result
        self.assertEqual(tf_ops_in_graph, 7)
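
Counting ops as in the test above reduces to counting `NodeDef`s in the `GraphDef`, including those inside the attached function library; a rough sketch:

import tensorflow as tf

@tf.function
def add_one(x):
    return x + 1

with tf.Graph().as_default() as graph:
    x = tf.compat.v1.placeholder(tf.int32, shape=[], name='x')
    add_one(add_one(x))

gd = graph.as_graph_def()
top_level = len(gd.node)
in_functions = sum(len(f.node_def) for f in gd.library.function)
print(top_level, in_functions)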
Code Example #12
    def test_gets_none_placement(self):

        with tf.Graph().as_default() as g:
            a = tf.Variable(0, name='variable1')
            b = tf.Variable(1, name='variable2')
            c = a + b

        _, result_binding = tensorflow_utils.capture_result_from_graph(c, g)

        packed_graph_def = serialization_utils.pack_graph_def(g.as_graph_def())
        function_type = computation_types.FunctionType(None, tf.int32)
        proto = pb.Computation(
            type=type_serialization.serialize_type(function_type),
            tensorflow=pb.TensorFlow(graph_def=packed_graph_def,
                                     parameter=None,
                                     result=result_binding))
        building_block = building_blocks.ComputationBuildingBlock.from_proto(
            proto)
        device_placements = building_block_analysis.get_device_placement_in(
            building_block)
        all_device_placements = list(device_placements.keys())
        self.assertLen(all_device_placements, 1)
        self.assertEqual(all_device_placements[0], '')
        self.assertGreater(device_placements[''], 0)
Code Example #13
def create_computation_for_py_fn(
        fn: types.FunctionType,
        parameter_type: Optional[computation_types.Type]) -> pb.Computation:
    """Returns a tensorflow computation returning the result of `fn`.

  The returned computation has the type signature `(T -> U)`, where `T` is
  `parameter_type` and `U` is the type returned by `fn`.

  Args:
    fn: A Python function.
    parameter_type: A `computation_types.Type`, or `None` if `fn` takes no
      parameter.
  """
    py_typecheck.check_type(fn, types.FunctionType)
    if parameter_type is not None:
        py_typecheck.check_type(parameter_type, computation_types.Type)

    with tf.Graph().as_default() as graph:
        if parameter_type is not None:
            parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
                'x', parameter_type, graph)
            result = fn(parameter_value)
        else:
            parameter_binding = None
            result = fn()
        result_type, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)

    type_signature = computation_types.FunctionType(parameter_type,
                                                    result_type)
    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=parameter_binding,
                               result=result_binding)
    return pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        tensorflow=tensorflow)
Code Example #14
    def test_returns_value_with_intrinsic_def_federated_value_at_server_and_tuple(
            self):
        executor = create_test_executor(number_of_clients=3)
        arg, arg_type = executor_test_utils.create_dummy_computation_tuple()
        intrinsic_def = intrinsic_defs.FEDERATED_VALUE_AT_SERVER
        comp_type = computation_types.FunctionType(
            arg_type, type_factory.at_server(arg_type))
        comp = pb.Computation(
            type=type_serialization.serialize_type(comp_type),
            intrinsic=pb.Intrinsic(uri=intrinsic_def.uri))

        comp = self.run_sync(executor.create_value(comp, comp_type))
        arg = self.run_sync(executor.create_value(arg, arg_type))
        result = self.run_sync(executor.create_call(comp, arg))

        self.assertIsInstance(result,
                              federating_executor.FederatingExecutorValue)
        self.assertEqual(result.type_signature.compact_representation(),
                         comp_type.result.compact_representation())
        actual_result = self.run_sync(result.compute())
        expected_result = [10.0] * 2
        for actual_element, expected_element in zip(actual_result,
                                                    expected_result):
            self.assertEqual(actual_element, expected_element)
Code Example #15
 def test_serialize_type_with_placement(self):
     actual_proto = type_serialization.serialize_type(
         computation_types.PlacementType())
     expected_proto = pb.Type(placement=pb.PlacementType())
     self.assertEqual(actual_proto, expected_proto)
Code Example #16
def serialize_tf2_as_tf_computation(target, parameter_type, unpack=None):
  """Serializes the 'target' as a TF computation with a given parameter type.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function or `tf.function`, with arguments
      matching the 'parameter_type'.
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    unpack: Whether to always unpack the parameter_type. Necessary for support
      of polymorphic tf2_computations.

  Returns:
    A tuple of the constructed `pb.Computation` instance with the
    `pb.TensorFlow` variant set, and the annotated `tff.Type` of the function.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
  py_typecheck.check_callable(target)
  parameter_type = computation_types.to_type(parameter_type)
  signature = function_utils.get_signature(target)
  if signature.parameters and parameter_type is None:
    raise ValueError(
        'Expected the target to declare no parameters, found {!r}.'.format(
            signature.parameters))

  # In the codepath for TF V1 based serialization (tff.tf_computation),
  # we get the "wrapped" function to serialize. Here, target is the
  # raw function to be wrapped; however, we still need to know if
  # the parameter_type should be unpacked into multiple args and kwargs
  # in order to construct the TensorSpecs to be passed in the call
  # to get_concrete_fn below.
  unpack = function_utils.infer_unpack_needed(target, parameter_type, unpack)
  arg_typespecs, kwarg_typespecs, parameter_binding = (
      tensorflow_utils.get_tf_typespec_and_binding(
          parameter_type,
          arg_names=list(signature.parameters.keys()),
          unpack=unpack))

  # Pseudo-global to be appended to once when target_poly below is traced.
  type_and_binding_slot = []

  # N.B. To serialize a tf.function or eager python code,
  # the return type must be a flat list, tuple, or dict. However, the
  # tff.tf_computation must be able to handle structured inputs and outputs.
  # Thus, we intercept the result of calling the original target fn, introspect
  # its structure to create a result_type and bindings, and then return a
  # flat dict output. It is this new "unpacked" tf.function that we will
  # serialize using tf.saved_model.save.
  #
  # TODO(b/117428091): The return type limitation is primarily a limitation of
  # SignatureDefs and therefore of the signatures argument to
  # tf.saved_model.save. tf.functions attached to objects and loaded back with
  # tf.saved_model.load can take/return nests; this might offer a better
  # approach to the one taken here.

  @tf.function
  def target_poly(*args, **kwargs):
    result = target(*args, **kwargs)
    result_dict, result_type, result_binding = (
        tensorflow_utils.get_tf2_result_dict_and_binding(result))
    assert not type_and_binding_slot
    # A "side channel" python output.
    type_and_binding_slot.append((result_type, result_binding))
    return result_dict

  # Triggers tracing so that type_and_binding_slot is filled.
  cc_fn = target_poly.get_concrete_function(*arg_typespecs, **kwarg_typespecs)
  assert len(type_and_binding_slot) == 1
  result_type, result_binding = type_and_binding_slot[0]

  # N.B. Note that cc_fn does *not* accept the same args and kwargs as the
  # Python target_poly; instead, it must be called with **kwargs based on the
  # unique names embedded in the TensorSpecs inside arg_typespecs and
  # kwarg_typespecs. The (preliminary) parameter_binding tracks the mapping
  # between these tensor names and the components of the (possibly nested) TFF
  # input type. When cc_fn is serialized, concrete tensors for each input are
  # introduced, and the call finalize_binding(parameter_binding,
  # sigs['serving_default'].inputs) updates the bindings to reference these
  # concrete tensors.

  # Associate vars with unique names and explicitly attach to the Checkpoint:
  var_dict = {
      'var{:02d}'.format(i): v for i, v in enumerate(cc_fn.graph.variables)
  }
  saveable = tf.train.Checkpoint(fn=target_poly, **var_dict)

  try:
    # TODO(b/122081673): All we really need is the  meta graph def, we could
    # probably just load that directly, e.g., using parse_saved_model from
    # tensorflow/python/saved_model/loader_impl.py, but I'm not sure we want to
    # depend on that presumably non-public symbol. Perhaps TF can expose a way
    # to just get the MetaGraphDef directly without saving to a tempfile? This
    # looks like a small change to v2.saved_model.save().
    outdir = tempfile.mkdtemp('savedmodel')
    tf.saved_model.save(saveable, outdir, signatures=cc_fn)

    graph = tf.Graph()
    with tf.compat.v1.Session(graph=graph) as sess:
      mgd = tf.compat.v1.saved_model.load(
          sess, tags=[tf.saved_model.SERVING], export_dir=outdir)
  finally:
    shutil.rmtree(outdir)
  sigs = mgd.signature_def

  # TODO(b/123102455): Figure out how to support the init_op. The meta graph def
  # contains sigs['__saved_model_init_op'].outputs['__saved_model_init_op']. It
  # probably won't do what we want, because it will want to read from
  # Checkpoints, not just run Variable initializers (?). The right solution may
  # be to grab the target_poly.get_initialization_function(), and save a sig for
  # that.

  # Now, traverse the signature from the MetaGraphDef to find the actual
  # tensor names and write them into the bindings.
  finalize_binding(parameter_binding, sigs['serving_default'].inputs)
  finalize_binding(result_binding, sigs['serving_default'].outputs)

  annotated_type = computation_types.FunctionType(parameter_type, result_type)

  return pb.Computation(
      type=pb.Type(
          function=pb.FunctionType(
              parameter=type_serialization.serialize_type(parameter_type),
              result=type_serialization.serialize_type(result_type))),
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(mgd.graph_def),
          parameter=parameter_binding,
          result=result_binding)), annotated_type
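
The save/load round trip in the middle of this function can be reproduced in isolation; a hedged sketch (the module and signature contents are illustrative):

import tempfile
import tensorflow as tf

m = tf.Module()
m.f = tf.function(lambda x: {'out': x + 1}).get_concrete_function(
    tf.TensorSpec([], tf.int32, name='x'))
outdir = tempfile.mkdtemp('savedmodel')
tf.saved_model.save(m, outdir, signatures=m.f)

graph = tf.Graph()
with tf.compat.v1.Session(graph=graph) as sess:
    mgd = tf.compat.v1.saved_model.load(
        sess, tags=[tf.saved_model.SERVING], export_dir=outdir)

# The concrete tensor names that finalize_binding writes into the bindings:
print(mgd.signature_def['serving_default'].inputs)
print(mgd.signature_def['serving_default'].outputs)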
Code Example #17
def serialize_py_fn_as_tf_computation(target, parameter_type, context_stack):
  """Serializes the 'target' as a TF computation with a given parameter type.

  See also `serialize_tf2_as_tf_computation` for TensorFlow 2
  serialization.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function. In the future, we will add here
      support for serializing the various kinds of non-eager and eager
      functions, and eventually aim at full support for and compliance with TF
      2.0. This function is currently required to declare either zero parameters
      if `parameter_type` is `None`, or exactly one parameter if it's not
      `None`.  The nested structure of this parameter must correspond to the
      structure of the 'parameter_type'. In the future, we may support targets
      with multiple args/keyword args (to be documented in the API and
      referenced from here).
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    context_stack: The context stack to use.

  Returns:
    A tuple of (`pb.Computation`, `tff.Type`), where the computation contains
    the instance with the `pb.TensorFlow` variant set, and the type is an
    instance of `tff.Type`, potentially including Python container annotations,
    for use by TensorFlow computation wrappers.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
  # TODO(b/113112108): Support a greater variety of target type signatures,
  # with keyword args or multiple args corresponding to elements of a tuple.
  # Document all accepted forms with examples in the API, and point to there
  # from here.

  py_typecheck.check_type(target, types.FunctionType)
  py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
  parameter_type = computation_types.to_type(parameter_type)
  signature = function_utils.get_signature(target)

  with tf.Graph().as_default() as graph:
    args = []
    if parameter_type is not None:
      if len(signature.parameters) != 1:
        raise ValueError(
            'Expected the target to declare exactly one parameter, found {!r}.'
            .format(signature.parameters))
      parameter_name = next(iter(signature.parameters))
      parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
          parameter_name, parameter_type, graph)
      args.append(parameter_value)
    else:
      if signature.parameters:
        raise ValueError(
            'Expected the target to declare no parameters, found {!r}.'.format(
                signature.parameters))
      parameter_binding = None
    context = tf_computation_context.TensorFlowComputationContext(graph)
    with context_stack.install(context):
      result = target(*args)

      # TODO(b/122081673): This needs to change for TF 2.0. We may also
      # want to allow the person creating a tff.tf_computation to specify
      # a different initializer; e.g., if it is known that certain
      # variables will be assigned immediately to arguments of the function,
      # then it is wasteful to initialize them before this.
      #
      # The following is a bit of a work around: the collections below may
      # contain variables more than once, hence we throw into a set. TFF needs
      # to ensure all variables are initialized, but not all variables are
      # always in the collections we expect. tff.learning._KerasModel tries to
      # pull Keras variables (that may or may not be in GLOBAL_VARIABLES) into
      # VARS_FOR_TFF_TO_INITIALIZE for now.
      all_variables = set(tf.compat.v1.global_variables() +
                          tf.compat.v1.local_variables() +
                          tf.compat.v1.get_collection(
                              graph_keys.GraphKeys.VARS_FOR_TFF_TO_INITIALIZE))
      if all_variables:
        # Use a readable but not-too-long name for the init_op.
        name = 'init_op_for_' + '_'.join(
            [v.name.replace(':0', '') for v in all_variables])
        if len(name) > 50:
          name = 'init_op_for_{}_variables'.format(len(all_variables))
        with tf.control_dependencies(context.init_ops):
          # Before running the main new init op, run any initializers for sub-
          # computations from context.init_ops. Variables from import_graph_def
          # will not make it into the global collections, and so will not be
          # initialized without this code path.
          init_op_name = tf.group(
              tf.compat.v1.initializers.variables(all_variables, name=name),
              *tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.TABLE_INITIALIZERS)).name
      elif context.init_ops:
        init_op_name = tf.group(
            *context.init_ops, name='subcomputation_init_ops').name
      else:
        init_op_name = None

    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        result, graph)

  annotated_type = computation_types.FunctionType(parameter_type, result_type)

  # WARNING: we do not really want to be modifying the graph here if we can
  # avoid it. This is purely to work around performance issues uncovered with
  # the non-standard usage of Tensorflow and have been discussed with the
  # Tensorflow core team before being added.
  clean_graph_def = _clean_graph_def(graph.as_graph_def())

  return pb.Computation(
      type=pb.Type(
          function=pb.FunctionType(
              parameter=type_serialization.serialize_type(parameter_type),
              result=type_serialization.serialize_type(result_type))),
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(clean_graph_def),
          parameter=parameter_binding,
          result=result_binding,
          initialize_op=init_op_name)), annotated_type
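
The initializer grouping above follows the standard TF1 pattern; a self-contained sketch:

import tensorflow as tf

with tf.Graph().as_default() as g:
    v = tf.compat.v1.get_variable(
        'v', shape=[], dtype=tf.float32,
        initializer=tf.compat.v1.zeros_initializer())
    # One named op that initializes all collected variables, as above.
    init_op = tf.group(
        tf.compat.v1.initializers.variables([v], name='init_op_for_v'))

with tf.compat.v1.Session(graph=g) as sess:
    sess.run(init_op)
    print(sess.run(v))  # 0.0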
Code Example #18
File: serialization.py  Project: tensorflow/federated
def save_functional_model(functional_model: functional.FunctionalModel,
                          path: str):
    """Serializes a `FunctionalModel` as a `tf.SavedModel` to `path`.

  Args:
    functional_model: A `tff.learning.models.FunctionalModel`.
    path: A `str` directory path to serialize the model to.
  """
    m = tf.Module()
    # Serialize the initial_weights values as a tf.function that creates a
    # structure of tensors with the initial weights. This way we can add it to the
    # tf.SavedModel and call it to create initial weights after deserialization.
    create_initial_weights = lambda: functional_model.initial_weights
    with tf.Graph().as_default():
        concrete_structured_fn = tf.function(
            create_initial_weights).get_concrete_function()
    model_weights_tensor_specs = tf.nest.map_structure(
        tf.TensorSpec.from_tensor, concrete_structured_fn.structured_outputs)
    initial_weights_result_type_spec = type_serialization.serialize_type(
        computation_types.to_type(model_weights_tensor_specs))
    m.create_initial_weights_type_spec = tf.Variable(
        initial_weights_result_type_spec.SerializeToString(deterministic=True))

    def flat_initial_weights():
        return tf.nest.flatten(create_initial_weights())

    with tf.Graph().as_default():
        m.create_initial_weights = tf.function(
            flat_initial_weights).get_concrete_function()

    # Serialize forward pass concretely, once for training and once for
    # non-training.
    # TODO(b/198150431): try making `training` a `tf.Tensor` parameter to remove
    # the need for serializing two different function graphs.
    def make_concrete_flat_forward_pass(training: bool):
        """Create a concrete forward_pass function that has flattened output.

    Args:
      training: A boolean indicating whether this is a call in a training loop,
        or evaluation loop.

    Returns:
      A 2-tuple of a concrete `tf.function` instance and a `tff.Type` protocol
      buffer message documenting the result structure returned by the
      concrete function.
    """
        # Save the un-flattened type spec for deserialization later.
        # Note: `training` is a Python boolean, which gets "curried", in a sense,
        # during function concretization. The resulting concrete function only has
        # parameters for `model_weights` and `batch_input`, which are
        # `tf.TensorSpec` structures here.
        with tf.Graph().as_default():
            concrete_structured_fn = functional_model.forward_pass.get_concrete_function(
                model_weights_tensor_specs,
                functional_model.input_spec,
                # Note: training does not appear in the resulting concrete function.
                training=training)
        output_tensor_spec_structure = tf.nest.map_structure(
            tf.TensorSpec.from_tensor,
            concrete_structured_fn.structured_outputs)
        result_type_spec = type_serialization.serialize_type(
            computation_types.to_type(output_tensor_spec_structure))

        @tf.function
        def flat_forward_pass(model_weights, batch_input, training):
            return tf.nest.flatten(
                functional_model.forward_pass(model_weights, batch_input,
                                              training))

        with tf.Graph().as_default():
            flat_concrete_fn = flat_forward_pass.get_concrete_function(
                model_weights_tensor_specs,
                functional_model.input_spec,
                # Note: training does not appear in the resulting concrete function.
                training=training)
        return flat_concrete_fn, result_type_spec

    fw_pass_training, fw_pass_training_type_spec = make_concrete_flat_forward_pass(
        training=True)
    m.flat_forward_pass_training = fw_pass_training
    m.forward_pass_training_type_spec = tf.Variable(
        fw_pass_training_type_spec.SerializeToString(deterministic=True),
        trainable=False)

    fw_pass_inference, fw_pass_inference_type_spec = make_concrete_flat_forward_pass(
        training=False)
    m.flat_forward_pass_inference = fw_pass_inference
    m.forward_pass_inference_type_spec = tf.Variable(
        fw_pass_inference_type_spec.SerializeToString(deterministic=True),
        trainable=False)

    # Serialize predict_on_batch, once for training, once for non-training.
    x_type = functional_model.input_spec[0]

    # TODO(b/198150431): try making `training` a `tf.Tensor` parameter to remove
    # the need for serializing two different function graphs.
    def make_concrete_flat_predict_on_batch(training: bool):
        """Create a concrete predict_on_batch function that has flattened output.

    Args:
      training: A boolean indicating whether this is a call in a training loop,
        or evaluation loop.

    Returns:
      A 2-tuple of a concrete `tf.function` instance and a `tff.Type` protocol
      buffer message documenting the result structure returned by the
      concrete function.
    """
        # Save the un-flattened type spec for deserialization later.
        # Note: `training` is a Python boolean, which gets "curried", in a sense,
        # during function concretization. The resulting concrete function only has
        # parameters for `model_weights` and `batch_input`, which are
        # `tf.TensorSpec` structures here.
        concrete_structured_fn = tf.function(
            functional_model.predict_on_batch
        ).get_concrete_function(
            model_weights_tensor_specs,
            x_type,
            # Note: training does not appear in the resulting concrete function.
            training=training)
        output_tensor_spec_structure = tf.nest.map_structure(
            tf.TensorSpec.from_tensor,
            concrete_structured_fn.structured_outputs)
        result_type_spec = type_serialization.serialize_type(
            computation_types.to_type(output_tensor_spec_structure))

        @tf.function
        def flat_predict_on_batch(model_weights, x, training):
            return tf.nest.flatten(
                functional_model.predict_on_batch(model_weights, x, training))

        flat_concrete_fn = tf.function(
            flat_predict_on_batch
        ).get_concrete_function(
            model_weights_tensor_specs,
            x_type,
            # Note: training does not appear in the resulting concrete function.
            training=training)
        return flat_concrete_fn, result_type_spec

    with tf.Graph().as_default():
        predict_training, predict_training_type_spec = make_concrete_flat_predict_on_batch(
            training=True)
    m.predict_on_batch_training = predict_training
    m.predict_on_batch_training_type_spec = tf.Variable(
        predict_training_type_spec.SerializeToString(deterministic=True),
        trainable=False)

    with tf.Graph().as_default():
        predict_inference, predict_inference_type_spec = make_concrete_flat_predict_on_batch(
            training=False)
    m.predict_on_batch_inference = predict_inference
    m.predict_on_batch_inference_type_spec = tf.Variable(
        predict_inference_type_spec.SerializeToString(deterministic=True),
        trainable=False)

    # Serialize TFF values as string variables that contain the serialized
    # protos from the computation or the type.
    m.serialized_input_spec = tf.Variable(type_serialization.serialize_type(
        computation_types.to_type(
            functional_model.input_spec)).SerializeToString(
                deterministic=True),
                                          trainable=False)

    # Save everything
    _save_tensorflow_module(m, path)
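
The `SerializeToString(deterministic=True)` / `tf.Variable` pattern used throughout this function is simply a way of carrying a proto through a SavedModel; a hedged sketch with a generic message `msg`:

import tensorflow as tf

def store_proto(msg) -> tf.Variable:
    # Any protocol buffer message can ride along as a string variable.
    return tf.Variable(
        msg.SerializeToString(deterministic=True), trainable=False)

def restore_proto(var: tf.Variable, msg_cls):
    msg = msg_cls()
    msg.ParseFromString(var.numpy())
    return msg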
Code Example #19
File: serialization.py  Project: tensorflow/federated
def save(model: model_lib.Model, path: str, input_type=None) -> None:
    """Serializes `model` as a TensorFlow SavedModel to `path`.

  The resulting SavedModel will contain the default serving signature, which
  can be used with the TFLite converter to create a TFLite flatbuffer for
  inference.

  NOTE: The model returned by `tff.learning.models.load` will _not_ be the same
  Python type as the saved model. If the model serialized using this method is
  a subclass of `tff.learning.Model`, that subclass is _not_ returned. All
  method behavior is retained, but the Python type does not cross serialization
  boundaries. The return type of `metric_finalizers` will be an `OrderedDict`
  of `str` to `tff.tf_computation` (annotated TFF computations), which could
  differ from that of the model before serialization.

  Args:
    model: The `tff.learning.Model` to save.
    path: The `str` directory path to serialize the model to.
    input_type: An optional structure of `tf.TensorSpec`s representing the
      expected input of `model.predict_on_batch`, to override reading from
      `model.input_spec`. Typically this will be similar to `model.input_spec`,
      with any example labels removed. If None, default to
      `model.input_spec['x']` if the input_spec is a mapping, otherwise default
      to `model.input_spec[0]`.
  """
    py_typecheck.check_type(model, model_lib.Model)
    py_typecheck.check_type(path, str)
    if not path:
        raise ValueError('`path` must be a non-empty string, cannot serialize '
                         'models without an output path.')
    if isinstance(model, _LoadedSavedModel):
        # If we're saving a previously loaded model, we can simply use the module
        # already internal to the Model.
        _save_tensorflow_module(model._loaded_module, path)  # pylint: disable=protected-access
        return

    m = tf.Module()
    # We prefixed with `tff_` because `trainable_variables` is an attribute
    # reserved by `tf.Module`.
    m.tff_trainable_variables = model.trainable_variables
    m.tff_non_trainable_variables = model.non_trainable_variables
    m.tff_local_variables = model.local_variables
    # Serialize forward_pass. We must get two concrete versions of the
    # function, as the `training` argument is a Python value that changes the
    # graph computation. We serialize the output type so that we can repack the
    # flattened values after loading the saved model.
    forward_pass_training = _make_concrete_flat_output_fn(
        functools.partial(model.forward_pass, training=True), model.input_spec)
    m.flat_forward_pass_training = forward_pass_training[0]
    m.forward_pass_training_type_spec = tf.Variable(
        forward_pass_training[1].SerializeToString(deterministic=True),
        trainable=False)

    forward_pass_inference = _make_concrete_flat_output_fn(
        functools.partial(model.forward_pass, training=False),
        model.input_spec)
    m.flat_forward_pass_inference = forward_pass_inference[0]
    m.forward_pass_inference_type_spec = tf.Variable(
        forward_pass_inference[1].SerializeToString(deterministic=True),
        trainable=False)
    # Get model prediction input type. If `None`, default to assuming the 'x' key
    # or first element of the model input spec is the input.
    if input_type is None:
        if isinstance(model.input_spec, collections.abc.Mapping):
            input_type = model.input_spec['x']
        else:
            input_type = model.input_spec[0]
    # Serialize predict_on_batch. We must get two concrete versions of the
    # function, as the `training` argument is a Python value that changes the
    # graph computation.
    predict_on_batch_training = _make_concrete_flat_output_fn(
        functools.partial(model.predict_on_batch, training=True), input_type)
    m.predict_on_batch_training = predict_on_batch_training[0]
    m.predict_on_batch_training_type_spec = tf.Variable(
        predict_on_batch_training[1].SerializeToString(deterministic=True),
        trainable=False)
    predict_on_batch_inference = _make_concrete_flat_output_fn(
        functools.partial(model.predict_on_batch, training=False), input_type)
    m.predict_on_batch_inference = predict_on_batch_inference[0]
    m.predict_on_batch_inference_type_spec = tf.Variable(
        predict_on_batch_inference[1].SerializeToString(deterministic=True),
        trainable=False)

    # Serialize the report_local_unfinalized_metrics tf.function.
    m.report_local_unfinalized_metrics = (
        model.report_local_unfinalized_metrics.get_concrete_function())

    # Serialize the metric_finalizers as `tf.Variable`s.
    m.serialized_metric_finalizers = collections.OrderedDict()

    def serialize_metric_finalizer(finalizer, metric_type):
        finalizer_computation = tensorflow_computation.tf_computation(
            finalizer, metric_type)
        return tf.Variable(computation_serialization.serialize_computation(
            finalizer_computation).SerializeToString(deterministic=True),
                           trainable=False)

    for metric_name, finalizer in model.metric_finalizers().items():
        metric_type = type_conversions.type_from_tensors(
            model.report_local_unfinalized_metrics()[metric_name])
        m.serialized_metric_finalizers[
            metric_name] = serialize_metric_finalizer(finalizer, metric_type)

    # Serialize the TFF values as string variables that contain the serialized
    # protos from the computation or the type.
    m.serialized_input_spec = tf.Variable(type_serialization.serialize_type(
        computation_types.to_type(
            model.input_spec)).SerializeToString(deterministic=True),
                                          trainable=False)

    # Serialize the reset_metrics tf.function.
    try:
        m.reset_metrics = (model.reset_metrics.get_concrete_function())
    except NotImplementedError:
        m.reset_metrics = None

    _save_tensorflow_module(m, path)
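
On the loading side, the flattened outputs are repacked against the stored type spec; `tf.nest.pack_sequence_as` is the core of that step. A sketch with an assumed output structure:

import tensorflow as tf

structure = {'loss': None, 'predictions': None}  # assumed output structure
flat = [tf.constant(0.25), tf.constant([1, 0])]  # order matches tf.nest.flatten
print(tf.nest.pack_sequence_as(structure, flat))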
Code Example #20
def serialize_py_fn_as_tf_computation(target, parameter_type, context_stack):
    """Serializes the 'target' as a TF computation with a given parameter type.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function. In the future, we will add here
      support for serializing the various kinds of non-eager and eager
      functions, and eventually aim at full support for and compliance with TF
      2.0. This function is currently required to declare either zero parameters
      if `parameter_type` is `None`, or exactly one parameter if it's not
      `None`.  The nested structure of this parameter must correspond to the
      structure of the 'parameter_type'. In the future, we may support targets
      with multiple args/keyword args (to be documented in the API and
      referenced from here).
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Must
      be an instance of `computation_types.Type`.
    context_stack: The context stack to use.

  Returns:
    A tuple of (`pb.Computation`, `tff.Type`), where the computation contains
    the instance with the `pb.TensorFlow` variant set, and the type is an
    instance of `tff.Type`, potentially including Python container annotations,
    for use by TensorFlow computation wrappers.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
    # TODO(b/113112108): Support a greater variety of target type signatures,
    # with keyword args or multiple args corresponding to elements of a tuple.
    # Document all accepted forms with examples in the API, and point to there
    # from here.

    py_typecheck.check_type(target, types.FunctionType)
    py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
    if parameter_type is not None:
        py_typecheck.check_type(parameter_type, computation_types.Type)
    signature = function_utils.get_signature(target)

    with tf.Graph().as_default() as graph:
        if parameter_type is not None:
            if len(signature.parameters) != 1:
                raise ValueError(
                    'Expected the target to declare exactly one parameter, found {!r}.'
                    .format(signature.parameters))
            parameter_name = next(iter(signature.parameters))
            parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
                parameter_name, parameter_type, graph)
        else:
            if signature.parameters:
                raise ValueError(
                    'Expected the target to declare no parameters, found {!r}.'
                    .format(signature.parameters))
            parameter_value = None
            parameter_binding = None
        context = tensorflow_computation_context.TensorFlowComputationContext(
            graph)
        with context_stack.install(context):
            with variable_utils.record_variable_creation_scope(
            ) as all_variables:
                if parameter_value is not None:
                    result = target(parameter_value)
                else:
                    result = target()
            initializer_ops = []
            if all_variables:
                # Use a readable but not-too-long name for the init_op.
                name = 'init_op_for_' + '_'.join(
                    [v.name.replace(':0', '') for v in all_variables])
                if len(name) > 50:
                    name = 'init_op_for_{}_variables'.format(
                        len(all_variables))
                initializer_ops.append(
                    tf.compat.v1.initializers.variables(all_variables,
                                                        name=name))
            initializer_ops.extend(
                tf.compat.v1.get_collection(
                    tf.compat.v1.GraphKeys.TABLE_INITIALIZERS))
            if initializer_ops:
                # Before running the main new init op, run any initializers for sub-
                # computations from context.init_ops. Variables from import_graph_def
                # will not make it into the global collections, and so will not be
                # initialized without this code path.
                with tf.compat.v1.control_dependencies(context.init_ops):
                    init_op_name = tf.group(*initializer_ops,
                                            name='grouped_initializers').name
            elif context.init_ops:
                init_op_name = tf.group(*context.init_ops,
                                        name='subcomputation_init_ops').name
            else:
                init_op_name = None

        result_type, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)

    type_signature = computation_types.FunctionType(parameter_type,
                                                    result_type)

    # WARNING: we do not really want to be modifying the graph here if we can
    # avoid it. This is purely to work around performance issues uncovered with
    # the non-standard usage of Tensorflow and have been discussed with the
    # Tensorflow core team before being added.
    clean_graph_def = _clean_graph_def(graph.as_graph_def())
    tensorflow = pb.TensorFlow(
        graph_def=serialization_utils.pack_graph_def(clean_graph_def),
        parameter=parameter_binding,
        result=result_binding,
        initialize_op=init_op_name)
    return pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        tensorflow=tensorflow), type_signature
Code Example #21
def tf_computation_serializer(parameter_type: Optional[computation_types.Type],
                              context_stack):
    """Serializes a TF computation with a given parameter type.

  Args:
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Must
      be an instance of `computation_types.Type`.
    context_stack: The context stack to use.

  Yields:
    The first yielded value will be a Python object (such as a dataset,
    a placeholder, or a `structure.Struct`) to be passed to the function to
    serialize. The result of the function should then be passed to the
    following `send` call.
    The next yielded value will be a tuple of (`pb.Computation`, `tff.Type`),
    where the computation contains the instance with the `pb.TensorFlow`
    variant set, and the type is an instance of `tff.Type`, potentially
    including Python container annotations, for use by TensorFlow computation
    wrappers.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
    # TODO(b/113112108): Support a greater variety of target type signatures,
    # with keyword args or multiple args corresponding to elements of a tuple.
    # Document all accepted forms with examples in the API, and point to there
    # from here.

    py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
    if parameter_type is not None:
        py_typecheck.check_type(parameter_type, computation_types.Type)

    with tf.Graph().as_default() as graph:
        if parameter_type is not None:
            parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
                'arg', parameter_type, graph)
        else:
            parameter_value = None
            parameter_binding = None
        context = tensorflow_computation_context.TensorFlowComputationContext(
            graph)
        with context_stack.install(context):
            with variable_utils.record_variable_creation_scope(
            ) as all_variables:
                result = yield parameter_value
            initializer_ops = []
            if all_variables:
                # Use a readable but not-too-long name for the init_op.
                name = 'init_op_for_' + '_'.join(
                    [v.name.replace(':0', '') for v in all_variables])
                if len(name) > 50:
                    name = 'init_op_for_{}_variables'.format(
                        len(all_variables))
                initializer_ops.append(
                    tf.compat.v1.initializers.variables(all_variables,
                                                        name=name))
            initializer_ops.extend(
                tf.compat.v1.get_collection(
                    tf.compat.v1.GraphKeys.TABLE_INITIALIZERS))
            if initializer_ops:
                # Before running the main new init op, run any initializers for sub-
                # computations from context.init_ops. Variables from import_graph_def
                # will not make it into the global collections, and so will not be
                # initialized without this code path.
                with tf.compat.v1.control_dependencies(context.init_ops):
                    init_op_name = tf.group(*initializer_ops,
                                            name='grouped_initializers').name
            elif context.init_ops:
                init_op_name = tf.group(*context.init_ops,
                                        name='subcomputation_init_ops').name
            else:
                init_op_name = None

        result_type, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)

    type_signature = computation_types.FunctionType(parameter_type,
                                                    result_type)

    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=parameter_binding,
                               result=result_binding,
                               initialize_op=init_op_name)
    yield pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        tensorflow=tensorflow), type_signature
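
A hedged sketch of how a caller drives the two-step generator protocol described in the docstring (the names `my_fn`, `parameter_type`, and `context_stack` are assumptions):

serializer = tf_computation_serializer(parameter_type, context_stack)
parameter_value = next(serializer)        # first yield: the stamped parameter
result = my_fn(parameter_value)           # trace the user function in-graph
comp_pb, type_signature = serializer.send(result)  # second yield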
Code Example #22
def _make_sequence_reduce_value(executor, element_type, accumulator_type):
    intrinsic_spec = pb.Intrinsic(uri=intrinsic_defs.SEQUENCE_REDUCE.uri)
    type_spec = _make_sequence_reduce_type(element_type, accumulator_type)
    comp_pb = pb.Computation(type=type_serialization.serialize_type(type_spec),
                             intrinsic=intrinsic_spec)
    return _run_sync(executor.create_value(comp_pb, type_spec))
Code Example #23
 def test_serialize_tensor_type(self, dtype, shape):
     actual_proto = type_serialization.serialize_type((dtype, shape))
     expected_proto = pb.Type(tensor=pb.TensorType(
         dtype=dtype.as_datatype_enum, dims=_shape_to_dims(shape)))
     self.assertEqual(actual_proto, expected_proto)
Code Example #24
def _make_sequence_map_value(executor, source_type, target_type):
    intrinsic_spec = pb.Intrinsic(uri=intrinsic_defs.SEQUENCE_MAP.uri)
    type_spec = _make_sequence_map_type(source_type, target_type)
    comp_pb = pb.Computation(type=type_serialization.serialize_type(type_spec),
                             intrinsic=intrinsic_spec)
    return _run_sync(executor.create_value(comp_pb, type_spec))
Code example #25
def test_serialize_type_with_string_sequence(self):
    actual_proto = type_serialization.serialize_type(
        computation_types.SequenceType(tf.string))
    expected_proto = pb.Type(sequence=pb.SequenceType(
        element=_create_scalar_tensor_type(tf.string)))
    self.assertEqual(actual_proto, expected_proto)
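
`_create_scalar_tensor_type` is another assumed test helper; a minimal sketch
that wraps a dtype in a scalar (dimensionless) tensor type proto:

def _create_scalar_tensor_type(dtype):
    # Sketch only: a pb.Type holding a scalar tensor of `dtype`.
    return pb.Type(tensor=pb.TensorType(dtype=dtype.as_datatype_enum))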
Code example #26
def create_binary_operator(
    operator, operand_type: computation_types.Type) -> pb.Computation:
  """Returns a tensorflow computation computing a binary operation.

  The returned computation has the type signature `(<T,T> -> U)`, where `T` is
  `operand_type` and `U` is the result of applying the `operator` to a tuple of
  type `<T,T>`.

  Note: If `operand_type` is a `computation_types.NamedTupleType`, then
  `operator` will be applied pointwise. This places the burden on callers of
  this function to construct the correct values to pass into the returned
  function. For example, to divide `[2, 2]` by `2`, first `2` must be packed
  into the data structure `[x, x]`, before the division operator of the
  appropriate type is called.

  Args:
    operator: A callable taking two arguments representing the operation to
      encode. For example: `tf.math.add`, `tf.math.multiply`, and
      `tf.math.divide`.
    operand_type: A `computation_types.Type` to use as the argument to the
      constructed binary operator; must contain only named tuples and tensor
      types.

  Raises:
    TypeError: If the constraints of `operand_type` are violated or `operator`
      is not callable.
  """
  if not type_analysis.is_generic_op_compatible_type(operand_type):
    raise TypeError(
        'The type {} contains a type other than `computation_types.TensorType` '
        'and `computation_types.NamedTupleType`; this is disallowed in the '
        'generic operators.'.format(operand_type))
  py_typecheck.check_callable(operator)
  with tf.Graph().as_default() as graph:
    operand_1_value, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', operand_type, graph)
    operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
        'y', operand_type, graph)

    if isinstance(operand_type, computation_types.TensorType):
      result_value = operator(operand_1_value, operand_2_value)
    elif isinstance(operand_type, computation_types.NamedTupleType):
      result_value = anonymous_tuple.map_structure(operator, operand_1_value,
                                                   operand_2_value)
    else:
      raise TypeError(
          'Operand type {} cannot be used in generic operations. The whitelist '
          'in `type_analysis.is_generic_op_compatible_type` has allowed it to '
          'pass, and should be updated.'.format(operand_type))
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        result_value, graph)

  type_signature = computation_types.FunctionType([operand_type, operand_type],
                                                  result_type)
  parameter_binding = pb.TensorFlow.Binding(
      tuple=pb.TensorFlow.NamedTupleBinding(
          element=[operand_1_binding, operand_2_binding]))
  tensorflow = pb.TensorFlow(
      graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
      parameter=parameter_binding,
      result=result_binding)
  return pb.Computation(
      type=type_serialization.serialize_type(type_signature),
      tensorflow=tensorflow)
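
A usage sketch of the pointwise behavior described in the note above; the
operand type and operator are illustrative:

# To divide [2., 2.] by 2. pointwise, the caller packs the scalar into the
# same structure before invoking the returned computation.
operand_type = computation_types.NamedTupleType([tf.float32, tf.float32])
div_comp = create_binary_operator(tf.math.divide, operand_type)
# div_comp has type signature
# (<<float32,float32>,<float32,float32>> -> <float32,float32>).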
Code example #27
def create_binary_operator_with_upcast(
    type_signature: computation_types.Type,
    operator: Callable[[Any, Any], Any]) -> pb.Computation:
  """Creates TF computation upcasting its argument and applying `operator`.

  Args:
    type_signature: Value convertible to `computation_types.NamedTupleType`,
      with two elements, both of the same type or the second able to be upcast
      to the first, as explained in `apply_binary_operator_with_upcast`, and
      both containing only tuples and tensors in their type tree.
    operator: Callable defining the operator.

  Returns:
    A `building_blocks.CompiledComputation` encapsulating a function which
    upcasts the second element of its argument and applies the binary
    operator.
  """

  py_typecheck.check_callable(operator)
  type_signature = computation_types.to_type(type_signature)
  type_analysis.check_tensorflow_compatible_type(type_signature)
  if not isinstance(
      type_signature,
      computation_types.NamedTupleType) or len(type_signature) != 2:
    raise TypeError('To apply a binary operator, we must by definition have an '
                    'argument which is a `NamedTupleType` with 2 elements; '
                    'asked to create a binary operator for type: {t}'.format(
                        t=type_signature))
  if type_analysis.contains_types(type_signature,
                                  computation_types.SequenceType):
    raise TypeError(
        'Applying binary operators in TensorFlow is only '
        'supported on Tensors and NamedTupleTypes; you '
        'passed {t} which contains a SequenceType.'.format(t=type_signature))

  def _pack_into_type(to_pack, type_spec):
    """Pack Tensor value `to_pack` into the nested structure `type_spec`."""
    if isinstance(type_spec, computation_types.NamedTupleType):
      elem_iter = anonymous_tuple.iter_elements(type_spec)
      return anonymous_tuple.AnonymousTuple([
          (elem_name, _pack_into_type(to_pack, elem_type))
          for elem_name, elem_type in elem_iter
      ])
    elif isinstance(type_spec, computation_types.TensorType):
      return tf.broadcast_to(to_pack, type_spec.shape)

  with tf.Graph().as_default() as graph:
    first_arg, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', type_signature[0], graph)
    operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
        'y', type_signature[1], graph)
    if type_signature[0].is_equivalent_to(type_signature[1]):
      second_arg = operand_2_value
    else:
      second_arg = _pack_into_type(operand_2_value, type_signature[0])

    if isinstance(type_signature[0], computation_types.TensorType):
      result_value = operator(first_arg, second_arg)
    elif isinstance(type_signature[0], computation_types.NamedTupleType):
      result_value = anonymous_tuple.map_structure(operator, first_arg,
                                                   second_arg)
    else:
      raise TypeError('Encountered unexpected type {t}; can only handle Tensor '
                      'and NamedTupleTypes.'.format(t=type_signature[0]))

    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        result_value, graph)

  type_signature = computation_types.FunctionType(type_signature, result_type)
  parameter_binding = pb.TensorFlow.Binding(
      tuple=pb.TensorFlow.NamedTupleBinding(
          element=[operand_1_binding, operand_2_binding]))
  tensorflow = pb.TensorFlow(
      graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
      parameter=parameter_binding,
      result=result_binding)
  return pb.Computation(
      type=type_serialization.serialize_type(type_signature),
      tensorflow=tensorflow)
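
A usage sketch of the upcast path, with illustrative types: the scalar second
element is broadcast into the structure of the first before a pointwise add.

arg_type = computation_types.NamedTupleType(
    [[tf.int32, tf.int32], tf.int32])
add_comp = create_binary_operator_with_upcast(arg_type, tf.math.add)
# add_comp has type (<<int32,int32>,int32> -> <int32,int32>); the lone
# int32 is packed into <int32,int32> by _pack_into_type before the add.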
Code example #28
def create_constant(scalar_value,
                    type_spec: computation_types.Type) -> pb.Computation:
  """Returns a tensorflow computation returning a constant `scalar_value`.

  The returned computation has the type signature `( -> T)`, where `T` is
  `type_spec`.

  `scalar_value` must be a scalar, and cannot be a float if any of the tensor
  leaves of `type_spec` contain an integer data type. `type_spec` must contain
  only named tuples and tensor types, but these can be arbitrarily nested.

  Args:
    scalar_value: A scalar value to place in all the tensor leaves of
      `type_spec`.
    type_spec: A `computation_types.Type` describing the shape and structure
      of the constant to construct; must contain only named tuples and tensor
      types.

  Raises:
    TypeError: If the constraints of `type_spec` are violated.
  """
  if not type_analysis.is_generic_op_compatible_type(type_spec):
    raise TypeError(
        'Type spec {} cannot be constructed as a TensorFlow constant in TFF; '
        'only nested tuples and tensors are permitted.'.format(type_spec))
  inferred_scalar_value_type = type_conversions.infer_type(scalar_value)
  if (not isinstance(inferred_scalar_value_type, computation_types.TensorType)
      or inferred_scalar_value_type.shape != tf.TensorShape(())):
    raise TypeError(
        'Must pass a scalar value to `create_constant`; encountered '
        'a value {}'.format(scalar_value))
  tensor_dtypes_in_type_spec = []

  def _pack_dtypes(type_signature):
    """Appends dtype of `type_signature` to nonlocal variable."""
    if isinstance(type_signature, computation_types.TensorType):
      tensor_dtypes_in_type_spec.append(type_signature.dtype)
    return type_signature, False

  type_transformations.transform_type_postorder(type_spec, _pack_dtypes)

  if (any(x.is_integer for x in tensor_dtypes_in_type_spec) and
      not inferred_scalar_value_type.dtype.is_integer):
    raise TypeError(
        'Only integers can be used as scalar values if our desired constant '
        'type spec contains any integer tensors; passed scalar {} of dtype {} '
        'for type spec {}.'.format(scalar_value,
                                   inferred_scalar_value_type.dtype, type_spec))

  result_type = type_spec

  def _create_result_tensor(type_spec, scalar_value):
    """Packs `scalar_value` into `type_spec` recursively."""
    if isinstance(type_spec, computation_types.TensorType):
      type_spec.shape.assert_is_fully_defined()
      result = tf.constant(
          scalar_value, dtype=type_spec.dtype, shape=type_spec.shape)
    else:
      elements = []
      for _, type_element in anonymous_tuple.iter_elements(type_spec):
        elements.append(_create_result_tensor(type_element, scalar_value))
      result = elements
    return result

  with tf.Graph().as_default() as graph:
    result = _create_result_tensor(result_type, scalar_value)
    _, result_binding = tensorflow_utils.capture_result_from_graph(
        result, graph)

  type_signature = computation_types.FunctionType(None, result_type)
  tensorflow = pb.TensorFlow(
      graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
      parameter=None,
      result=result_binding)
  return pb.Computation(
      type=type_serialization.serialize_type(type_signature),
      tensorflow=tensorflow)
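
A usage sketch with an illustrative nested type; per the constraint above, the
scalar must be an integer because the type contains an int32 leaf:

type_spec = computation_types.to_type(
    [('a', computation_types.TensorType(tf.int32, [2])),
     ('b', tf.float32)])
zero_comp = create_constant(0, type_spec)
# zero_comp has type ( -> <a=int32[2],b=float32>), with every tensor leaf
# filled with the value 0.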
Code example #29
def serialize_value(value, type_spec=None):
    """Serializes a value into `executor_pb2.Value`.

  Args:
    value: A value to be serialized.
    type_spec: Optional type spec, a `tff.Type` or something convertible to it.

  Returns:
    A tuple `(value_proto, ret_type_spec)` where `value_proto` is an instance
    of `executor_pb2.Value` with the serialized content of `value`, and the
    returned `ret_type_spec` is an instance of `tff.Type` that represents the
    TFF type of the serialized value.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
    type_spec = computation_types.to_type(type_spec)
    if isinstance(value, computation_pb2.Computation):
        type_spec = type_utils.reconcile_value_type_with_type_spec(
            type_serialization.deserialize_type(value.type), type_spec)
        return executor_pb2.Value(computation=value), type_spec
    elif isinstance(value, computation_impl.ComputationImpl):
        return serialize_value(
            computation_impl.ComputationImpl.get_proto(value),
            type_utils.reconcile_value_with_type_spec(value, type_spec))
    elif isinstance(type_spec, computation_types.TensorType):
        return serialize_tensor_value(value, type_spec)
    elif isinstance(type_spec, computation_types.NamedTupleType):
        type_elements = anonymous_tuple.to_elements(type_spec)
        val_elements = anonymous_tuple.to_elements(
            anonymous_tuple.from_container(value))
        tup_elems = []
        for (e_name, e_type), (_, e_val) in zip(type_elements, val_elements):
            e_proto, _ = serialize_value(e_val, e_type)
            tup_elems.append(
                executor_pb2.Value.Tuple.Element(
                    name=e_name if e_name else None, value=e_proto))
        result_proto = (executor_pb2.Value(tuple=executor_pb2.Value.Tuple(
            element=tup_elems)))
        return result_proto, type_spec
    elif isinstance(type_spec, computation_types.SequenceType):
        if not isinstance(value,
                          type_conversions.TF_DATASET_REPRESENTATION_TYPES):
            raise TypeError(
                'Cannot serialize Python type {!s} as TFF type {!s}.'.format(
                    py_typecheck.type_string(type(value)),
                    type_spec if type_spec is not None else 'unknown'))

        value_type = computation_types.SequenceType(
            computation_types.to_type(value.element_spec))
        if not type_analysis.is_assignable_from(type_spec, value_type):
            raise TypeError(
                'Cannot serialize dataset with elements of type {!s} as TFF type {!s}.'
                .format(value_type,
                        type_spec if type_spec is not None else 'unknown'))

        return serialize_sequence_value(value), type_spec
    elif isinstance(type_spec, computation_types.FederatedType):
        if type_spec.all_equal:
            value = [value]
        else:
            py_typecheck.check_type(value, list)
        items = []
        for v in value:
            it, it_type = serialize_value(v, type_spec.member)
            type_analysis.check_assignable_from(type_spec.member, it_type)
            items.append(it)
        result_proto = executor_pb2.Value(
            federated=executor_pb2.Value.Federated(
                type=type_serialization.serialize_type(type_spec).federated,
                value=items))
        return result_proto, type_spec
    else:
        raise ValueError(
            'Unable to serialize value with Python type {} and {} TFF type.'.
            format(str(py_typecheck.type_string(type(value))),
                   str(type_spec) if type_spec is not None else 'unknown'))
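
A usage sketch of the named-tuple branch, assuming `collections` is imported
alongside the module's other dependencies:

value_proto, ret_type = serialize_value(
    collections.OrderedDict(a=10, b=20),
    computation_types.to_type(
        collections.OrderedDict(a=tf.int32, b=tf.int32)))
# ret_type is <a=int32,b=int32>; value_proto.tuple carries one serialized
# element per named field, in order.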
Code example #30
def __init__(self, parameter_type, zero_result):
    self.zero_result = zero_result
    fn_type = computation_types.FunctionType(parameter_type, tf.string)
    test_proto = pb.Computation(
        type=type_serialization.serialize_type(fn_type))
    super().__init__(test_proto, context_stack_impl.context_stack, fn_type)