Example #1
 def test_serialize_type_with_string_sequence(self):
     self.assertEqual(
         _compact_repr(
             type_serialization.serialize_type(
                 computation_types.SequenceType(tf.string))),
         'sequence { element { tensor { dtype: DT_STRING shape { } } } }')
Example #2
 def test_serialize_type_with_placement(self):
     self.assertEqual(
         _compact_repr(
             type_serialization.serialize_type(
                 computation_types.PlacementType())), 'placement { }')
Example #3
 def test_serialize_type_with_tensor_dtype_with_shape(self):
     self.assertEqual(
         _compact_repr(
             type_serialization.serialize_type((tf.int32, [10, 20]))),
         'tensor { dtype: DT_INT32 '
         'shape { dim { size: 10 } dim { size: 20 } } }')
Example #4
 def test_serialize_type_with_tensor_dtype_with_shape_undefined_dim(self):
     self.assertEqual(
         _compact_repr(type_serialization.serialize_type(
             (tf.int32, [None]))), 'tensor { dtype: DT_INT32 '
         'shape { dim { size: -1 } } }')
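
The tests above pin down the exact compact text format of the serialized proto. A lighter companion check, sketched here assuming the same `computation_types` and `type_serialization` modules the tests exercise, is that serialization round-trips through `deserialize_type` (used by the later examples):

# Hypothetical round-trip sketch, not part of the source tests.
type_spec = computation_types.to_type((tf.int32, [10, 20]))
proto = type_serialization.serialize_type(type_spec)
assert type_serialization.deserialize_type(proto) == type_spec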
Example #5
def serialize_tf2_as_tf_computation(target, parameter_type, unpack=None):
    """Serializes the 'target' as a TF computation with a given parameter type.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function or `tf.function`, with arguments
      matching the 'parameter_type'.
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    unpack: Whether to always unpack the parameter_type. Necessary for support
      of polymorphic tf2_computations.

  Returns:
    A tuple of (`pb.Computation`, `tff.Type`): the constructed computation with
      the `pb.TensorFlow` variant set, and the annotated function type.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
    py_typecheck.check_callable(target)
    parameter_type = computation_types.to_type(parameter_type)
    argspec = function_utils.get_argspec(target)
    if argspec.args and not parameter_type:
        raise ValueError(
            'Expected the target to declare no parameters, found {}.'.format(
                repr(argspec.args)))

    # In the codepath for TF V1 based serialization (tff.tf_computation),
    # we get the "wrapped" function to serialize. Here, target is the
    # raw function to be wrapped; however, we still need to know if
    # the parameter_type should be unpacked into multiple args and kwargs
    # in order to construct the TensorSpecs to be passed in the call
    # to get_concrete_fn below.
    unpack = function_utils.infer_unpack_needed(target, parameter_type, unpack)
    arg_typespecs, kwarg_typespecs, parameter_binding = (
        graph_utils.get_tf_typespec_and_binding(parameter_type,
                                                arg_names=argspec.args,
                                                unpack=unpack))

    # Pseudo-global to be appended to once when target_poly below is traced.
    type_and_binding_slot = []

    # N.B. To serialize a tf.function or eager python code,
    # the return type must be a flat list, tuple, or dict. However, the
    # tff.tf_computation must be able to handle structured inputs and outputs.
    # Thus, we intercept the result of calling the original target fn, introspect
    # its structure to create a result_type and bindings, and then return a
    # flat dict output. It is this new "unpacked" tf.function that we will
    # serialize using tf.saved_model.save.
    #
    # TODO(b/117428091): The return type limitation is primarily a limitation of
    # SignatureDefs  and therefore of the signatures argument to
    # tf.saved_model.save. tf.functions attached to objects and loaded back with
    # tf.saved_model.load can take/return nests; this might offer a better
    # approach to the one taken here.

    @tf.function(autograph=False)
    def target_poly(*args, **kwargs):
        result = target(*args, **kwargs)
        result_dict, result_type, result_binding = (
            graph_utils.get_tf2_result_dict_and_binding(result))
        assert not type_and_binding_slot
        # A "side channel" python output.
        type_and_binding_slot.append((result_type, result_binding))
        return result_dict

    # Triggers tracing so that type_and_binding_slot is filled.
    cc_fn = target_poly.get_concrete_function(*arg_typespecs,
                                              **kwarg_typespecs)
    assert len(type_and_binding_slot) == 1
    result_type, result_binding = type_and_binding_slot[0]

    # N.B. cc_fn does *not* accept the same args and kwargs as the
    # Python target_poly; instead, it must be called with **kwargs based on the
    # unique names embedded in the TensorSpecs inside arg_typespecs and
    # kwarg_typespecs. The (preliminary) parameter_binding tracks the mapping
    # between these tensor names and the components of the (possibly nested) TFF
    # input type. When cc_fn is serialized, concrete tensors for each input are
    # introduced, and the call finalize_binding(parameter_binding,
    # sigs['serving_default'].inputs) updates the bindings to reference these
    # concrete tensors.

    # Associate vars with unique names and explicitly attach to the Checkpoint:
    var_dict = {
        'var{:02d}'.format(i): v
        for i, v in enumerate(cc_fn.graph.variables)
    }
    saveable = tf.train.Checkpoint(fn=target_poly, **var_dict)

    # TODO(b/122081673): All we really need is the meta graph def; we could
    # probably just load that directly, e.g., using parse_saved_model from
    # tensorflow/python/saved_model/loader_impl.py, but I'm not sure we want to
    # depend on that presumably non-public symbol. Perhaps TF can expose a way
    # to just get the MetaGraphDef directly without saving to a tempfile? This
    # looks like a small change to v2.saved_model.save().

    # Create the temp dir outside the `try` so the `finally` clean-up below
    # can never reference an unbound `outdir`.
    outdir = tempfile.mkdtemp('savedmodel')
    try:
        tf.saved_model.save(saveable, outdir, signatures=cc_fn)

        graph = tf.Graph()
        with tf.Session(graph=graph) as sess:
            mgd = tf.saved_model.loader.load(
                sess,
                tags=[tf.saved_model.tag_constants.SERVING],
                export_dir=outdir)
    finally:
        shutil.rmtree(outdir)
    sigs = mgd.signature_def

    # TODO(b/123102455): Figure out how to support the init_op. The meta graph def
    # contains sigs['__saved_model_init_op'].outputs['__saved_model_init_op']. It
    # probably won't do what we want, because it will want to read from
    # Checkpoints, not just run Variable initializers (?). The right solution may
    # be to grab the target_poly.get_initialization_function(), and save a sig for
    # that.

    # Now, traverse the signature from the MetaGraphDef to find
    # the actual tensor names and write them into the bindings.
    finalize_binding(parameter_binding, sigs['serving_default'].inputs)
    finalize_binding(result_binding, sigs['serving_default'].outputs)

    annotated_type = computation_types.FunctionType(parameter_type,
                                                    result_type)

    return pb.Computation(type=pb.Type(function=pb.FunctionType(
        parameter=type_serialization.serialize_type(parameter_type),
        result=type_serialization.serialize_type(result_type))),
                          tensorflow=pb.TensorFlow(
                              graph_def=serialization_utils.pack_graph_def(
                                  mgd.graph_def),
                              parameter=parameter_binding,
                              result=result_binding)), annotated_type
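
A hedged usage sketch for the function above, assuming its module-level imports are in scope: with a 2-tuple parameter type and a two-argument target, `infer_unpack_needed` should resolve to unpacking, so tracing passes one TensorSpec per tuple element.

def _add(x, y):
    return x + y

# Returns the (pb.Computation, annotated tff.Type) pair described above.
comp_proto, comp_type = serialize_tf2_as_tf_computation(
    _add, ((tf.float32, []), (tf.float32, [])))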
Example #6
 def test_serialize_type_with_tensor_dtype_without_shape(self):
     self.assertEqual(
         _compact_repr(type_serialization.serialize_type(tf.int32)),
         'tensor { dtype: DT_INT32 shape { } }')
Example #7
def concatenate_tensorflow_blocks(tf_comp_list):
    """Concatenates inputs and outputs of its argument to a single TF block.

  Takes a Python `list` or `tuple` of instances of
  `computation_building_blocks.CompiledComputation`, and constructs a single
  instance of the same building block representing the computations present
  in this list concatenated side-by-side.

  There is one important convention here for callers to be aware of.
  `concatenate_tensorflow_blocks` does not perform any more packing into tuples
  than necessary. That is, if `tf_comp_list` contains only a single TF
  computation which declares a parameter, the parameter type of the resulting
  computation is exactly this single parameter type. Since all TF blocks declare
  a result, this is only of concern for parameters, and we will always return a
  function with a tuple for its result value.

  Args:
    tf_comp_list: Python `list` or `tuple` of
      `computation_building_blocks.CompiledComputation`s, whose inputs and
      outputs we wish to concatenate.

  Returns:
    A single instance of `computation_building_blocks.CompiledComputation`,
    representing all the computations in `tf_comp_list` concatenated
    side-by-side.

  Raises:
    ValueError: If we are passed fewer than 2 computations in `tf_comp_list`. In
      this case, the caller is likely using the wrong function.
    TypeError: If `tf_comp_list` is not a `list` or `tuple`, or if it
      contains anything other than TF blocks.
  """
    py_typecheck.check_type(tf_comp_list, (list, tuple))
    if len(tf_comp_list) < 2:
        raise ValueError(
            'We expect to concatenate at least two blocks of '
            'TensorFlow; otherwise the transformation you seek is '
            'simply type manipulation, and you will find your '
            'desired function elsewhere in '
            '`compiled_computation_transforms`. You passed a tuple of '
            'length {}.'.format(len(tf_comp_list)))
    tf_proto_list = []
    for comp in tf_comp_list:
        py_typecheck.check_type(
            comp, computation_building_blocks.CompiledComputation)
        tf_proto_list.append(comp.proto)

    (merged_graph, init_op_name, parameter_name_maps,
     result_name_maps) = graph_merge.concatenate_inputs_and_outputs(
         [_unpack_proto_into_graph_spec(x) for x in tf_proto_list])

    concatenated_parameter_bindings = _pack_concatenated_bindings(
        [x.tensorflow.parameter for x in tf_proto_list], parameter_name_maps)
    concatenated_result_bindings = _pack_concatenated_bindings(
        [x.tensorflow.result for x in tf_proto_list], result_name_maps)

    if concatenated_parameter_bindings:
        tf_result_proto = pb.TensorFlow(
            graph_def=serialization_utils.pack_graph_def(
                merged_graph.as_graph_def()),
            initialize_op=init_op_name,
            parameter=concatenated_parameter_bindings,
            result=concatenated_result_bindings)
    else:
        tf_result_proto = pb.TensorFlow(
            graph_def=serialization_utils.pack_graph_def(
                merged_graph.as_graph_def()),
            initialize_op=init_op_name,
            result=concatenated_result_bindings)

    parameter_type = _construct_concatenated_type(
        [x.type_signature.parameter for x in tf_comp_list])
    return_type = _construct_concatenated_type(
        [x.type_signature.result for x in tf_comp_list])
    function_type = computation_types.FunctionType(parameter_type, return_type)
    serialized_function_type = type_serialization.serialize_type(function_type)

    constructed_proto = pb.Computation(type=serialized_function_type,
                                       tensorflow=tf_result_proto)
    return computation_building_blocks.CompiledComputation(constructed_proto)
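
A sketch of intended use. `_compiled` is a hypothetical helper that wraps the proto of a `tff.tf_computation` in a `computation_building_blocks.CompiledComputation`; the library's tests build such blocks from `computation_impl.ComputationImpl.get_proto`.

# `_compiled` is hypothetical; see the note above.
square = _compiled(tff.tf_computation(lambda x: x * x, tf.int32))
negate = _compiled(tff.tf_computation(lambda x: -x, tf.int32))
merged = concatenate_tensorflow_blocks([square, negate])
# One block running both graphs side by side; merged.type_signature
# should be (<int32,int32> -> <int32,int32>).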
Example #8
def serialize_py_fn_as_tf_computation(target, parameter_type, context_stack):
    """Serializes the 'target' as a TF computation with a given parameter type.

  See also `serialize_tf2_as_tf_computation` for TensorFlow 2
  serialization.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function. In the future, we will add here
      support for serializing the various kinds of non-eager and eager
      functions, and eventually aim at full support for and compliance with TF
      2.0. This function is currently required to declare either zero parameters
      if `parameter_type` is `None`, or exactly one parameter if it's not
      `None`.  The nested structure of this parameter must correspond to the
      structure of the 'parameter_type'. In the future, we may support targets
      with multiple args/keyword args (to be documented in the API and
      referenced from here).
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    context_stack: The context stack to use.

  Returns:
    A tuple of (`pb.Computation`, `tff.Type`), where the computation contains
    the instance with the `pb.TensorFlow` variant set, and the type is an
    instance of `tff.Type`, potentially including Python container annotations,
    for use by TensorFlow computation wrappers.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
    # TODO(b/113112108): Support a greater variety of target type signatures,
    # with keyword args or multiple args corresponding to elements of a tuple.
    # Document all accepted forms with examples in the API, and point to there
    # from here.

    py_typecheck.check_type(target, types.FunctionType)
    py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
    parameter_type = computation_types.to_type(parameter_type)
    argspec = inspect.getargspec(target)  # pylint: disable=deprecated-method

    with tf.Graph().as_default() as graph:
        args = []
        if parameter_type:
            if len(argspec.args) != 1:
                raise ValueError(
                    'Expected the target to declare exactly one parameter, '
                    'found {}.'.format(repr(argspec.args)))
            parameter_name = argspec.args[0]
            parameter_value, parameter_binding = graph_utils.stamp_parameter_in_graph(
                parameter_name, parameter_type, graph)
            args.append(parameter_value)
        else:
            if argspec.args:
                raise ValueError(
                    'Expected the target to declare no parameters, found {}.'.
                    format(repr(argspec.args)))
            parameter_binding = None
        context = tf_computation_context.TensorFlowComputationContext(graph)
        with context_stack.install(context):
            result = target(*args)

            # TODO(b/122081673): This needs to change for TF 2.0. We may also
            # want to allow the person creating a tff.tf_computation to specify
            # a different initializer; e.g., if it is known that certain
            # variables will be assigned immediately to arguments of the function,
            # then it is wasteful to initialize them before this.
            #
            # The following is a bit of a work around: the collections below may
            # contain variables more than once, hence we throw into a set. TFF needs
            # to ensure all variables are initialized, but not all variables are
            # always in the collections we expect. tff.learning._KerasModel tries to
            # pull Keras variables (that may or may not be in GLOBAL_VARIABLES) into
            # TFF_MODEL_VARIABLES for now.
            all_variables = set(
                tf.global_variables() + tf.local_variables() +
                tf.get_collection(
                    graph_keys.GraphKeys.VARS_FOR_TFF_TO_INITIALIZE))
            if all_variables:
                # Use a readable but not-too-long name for the init_op.
                name = 'init_op_for_' + '_'.join(
                    [v.name.replace(':0', '') for v in all_variables])
                if len(name) > 50:
                    name = 'init_op_for_{}_variables'.format(
                        len(all_variables))
                with tf.control_dependencies(context.init_ops):
                    # Before running the main new init op, run any initializers for sub-
                    # computations from context.init_ops. Variables from import_graph_def
                    # will not make it into the global collections, and so will not be
                    # initialized without this code path.
                    init_op_name = tf.initializers.variables(all_variables,
                                                             name=name).name
            elif context.init_ops:
                init_op_name = tf.group(*context.init_ops,
                                        name='subcomputation_init_ops').name
            else:
                init_op_name = None

        result_type, result_binding = graph_utils.capture_result_from_graph(
            result, graph)

    annotated_type = computation_types.FunctionType(parameter_type,
                                                    result_type)

    return pb.Computation(type=pb.Type(function=pb.FunctionType(
        parameter=type_serialization.serialize_type(parameter_type),
        result=type_serialization.serialize_type(result_type))),
                          tensorflow=pb.TensorFlow(
                              graph_def=serialization_utils.pack_graph_def(
                                  graph.as_graph_def()),
                              parameter=parameter_binding,
                              result=result_binding,
                              initialize_op=init_op_name)), annotated_type
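
A hedged usage sketch, assuming the internal `context_stack_impl` module's default stack is available alongside the excerpt's own imports:

comp_proto, comp_type = serialize_py_fn_as_tf_computation(
    lambda x: x + 1, tf.int32, context_stack_impl.context_stack)
# comp_type is the annotated function type, here (int32 -> int32).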
def pad_graph_inputs_to_match_type(comp, type_signature):
    r"""Pads the parameter bindings of `comp` to match `type_signature`.

  The padded parameters here are in effect dummy bindings--they are not
  plugged in elsewhere in `comp`. This pattern is necessary to transform TFF
  expressions of the form:

                            Lambda(arg)
                                |
                              Call
                             /     \
          CompiledComputation       Tuple
                                      |
                                  Selection[i]
                                      |
                                    Ref(arg)

  into the form:

                          CompiledComputation

  in the case where arg in the above picture represents an n-tuple, where n > 1.

  Notice that some type manipulation must take place to execute the
  transformation outlined above, or anything similar to it, since the Lambda
  we are looking to replace accepts a parameter of an n-tuple, whereas the
  `CompiledComputation` represented above accepts only a 1-tuple.
  `pad_graph_inputs_to_match_type` is intended as an intermediate transform in
  the transformation outlined above, since there may also need to be some
  parameter permutation via `permute_graph_inputs`.

  Notice also that the existing parameter bindings of `comp` must match the
  first elements of `type_signature`. This is to ensure that we are attempting
  to pad only compatible `CompiledComputation`s to a given type signature.

  Args:
    comp: Instance of `computation_building_blocks.CompiledComputation`
      representing the graph whose inputs we want to pad to match
      `type_signature`.
    type_signature: Instance of `computation_types.NamedTupleType` representing
      the type signature we wish to pad `comp` to accept as a parameter.

  Returns:
    A transformed version of `comp`, instance of
    `computation_building_blocks.CompiledComputation` which takes an argument
    of type `type_signature` and executes the same logic as `comp`. In
    particular, this transformed version will have the same return type as
    the original `comp`.

  Raises:
    TypeError: If the proto underlying `comp` has a parameter type which
      is not of `NamedTupleType`, the `type_signature` argument is not of type
      `NamedTupleType`, or there is a type mismatch between the declared
      parameters of `comp` and the requested `type_signature`.
    ValueError: If the requested `type_signature` is shorter than the
      parameter type signature declared by `comp`.
  """
    py_typecheck.check_type(type_signature, computation_types.NamedTupleType)
    py_typecheck.check_type(comp,
                            computation_building_blocks.CompiledComputation)
    proto = comp.proto
    graph_def = proto.tensorflow.graph_def
    graph_parameter_binding = proto.tensorflow.parameter
    proto_type = type_serialization.deserialize_type(proto.type)
    binding_oneof = graph_parameter_binding.WhichOneof('binding')
    if binding_oneof != 'tuple':
        raise TypeError(
            'Can only pad inputs of a CompiledComputation with parameter type '
            'tuple; you have attempted to pad a CompiledComputation '
            'with parameter type {}'.format(binding_oneof))
    # This line provides protection against an improperly serialized proto.
    py_typecheck.check_type(proto_type.parameter,
                            computation_types.NamedTupleType)
    parameter_bindings = [x for x in graph_parameter_binding.tuple.element]
    parameter_type_elements = anonymous_tuple.to_elements(proto_type.parameter)
    type_signature_elements = anonymous_tuple.to_elements(type_signature)
    if len(parameter_bindings) > len(type_signature):
        raise ValueError(
            'We can only pad graph input bindings, never mask them. '
            'This means that a proposed type signature passed to '
            '`pad_graph_inputs_to_match_type` must have at least as '
            'many elements as the existing type signature of the '
            'compiled computation. You have proposed a type signature '
            'of length {} be assigned to a computation with parameter '
            'type signature of length {}.'.format(len(type_signature),
                                                  len(parameter_bindings)))
    if any(x != type_signature_elements[idx]
           for idx, x in enumerate(parameter_type_elements)):
        raise TypeError(
            'The existing elements of the parameter type signature '
            'of the compiled computation in `pad_graph_inputs_to_match_type` '
            'must match the beginning of the proposed new type signature; '
            'you have proposed a parameter type of {} for a computation '
            'with existing parameter type {}.'.format(type_signature,
                                                      proto_type.parameter))
    g = tf.Graph()
    with g.as_default():
        tf.graph_util.import_graph_def(
            serialization_utils.unpack_graph_def(graph_def), name='')

    elems_to_stamp = anonymous_tuple.to_elements(
        type_signature)[len(parameter_bindings):]
    for name, type_spec in elems_to_stamp:
        # Unnamed elements still need a placeholder graph-level name to stamp.
        stamp_name = 'name' if name is None else name
        _, stamped_binding = graph_utils.stamp_parameter_in_graph(
            stamp_name, type_spec, g)
        parameter_bindings.append(stamped_binding)
        parameter_type_elements.append((name, type_spec))

    new_parameter_binding = pb.TensorFlow.Binding(
        tuple=pb.TensorFlow.NamedTupleBinding(element=parameter_bindings))
    new_graph_def = g.as_graph_def()

    new_function_type = computation_types.FunctionType(parameter_type_elements,
                                                       proto_type.result)
    serialized_type = type_serialization.serialize_type(new_function_type)

    input_padded_proto = pb.Computation(
        type=serialized_type,
        tensorflow=pb.TensorFlow(
            graph_def=serialization_utils.pack_graph_def(new_graph_def),
            initialize_op=proto.tensorflow.initialize_op,
            parameter=new_parameter_binding,
            result=proto.tensorflow.result))

    return computation_building_blocks.CompiledComputation(input_padded_proto)
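
A sketch of the padding transform, where `foo` is assumed to be a `CompiledComputation` of type (<int32> -> <int32>):

padded = pad_graph_inputs_to_match_type(
    foo, computation_types.NamedTupleType([tf.int32, tf.float32]))
# `padded` now accepts <int32,float32>; the float32 input is a dummy
# binding that feeds nothing, and the result type is unchanged.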
def select_graph_output(comp, name=None, index=None):
    r"""Makes `CompiledComputation` with same input as `comp` and output `output`.

  Given an instance of `computation_building_blocks.CompiledComputation` `comp`
  with type signature (T -> <U, ...,V>), `select_output` returns a
  `CompiledComputation` representing the logic of calling `comp` and then
  selecting `name` or `index` from the resulting `tuple`. Notice that only one
  of `name` or `index` can be specified, and one of them must be specified.

  At the level of a TFF AST, `select_graph_output` is necessary to transform
  the structure below:

                                Select(x)
                                   |
                                  Call
                                 /    \
                            Graph      Comp

  into:

                                Call
                               /    \
  select_graph_output(Graph, x)      Comp


  Args:
    comp: Instance of `computation_building_blocks.CompiledComputation` which
      must have result type `computation_types.NamedTupleType`, the function
      from which to select `output`.
    name: Instance of `str`, the name of the field to select from the output of
      `comp`. Optional, but one of `name` or `index` must be specified.
    index: Instance of `int`, the index of the field to select from the output
      of `comp`. Optional, but one of `name` or `index` must be specified.

  Returns:
    An instance of `computation_building_blocks.CompiledComputation` as
    described, the result of selecting the appropriate output from `comp`.
  """
    py_typecheck.check_type(comp,
                            computation_building_blocks.CompiledComputation)
    if index is not None and name is not None:
        raise ValueError(
            'Please specify at most one of `name` or `index` to '
            '`select_graph_output`.')
    if index is not None:
        py_typecheck.check_type(index, int)
    elif name is not None:
        py_typecheck.check_type(name, str)
    else:
        raise ValueError(
            'Please pass a `name` or `index` to `select_graph_output`.')
    proto = comp.proto
    graph_result_binding = proto.tensorflow.result
    binding_oneof = graph_result_binding.WhichOneof('binding')
    if binding_oneof != 'tuple':
        raise TypeError(
            'Can only select output from a CompiledComputation with return type '
            'tuple; you have attempted a selection from a CompiledComputation '
            'with return type {}'.format(binding_oneof))
    proto_type = type_serialization.deserialize_type(proto.type)
    py_typecheck.check_type(proto_type.result,
                            computation_types.NamedTupleType)
    if name is None:
        result = [x for x in graph_result_binding.tuple.element][index]
        result_type = proto_type.result[index]
    else:
        type_names_list = [
            x[0] for x in anonymous_tuple.to_elements(proto_type.result)
        ]
        index = type_names_list.index(name)
        result = [x for x in graph_result_binding.tuple.element][index]
        result_type = proto_type.result[index]
    serialized_type = type_serialization.serialize_type(
        computation_types.FunctionType(proto_type.parameter, result_type))
    selected_proto = pb.Computation(
        type=serialized_type,
        tensorflow=pb.TensorFlow(graph_def=proto.tensorflow.graph_def,
                                 initialize_op=proto.tensorflow.initialize_op,
                                 parameter=proto.tensorflow.parameter,
                                 result=result))
    return computation_building_blocks.CompiledComputation(selected_proto)
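
A sketch of selection by name, where `bar` is assumed to be a `CompiledComputation` of type (int32 -> <a=float32,b=bool>):

selected = select_graph_output(bar, name='b')
# The graph_def is reused untouched; only the result binding and the
# serialized type change, so `selected` should have type (int32 -> bool).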
def permute_graph_inputs(comp, input_permutation):
    r"""Remaps input indices of `comp` to match the `input_permutation`.

  Changes the order of the parameters of `comp`, an instance of
  `computation_building_blocks.CompiledComputation`. Accepts a permutation
  of the input tuple by index, and applies this permutation to the input
  bindings of `comp`. For example, given a `comp` which accepts a 3-tuple of
  types `[tf.int32, tf.float32, tf.bool]` as its parameter, passing in the
  input permutation

                          [2, 0, 1]

  would change the order of the parameter bindings accepted, so that
  `permute_graph_inputs` returns a
  `computation_building_blocks.CompiledComputation`
  accepting a 3-tuple of types `[tf.bool, tf.int32, tf.float32]`. Notice that
  we use one-line notation for our permutations, with beginning index 0
  (https://en.wikipedia.org/wiki/Permutation#One-line_notation).

  At the AST structural level, this is a no-op, as it simply takes in one
  instance of `computation_building_blocks.CompiledComputation` and returns
  another. However, it is necessary to make a replacement such as transforming:

                          Call
                         /    \
                    Graph      Tuple
                              / ... \
                  Selection(i)       Selection(j)
                       |                  |
                     Comp               Comp

  into:
                                     Call
                                    /    \
  permute_graph_inputs(Graph, [...])      Comp

  Args:
    comp: Instance of `computation_building_blocks.CompiledComputation` whose
      parameter bindings we wish to permute.
    input_permutation: The permutation we wish to apply to the parameter
      bindings of `comp` in 0-indexed one-line permutation notation. This can be
      a Python `list` or `tuple` of `int`s.

  Returns:
    An instance of `computation_building_blocks.CompiledComputation` whose
    parameter bindings are the result of applying `input_permutation` to the
    parameter bindings of `comp`.

  Raises:
    TypeError: If the types specified in the args section do not match.
  """

    py_typecheck.check_type(comp,
                            computation_building_blocks.CompiledComputation)
    py_typecheck.check_type(input_permutation, (tuple, list))
    permutation_length = len(input_permutation)
    for index in input_permutation:
        py_typecheck.check_type(index, int)
    proto = comp.proto
    graph_parameter_binding = proto.tensorflow.parameter
    proto_type = type_serialization.deserialize_type(proto.type)
    py_typecheck.check_type(proto_type.parameter,
                            computation_types.NamedTupleType)
    binding_oneof = graph_parameter_binding.WhichOneof('binding')
    if binding_oneof != 'tuple':
        raise TypeError(
            'Can only permute inputs of a CompiledComputation with parameter type '
            'tuple; you have attempted a permutation with a CompiledComputation '
            'with parameter type {}'.format(binding_oneof))

    original_parameter_type_elements = anonymous_tuple.to_elements(
        proto_type.parameter)
    original_parameter_bindings = [
        x for x in graph_parameter_binding.tuple.element
    ]

    def _is_permutation(ls):
        # Sorting is cheap here since these lists shouldn't be long.
        return sorted(ls) == list(range(permutation_length))

    if len(original_parameter_bindings
           ) != permutation_length or not _is_permutation(input_permutation):
        raise ValueError(
            'Can only map the inputs with a true permutation; that '
            'is, the position of each input element must be uniquely specified. '
            'You have tried to map inputs {} with permutation {}'.format(
                original_parameter_bindings, input_permutation))

    new_parameter_bindings = [
        original_parameter_bindings[k] for k in input_permutation
    ]
    new_parameter_type_elements = [
        original_parameter_type_elements[k] for k in input_permutation
    ]

    serialized_type = type_serialization.serialize_type(
        computation_types.FunctionType(new_parameter_type_elements,
                                       proto_type.result))
    permuted_proto = pb.Computation(
        type=serialized_type,
        tensorflow=pb.TensorFlow(graph_def=proto.tensorflow.graph_def,
                                 initialize_op=proto.tensorflow.initialize_op,
                                 parameter=pb.TensorFlow.Binding(
                                     tuple=pb.TensorFlow.NamedTupleBinding(
                                         element=new_parameter_bindings)),
                                 result=proto.tensorflow.result))
    return computation_building_blocks.CompiledComputation(permuted_proto)
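
The 0-indexed one-line notation the docstring describes reduces to a single comprehension, mirrored here on plain Python values:

original = ['int32', 'float32', 'bool']
input_permutation = [2, 0, 1]
# Element k of the permuted tuple comes from position input_permutation[k].
assert [original[k] for k in input_permutation] == ['bool', 'int32', 'float32']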
 def proto(self):
     return pb.Computation(type=type_serialization.serialize_type(
         self.type_signature),
                           placement=pb.Placement(uri=self._literal.uri))
 def proto(self):
     return pb.Computation(type=type_serialization.serialize_type(
         self.type_signature),
                           data=pb.Data(uri=self._uri))
 def proto(self):
     return pb.Computation(type=type_serialization.serialize_type(
         self.type_signature),
                           intrinsic=pb.Intrinsic(uri=self._uri))
 def proto(self):
     return pb.Computation(type=type_serialization.serialize_type(
         self.type_signature),
                           reference=pb.Reference(name=self._name))
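
The four `proto` properties above follow one pattern: serialize the block's type signature, then populate the matching oneof field of `pb.Computation` (placement, data, intrinsic, or reference). A hedged round-trip check, assuming the building-block constructors of the surrounding module:

ref = computation_building_blocks.Reference('arg', tf.int32)
assert type_serialization.deserialize_type(ref.proto.type) == ref.type_signature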