Example #1
def transform_to_native_form(
        comp: computation_base.Computation) -> computation_base.Computation:
    """Compiles a computation for execution in the TFF native runtime.

  This function transforms the proto underlying `comp` by converting it to
  call-dominant form (see `tff.framework.transform_to_call_dominant` for the
  definition).

  Args:
    comp: Instance of `computation_base.Computation` to compile.

  Returns:
    A new `computation_base.Computation` representing the compiled version of
    `comp`.
  """
    proto = computation_impl.ComputationImpl.get_proto(comp)
    computation_building_block = building_blocks.ComputationBuildingBlock.from_proto(
        proto)
    try:
        logging.debug('Compiling TFF computation.')
        call_dominant_form, _ = transformations.transform_to_call_dominant(
            computation_building_block)
        logging.debug('Computation compiled to:')
        logging.debug(call_dominant_form.formatted_representation())
        return computation_wrapper_instances.building_block_to_computation(
            call_dominant_form)
    except ValueError as e:
        logging.debug('Compilation for native runtime failed with error %s', e)
        logging.debug('computation: %s',
                      computation_building_block.compact_representation())
        return comp
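
A minimal usage sketch for the helper above, assuming a standard TFF install; the decorated computation below is illustrative and exact module paths may vary across TFF versions.

import tensorflow as tf
import tensorflow_federated as tff

@tff.federated_computation(tff.type_at_clients(tf.int32))
def sum_at_server(client_values):
  return tff.federated_sum(client_values)

# Returns a freshly compiled computation_base.Computation, or the original
# computation unchanged if compilation raised a ValueError (see the except
# branch above).
compiled = transform_to_native_form(sum_at_server)
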
Example #2
def transform_to_native_form(
        comp: computation_base.Computation) -> computation_base.Computation:
    """Compiles a computation for execution in the TFF native runtime.

  This function transforms the proto underlying `comp` by first converting it
  to call-dominant form (see `tff.framework.transform_to_call_dominant` for the
  definition), then computing information on the dependency structure of the
  bindings and remapping them into tuples, such that every computation is
  evaluated as early as possible, and parallelized with any other computation
  with which it shares dependency structure.

  Args:
    comp: Instance of `computation_base.Computation` to compile.

  Returns:
    A new `computation_base.Computation` representing the compiled version of
    `comp`.
  """
    proto = computation_impl.ComputationImpl.get_proto(comp)
    computation_building_block = building_blocks.ComputationBuildingBlock.from_proto(
        proto)
    try:
        logging.debug('Compiling TFF computation.')
        call_dominant_form, _ = transformations.transform_to_call_dominant(
            computation_building_block)
        logging.debug('Computation compiled to:')
        logging.debug(call_dominant_form.formatted_representation())
        return computation_wrapper_instances.building_block_to_computation(
            call_dominant_form)
    except ValueError as e:
        logging.debug('Compilation for native runtime failed with error %s', e)
        logging.debug('computation: %s',
                      computation_building_block.compact_representation())
        return comp
Example #3
    def test_handles_federated_broadcasts_nested_in_tuple(self):
        first_broadcast = compiler_test_utils.create_whimsy_called_federated_broadcast(
        )
        packed_broadcast = building_blocks.Struct([
            building_blocks.Data(
                'a',
                computation_types.FederatedType(
                    computation_types.TensorType(tf.int32),
                    placements.SERVER)), first_broadcast
        ])
        sel = building_blocks.Selection(packed_broadcast, index=0)
        second_broadcast = building_block_factory.create_federated_broadcast(
            sel)
        result, _ = compiler_transformations.transform_to_call_dominant(
            second_broadcast)
        comp = building_blocks.Lambda('a', tf.int32, result)
        uri = [intrinsic_defs.FEDERATED_BROADCAST.uri]

        before, after = transformations.force_align_and_split_by_intrinsics(
            comp, uri)

        self.assertIsInstance(before, building_blocks.Lambda)
        self.assertFalse(tree_analysis.contains_called_intrinsic(before, uri))
        self.assertIsInstance(after, building_blocks.Lambda)
        self.assertFalse(tree_analysis.contains_called_intrinsic(after, uri))
Example #4
  def test_splits_on_selected_intrinsic_nested_in_tuple_broadcast(self):
    first_broadcast = compiler_test_utils.create_whimsy_called_federated_broadcast(
    )
    packed_broadcast = building_blocks.Struct([
        building_blocks.Data('a', computation_types.at_server(tf.int32)),
        first_broadcast
    ])
    sel = building_blocks.Selection(packed_broadcast, index=0)
    second_broadcast = building_block_factory.create_federated_broadcast(sel)
    result, _ = compiler_transformations.transform_to_call_dominant(
        second_broadcast)
    comp = building_blocks.Lambda('a', tf.int32, result)
    call = building_block_factory.create_null_federated_broadcast()
    self.assert_splits_on(comp, call)
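
The `assert_splits_on` helper above belongs to the surrounding test class and is not shown here. As a hypothetical stand-in (not the actual TFF test utility), a check in the same spirit as the explicit assertions of Example #3 might look like this:

def _check_split(before, after, uri):
  # Mirrors Example #3: both halves are Lambdas, and neither still contains a
  # call to any of the intrinsics that were split upon.
  assert isinstance(before, building_blocks.Lambda)
  assert isinstance(after, building_blocks.Lambda)
  assert not tree_analysis.contains_called_intrinsic(before, uri)
  assert not tree_analysis.contains_called_intrinsic(after, uri)
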
Example #5
def _do_not_use_transform_to_native_form(comp):
    """Use `tff.backends.native.transform_to_native_form`."""
    proto = computation_impl.ComputationImpl.get_proto(comp)
    computation_building_block = building_blocks.ComputationBuildingBlock.from_proto(
        proto)
    try:
        logging.debug('Compiling TFF computation.')
        call_dominant_form, _ = transformations.transform_to_call_dominant(
            computation_building_block)
        logging.debug('Computation compiled to:')
        logging.debug(call_dominant_form.formatted_representation())
        return computation_wrapper_instances.building_block_to_computation(
            call_dominant_form)
    except ValueError as e:
        logging.debug('Compilation for native runtime failed with error %s', e)
        logging.debug('computation: %s',
                      computation_building_block.compact_representation())
        return comp
Example #6
def transform_to_native_form(
        comp: computation_base.Computation,
        transform_math_to_tf: bool = False) -> computation_base.Computation:
    """Compiles a computation for execution in the TFF native runtime.

  This function transforms the proto underlying `comp` by converting it to
  call-dominant form (see `tff.framework.transform_to_call_dominant` for the
  definition).

  Args:
    comp: Instance of `computation_base.Computation` to compile.
    transform_math_to_tf: Whether to additionally transform math to TensorFlow
      graphs. Necessary if running on an execution stack without
      ReferenceResolvingExecutors underneath FederatingExecutors.

  Returns:
    A new `computation_base.Computation` representing the compiled version of
    `comp`.
  """
    proto = computation_impl.ComputationImpl.get_proto(comp)
    computation_building_block = building_blocks.ComputationBuildingBlock.from_proto(
        proto)
    try:
        logging.debug('Compiling TFF computation to CDF.')
        call_dominant_form, _ = transformations.transform_to_call_dominant(
            computation_building_block)
        logging.debug('Computation compiled to:')
        logging.debug(call_dominant_form.formatted_representation())
        if transform_math_to_tf:
            logging.debug('Compiling local computations to TensorFlow.')
            call_dominant_form, _ = transformations.compile_local_computation_to_tensorflow(
                call_dominant_form)
            logging.debug('Computation compiled to:')
            logging.debug(call_dominant_form.formatted_representation())
        call_dominant_form, _ = tree_transformations.transform_tf_call_ops_to_disable_grappler(
            call_dominant_form)
        return computation_wrapper_instances.building_block_to_computation(
            call_dominant_form)
    except ValueError as e:
        logging.debug('Compilation for native runtime failed with error %s', e)
        logging.debug('computation: %s',
                      computation_building_block.compact_representation())
        return comp
Example #7
def _replace_lambda_body_with_call_dominant_form(
        comp: building_blocks.Lambda) -> building_blocks.Lambda:
    """Transforms the body of `comp` to call-dominant form.

  Call-dominant form ensures that all higher-order functions are fully
  resolved and that called intrinsics are pulled out into a top-level
  let-binding. This combination of conditions ensures, first, that
  pattern-matching on calls to intrinsics is sufficient to identify
  communication operators in `force_align_and_split_by_intrinsics`, and,
  second, that there are no nested intrinsics which would cause that function
  to fail.

  Args:
    comp: `building_blocks.Lambda` the body of which to convert to call-dominant
      form.

  Returns:
    A transformed version of `comp`, whose body is call-dominant.
  """
    lam_result = comp.result
    result_as_call_dominant, _ = compiler_transformations.transform_to_call_dominant(
        lam_result)
    return building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
                                  result_as_call_dominant)
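
A hedged usage sketch for the helper above: normalize a lambda before splitting it on an intrinsic, assuming the `intrinsic_defaults`-style signature of `force_align_and_split_by_intrinsics` shown in Example #9. `my_lambda` is a hypothetical, self-contained `building_blocks.Lambda`.

normalized = _replace_lambda_body_with_call_dominant_form(my_lambda)
before, after = force_align_and_split_by_intrinsics(
    normalized, [building_block_factory.create_null_federated_broadcast()])
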
Example #8
def transform_to_native_form(
    comp: computation_base.Computation,
    transform_math_to_tf: bool = False,
    grappler_config: Optional[tf.compat.v1.ConfigProto] = None
) -> computation_base.Computation:
  """Compiles a computation for execution in the TFF native runtime.

  This function transforms the proto underlying `comp` by converting it to
  call-dominant form (see `tff.framework.transform_to_call_dominant` for the
  definition).

  Args:
    comp: Instance of `computation_base.Computation` to compile.
    transform_math_to_tf: Whether to additionally transform math to TensorFlow
      graphs. Necessary if running on an execution stack without
      ReferenceResolvingExecutors underneath FederatingExecutors.
    grappler_config: Configuration for Grappler optimizations to perform on the
      TensorFlow computations. If `None`, Grappler will not be run and no
      optimizations will be applied.

  Returns:
    A new `computation_base.Computation` representing the compiled version of
    `comp`.
  """
  proto = computation_impl.ComputationImpl.get_proto(comp)
  computation_building_block = building_blocks.ComputationBuildingBlock.from_proto(
      proto)
  try:
    logging.debug('Compiling TFF computation to CDF.')
    with tracing.span(
        'transform_to_native_form', 'transform_to_call_dominant', span=True):
      call_dominant_form, _ = transformations.transform_to_call_dominant(
          computation_building_block)
    logging.debug('Computation compiled to:')
    logging.debug(call_dominant_form.formatted_representation())
    if transform_math_to_tf:
      logging.debug('Compiling local computations to TensorFlow.')
      with tracing.span(
          'transform_to_native_form',
          'compile_local_computations_to_tensorflow',
          span=True):
        call_dominant_form, _ = transformations.compile_local_computations_to_tensorflow(
            call_dominant_form)
      logging.debug('Computation compiled to:')
      logging.debug(call_dominant_form.formatted_representation())
    if grappler_config is not None:
      with tracing.span(
          'transform_to_native_form', 'optimize_tf_graphs', span=True):
        call_dominant_form, _ = transformations.optimize_tensorflow_graphs(
            call_dominant_form, grappler_config)
    with tracing.span(
        'transform_to_native_form',
        'transform_tf_call_ops_disable_grappler',
        span=True):
      disabled_grappler_form, _ = tree_transformations.transform_tf_call_ops_to_disable_grappler(
          call_dominant_form)
    with tracing.span(
        'transform_to_native_form', 'transform_tf_add_ids', span=True):
      form_with_ids, _ = tree_transformations.transform_tf_add_ids(
          disabled_grappler_form)
    return computation_wrapper_instances.building_block_to_computation(
        form_with_ids)
  except ValueError as e:
    logging.debug('Compilation for native runtime failed with error %s', e)
    logging.debug('computation: %s',
                  computation_building_block.compact_representation())
    return comp
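
A sketch of supplying the two optional arguments above; `my_computation` is a hypothetical `computation_base.Computation`. Per the docstring, passing `grappler_config=None` skips Grappler entirely.

import tensorflow as tf

# Default ConfigProto: Grappler runs with its standard rewriters. Setting
# graph_options.rewrite_options.disable_meta_optimizer = True would bypass it.
grappler_config = tf.compat.v1.ConfigProto()

compiled = transform_to_native_form(
    my_computation,
    transform_math_to_tf=True,
    grappler_config=grappler_config)
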
Example #9
def force_align_and_split_by_intrinsics(
    comp: building_blocks.Lambda,
    intrinsic_defaults: List[building_blocks.Call],
) -> Tuple[building_blocks.Lambda, building_blocks.Lambda]:
  """Divides `comp` into before-and-after of calls to one or more intrinsics.

  The input computation `comp` must have the following properties:

  1. The computation `comp` is completely self-contained, i.e., there are no
     references to arguments introduced in a scope external to `comp`.

  2. `comp`'s return value must not contain uncalled lambdas.

  3. None of the calls to intrinsics in `intrinsic_defaults` may be
     within a lambda passed to another external function (intrinsic or
     compiled computation).

  4. No argument passed to an intrinsic in `intrinsic_defaults` may be
     dependent on the result of a call to an intrinsic in
     `intrinsic_defaults`.

  5. All intrinsics in `intrinsic_defaults` must have "merge-able" arguments.
     Structs will be merged element-wise, federated values will be zipped, and
     functions will be composed:
       `f = lambda f1_arg, f2_arg: (f1(f1_arg), f2(f2_arg))`

  6. All intrinsics in `intrinsic_defaults` must return a single federated value
     whose member is the merged result of any merged calls, i.e.:
       `f(merged_arg).member = (f1(f1_arg).member, f2(f2_arg).member)`

  Under these conditions, this function will return two
  `building_blocks.Lambda`s `before` and `after` such that `comp` is
  semantically equivalent to the following expression*:

  ```
  (arg -> (let
    x=before(arg),
    y=intrinsic1(x[0]),
    z=intrinsic2(x[1]),
    ...
   in after(<arg, <y,z,...>>)))
  ```

  *Note that these expressions may not be entirely equivalent under
  nondeterminism, since there is no way to handle computations in which
  `before` creates a random variable that is then used in `after`: the only
  way for state to pass from `before` to `after` is for it to travel through
  one of the intrinsics.

  In this expression, there is only a single call to each intrinsic, resulting
  from consolidating all occurrences of that intrinsic in the original `comp`.
  All logic in `comp` that produced inputs to any of these intrinsic calls is
  now consolidated and jointly encapsulated in `before`, which produces a
  combined argument to all the original calls. All the remaining logic in
  `comp`, including that which consumed the outputs of the intrinsic calls,
  must have been encapsulated into `after`.

  If the original computation `comp` had type `(T -> U)`, then `before` and
  `after` would be `(T -> X)` and `(<T,Y> -> U)`, respectively, where `X` is
  the type of the argument to the single combined intrinsic call above. Note
  that `after` takes the output of the call to the intrinsic as well as the
  original argument to `comp`, as it may be dependent on both.

  Args:
    comp: The instance of `building_blocks.Lambda` that serves as the input to
      this transformation, as described above.
    intrinsic_defaults: A list of intrinsics with which to split the
      computation, provided as a list of `Call`s to insert if no intrinsic with
      a matching URI is found. Intrinsics in this list will be merged, and
      `comp` will be split across them.

  Returns:
    A pair of the form `(before, after)`, where each of `before` and `after`
    is a `building_blocks.ComputationBuildingBlock` instance that represents a
    part of the result as specified above.
  """
  py_typecheck.check_type(comp, building_blocks.Lambda)
  py_typecheck.check_type(intrinsic_defaults, list)
  comp_repr = comp.compact_representation()

  # Flatten `comp` to call-dominant form so that we're working with just a
  # linear list of intrinsic calls with no indirection via tupling, selection,
  # blocks, called lambdas, or references.
  comp, _ = transformations.transform_to_call_dominant(comp)

  # CDF can potentially return blocks if there are variables not dependent on
  # the top-level parameter. We normalize these away.
  if not comp.is_lambda():
    comp.check_block()
    comp.result.check_lambda()
    if comp.result.result.is_block():
      additional_locals = comp.result.result.locals
      result = comp.result.result.result
    else:
      additional_locals = []
      result = comp.result.result
    # Note: without uniqueness, a local in `comp.locals` could potentially
    # shadow `comp.result.parameter_name`. However, `transform_to_call_dominant`
    # above ensures that names are unique, as it ends in a call to
    # `uniquify_reference_names`.
    comp = building_blocks.Lambda(
        comp.result.parameter_name, comp.result.parameter_type,
        building_blocks.Block(comp.locals + additional_locals, result))
  comp.check_lambda()

  # Simple computations with no intrinsic calls won't have a block.
  # Normalize these as well.
  if not comp.result.is_block():
    comp = building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
                                  building_blocks.Block([], comp.result))
  comp.result.check_block()

  name_generator = building_block_factory.unique_name_generator(comp)

  intrinsic_uris = set(call.function.uri for call in intrinsic_defaults)
  deps = _compute_intrinsic_dependencies(intrinsic_uris, comp.parameter_name,
                                         comp.result.locals, comp_repr)
  merged_intrinsics = _compute_merged_intrinsics(intrinsic_defaults,
                                                 deps.uri_to_locals,
                                                 name_generator)

  # Note: the outputs are labeled as `{uri}_param` for convenience, e.g.
  # `federated_secure_sum_param: ...`.
  before = building_blocks.Lambda(
      comp.parameter_name, comp.parameter_type,
      building_blocks.Block(
          deps.locals_not_dependent_on_intrinsics,
          building_blocks.Struct([(f'{merged.uri}_param', merged.args)
                                  for merged in merged_intrinsics])))

  after_param_name = next(name_generator)
  after_param_type = computation_types.StructType([
      ('original_arg', comp.parameter_type),
      ('intrinsic_results',
       computation_types.StructType([(f'{merged.uri}_result',
                                      merged.return_type)
                                     for merged in merged_intrinsics])),
  ])
  after_param_ref = building_blocks.Reference(after_param_name,
                                              after_param_type)
  unzip_bindings = []
  for merged in merged_intrinsics:
    if merged.unpack_to_locals:
      intrinsic_result = building_blocks.Selection(
          building_blocks.Selection(after_param_ref, name='intrinsic_results'),
          name=f'{merged.uri}_result')
      select_param_type = intrinsic_result.type_signature.member
      for i, binding_name in enumerate(merged.unpack_to_locals):
        select_param_name = next(name_generator)
        select_param_ref = building_blocks.Reference(select_param_name,
                                                     select_param_type)
        selected = building_block_factory.create_federated_map_or_apply(
            building_blocks.Lambda(
                select_param_name, select_param_type,
                building_blocks.Selection(select_param_ref, index=i)),
            intrinsic_result)
        unzip_bindings.append((binding_name, selected))
  after = building_blocks.Lambda(
      after_param_name,
      after_param_type,
      building_blocks.Block(
          [(comp.parameter_name,
            building_blocks.Selection(after_param_ref, name='original_arg'))] +
          # Note that we must duplicate `locals_not_dependent_on_intrinsics`
          # across both the `before` and `after` computations since both can
          # rely on them, and there's no way to plumb results from `before`
          # through to `after` except via one of the intrinsics being split
          # upon. In MapReduceForm, this limitation is caused by the fact that
          # `prepare` has no output which serves as an input to `report`.
          deps.locals_not_dependent_on_intrinsics + unzip_bindings +
          deps.locals_dependent_on_intrinsics,
          comp.result.result))
  try:
    tree_analysis.check_has_unique_names(before)
    tree_analysis.check_has_unique_names(after)
  except:
    raise ValueError(f'nonunique names in result of splitting\n{comp}')
  return before, after
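
To illustrate the `before`/`after` contract described in the docstring, a hypothetical driver (not TFF API; `execute` and `intrinsic_impls` are assumptions) could recombine the two halves roughly as follows:

def run_split(execute, before, after, intrinsic_impls, arg):
  # `before` maps the original argument to a struct of `{uri}_param` values.
  intrinsic_args = execute(before, arg)
  # Each merged intrinsic is evaluated exactly once on its packed argument.
  intrinsic_results = {
      f'{uri}_result': impl(intrinsic_args[f'{uri}_param'])
      for uri, impl in intrinsic_impls.items()
  }
  # `after` receives the original argument plus all intrinsic results.
  return execute(after, {'original_arg': arg,
                         'intrinsic_results': intrinsic_results})
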
Example #10
def consolidate_and_extract_local_processing(comp, grappler_config_proto):
  """Consolidates all the local processing in `comp`.

  The input computation `comp` must have the following properties:

  1. The output of `comp` may be of a federated type or unplaced. We refer to
     the placement `p` of that type as the placement of `comp`. There is no
     placement anywhere in the body of `comp` different than `p`. If `comp`
     is of a functional type, and has a parameter, the type of that parameter
     is a federated type placed at `p` as well, or unplaced if the result of
     the function is unplaced.

  2. The only intrinsics that may appear in the body of `comp` are those that
     manipulate data locally within the same placement. The exact set of these
     intrinsics will be gradually updated. At the moment, we support only the
     following:

     * Either `federated_apply` or `federated_map`, depending on whether `comp`
       is `SERVER`- or `CLIENTS`-placed. `federated_map_all_equal` is also
       allowed in the `CLIENTS`-placed case.

     * Either `federated_value_at_server` or `federated_value_at_clients`,
       likewise placement-dependent.

     * Either `federated_zip_at_server` or `federated_zip_at_clients`, again
       placement-dependent.

     Anything else, including `sequence_*` operators, should have been reduced
     already prior to calling this function.

  3. There are no lambdas in the body of `comp` except for `comp` itself being
     possibly a (top-level) lambda. All other lambdas must have been reduced.
     This requirement may eventually be relaxed by embedding a lambda reducer
     into this helper method.

  4. If `comp` is of a functional type, it is either an instance of
     `building_blocks.CompiledComputation`, in which case there is nothing for
     us to do here, or a `building_blocks.Lambda`.

  5. There is at most one unbound reference under `comp`, and this is only
     allowed in the case that `comp` is not of a functional type.

  Aside from the intrinsics specified above, and the possibility of allowing
  lambdas, blocks, and references given the constraints above, the remaining
  constructs in `comp` include a combination of tuples, selections, calls, and
  sections of TensorFlow (as `CompiledComputation`s). This helper function does
  contain the logic to consolidate these constructs.

  The output of this transformation is always a single section of TensorFlow,
  which we henceforth refer to as `result`, the exact form of which depends on
  the placement of `comp` and the presence or absence of an argument.

  a. If there is no argument in `comp`, and `comp` is `SERVER`-placed, then
     the `result` is such that `comp` can be equivalently represented as:

     ```
     federated_value_at_server(result())
     ```

  b. If there is no argument in `comp`, and `comp` is `CLIENTS`-placed, then
     the `result` is such that `comp` can be equivalently represented as:

     ```
     federated_value_at_clients(result())
     ```

  c. If there is an argument in `comp`, and `comp` is `SERVER`-placed, then
     the `result` is such that `comp` can be equivalently represented as:

     ```
     (arg -> federated_apply(<result, arg>))
     ```

  d. If there is an argument in `comp`, and `comp` is `CLIENTS`-placed, then
     the `result` is such that `comp` can be equivalently represented as:

     ```
     (arg -> federated_map(<result, arg>))
     ```

  If the type of `comp` is `T@p` (thus `comp` is non-functional), the type of
  `result` is `T`, where `p` is the specific (concrete) placement of `comp`.

  If the type of `comp` is `(T@p -> U@p)`, then the type of `result` must be
  `(T -> U)`, where `p` is again a specific placement.

  Args:
    comp: An instance of `building_blocks.ComputationBuildingBlock` that serves
      as the input to this transformation, as described above.
    grappler_config_proto: An instance of `tf.compat.v1.ConfigProto` to
      configure Grappler graph optimization of the generated TensorFlow graph.
      If `grappler_config_proto` has
      `graph_options.rewrite_options.disable_meta_optimizer=True`, Grappler is
      bypassed.

  Returns:
    An instance of `building_blocks.CompiledComputation` that holds the
    TensorFlow section produced by this extraction step, as described above.
  """
  py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  comp.type_signature.check_function()
  # Drop any unused subcomputations which may reference placements different
  # from the result.
  simplified, _ = transformations.transform_to_call_dominant(comp)
  unplaced, _ = tree_transformations.strip_placement(simplified)
  extracted = parse_tff_to_tf(unplaced, grappler_config_proto)
  check_extraction_result(unplaced, extracted)
  return extracted
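
A brief sketch of invoking the function above with Grappler effectively disabled, as described in the `grappler_config_proto` docstring; `my_building_block` is a hypothetical `building_blocks.Lambda` satisfying the stated preconditions.

import tensorflow as tf

bypass_grappler = tf.compat.v1.ConfigProto()
bypass_grappler.graph_options.rewrite_options.disable_meta_optimizer = True

compiled_tf = consolidate_and_extract_local_processing(
    my_building_block, bypass_grappler)
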