Example #1
def serialize_value(
    value: Any,
    type_spec: Optional[computation_types.Type] = None,
    *,
    value_proto: Optional[serialization_bindings.Value] = None
) -> computation_types.Type:
    """Serializes a value into `serialization_bindings.Value`.

  We use a switch/function pattern in the body here (and in `deserialize_value`
  below in order to persist more information in traces and profiling.

  Args:
    value: A value to be serialized.
    type_spec: Optional type spec, a `tff.Type` or something convertible to it.
    value_proto: The protobuf instance to serialize into. `value_proto` will
      first be cleared before serializing.

  Returns:
    An instance of `tff.Type` that represents the TFF type of the serialized
    value.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
    if value_proto is None:
        value_proto = serialization_bindings.Value()
    type_spec = computation_types.to_type(type_spec)
    if isinstance(value, computation_pb2.Computation):
        return _serialize_computation(value, type_spec)
    elif isinstance(value, computation_impl.ComputationImpl):
        return _serialize_computation(
            computation_impl.ComputationImpl.get_proto(value),
            executor_utils.reconcile_value_with_type_spec(value, type_spec))
    elif type_spec is None:
        raise TypeError(
            'A type hint is required when serializing a value which '
            'is not a TFF computation. Asked to serialize value {v} '
            'of type {t} with None type spec.'.format(v=value, t=type(value)))
    elif type_spec.is_tensor():
        return _serialize_tensor_value(value_proto, value, type_spec)
    elif type_spec.is_sequence():
        return _serialize_sequence_value(value_proto, value, type_spec)
    elif type_spec.is_struct():
        return _serialize_struct_type(value_proto, value, type_spec)
    elif type_spec.is_federated():
        return _serialize_federated_value(value_proto, value, type_spec)
    else:
        raise ValueError(
            'Unable to serialize value with Python type {} and {} TFF type.'.
            format(str(py_typecheck.type_string(type(value))),
                   str(type_spec) if type_spec is not None else 'unknown'))
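
The docstring above attributes the explicit switch/function layout to tracing and profiling: each kind of value is handled by a named helper, so each case shows up as its own frame in traces and profiles. Below is a minimal, self-contained sketch of that dispatch style; the serializer names and the byte encoding are illustrative and not part of TFF.

from typing import Any


def _serialize_int(value: Any) -> bytes:
    # Stand-in for a per-kind helper such as `_serialize_tensor_value`; doing
    # the work in a named function gives profilers a distinct frame per case.
    return int(value).to_bytes(8, 'little', signed=True)


def _serialize_str(value: Any) -> bytes:
    return str(value).encode('utf-8')


def serialize(value: Any) -> bytes:
    # The top-level function only dispatches on the kind of `value`.
    if isinstance(value, bool):
        raise TypeError('bool is not supported in this sketch.')
    elif isinstance(value, int):
        return _serialize_int(value)
    elif isinstance(value, str):
        return _serialize_str(value)
    else:
        raise TypeError(f'Unable to serialize value of type {type(value)}.')


assert serialize(42) == (42).to_bytes(8, 'little', signed=True)
assert serialize('abc') == b'abc'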
Example #2
 async def create_value(self, value, type_spec=None):
     type_spec = computation_types.to_type(type_spec)
     if isinstance(value, computation_impl.ComputationImpl):
         return await self.create_value(
             computation_impl.ComputationImpl.get_proto(value),
             executor_utils.reconcile_value_with_type_spec(
                 value, type_spec))
     py_typecheck.check_type(type_spec, computation_types.Type)
     hashable_key = _get_hashable_key(value, type_spec)
     try:
         identifier = self._cache.get(hashable_key)
     except TypeError as err:
         raise RuntimeError(
             'Failed to perform a hash table lookup with a value of Python '
             'type {} and TFF type {}, and payload {}: {}'.format(
                 py_typecheck.type_string(type(value)), type_spec, value,
                 err))
     if isinstance(identifier, CachedValueIdentifier):
         cached_value = self._cache.get(identifier)
         # It may be that the same payload appeared with a mismatching type
         # spec, which may be a legitimate use case if (as it happens) the
         # payload alone does not uniquely determine the type, so we simply opt
         # not to reuse the cached value and fall back on the regular behavior.
         if (cached_value is not None and type_spec is not None and
                 not cached_value.type_signature.is_equivalent_to(type_spec)):
             identifier = None
     else:
         identifier = None
     if identifier is None:
         self._num_values_created = self._num_values_created + 1
         identifier = CachedValueIdentifier(str(self._num_values_created))
         self._cache[hashable_key] = identifier
         target_future = asyncio.ensure_future(
             self._target_executor.create_value(value, type_spec))
         cached_value = None
     if cached_value is None:
         cached_value = CachedValue(identifier, hashable_key, type_spec,
                                    target_future)
         self._cache[identifier] = cached_value
     try:
         await cached_value.target_future
     except Exception:
         # Invalidate the entire cache if the inner executor had an exception.
         # TODO(b/145514490): This is a bit heavy-handed; there may be cases
         # where only the current cache item needs to be invalidated. However,
         # this currently only occurs when an inner RemoteExecutor has the
         # backend go down.
         self._cache = {}
         raise
     # No type check is necessary here; we have either checked
     # `is_equivalent_to` above or just constructed `cached_value` explicitly
     # with `type_spec`.
     return cached_value
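
The method above caches embedded values by a hashable key, shares a single in-flight future per key, and clears the whole cache when the delegated call fails. The following is a self-contained asyncio sketch of that pattern, with a plain async callable standing in for the target executor; all names here are illustrative.

import asyncio


class CachingWrapper:
    """Sketch of the cache-and-invalidate pattern above; not the TFF class."""

    def __init__(self, create_fn):
        self._create_fn = create_fn  # async callable, e.g. target.create_value
        self._cache = {}

    async def create(self, key, *args):
        future = self._cache.get(key)
        if future is None:
            # Start the work once; later callers with the same key await the
            # same future instead of repeating the call.
            future = asyncio.ensure_future(self._create_fn(*args))
            self._cache[key] = future
        try:
            return await future
        except Exception:
            # Heavy-handed, like the TODO above: drop the whole cache when the
            # delegated call raises, then re-raise.
            self._cache = {}
            raise


async def _demo():
    calls = []

    async def expensive(x):
        calls.append(x)
        return x * 2

    wrapper = CachingWrapper(expensive)
    assert await wrapper.create('k', 3) == 6
    assert await wrapper.create('k', 3) == 6  # served from the cache
    assert calls == [3]


asyncio.run(_demo())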
Example #3
def to_representation_for_type(value, type_spec=None, backend=None):
    """Verifies or converts the `value` to executor payload matching `type_spec`.

  The following kinds of `value` are supported:

  * Computations, `pb.Computation` or `computation_impl.ConcreteComputation`.
    These are compiled and converted into `runtime.ComputationCallable`.

  * Numpy arrays and scalars, or Python scalars that are converted to Numpy.

  Args:
    value: The raw representation of a value to compare against `type_spec` and
      potentially to be converted.
    type_spec: An instance of `tff.Type`. Can be `None` for values that derive
      from `typed_object.TypedObject`.
    backend: Optional information about the backend, only required for
      computations. Must be `None` or an instance of `backend_info.BackendInfo`.

  Returns:
    Either `value` itself, or a modified version of it.

  Raises:
    TypeError: If the `value` is not compatible with `type_spec`.
    ValueError: If the arguments are incorrect (e.g., missing `backend` for a
      computation-typed `value`).
  """
    type_spec = executor_utils.reconcile_value_with_type_spec(value, type_spec)
    if backend is not None:
        py_typecheck.check_type(backend, backend_info.BackendInfo)
    if isinstance(value, computation_impl.ConcreteComputation):
        return to_representation_for_type(
            computation_impl.ConcreteComputation.get_proto(value),
            type_spec=type_spec,
            backend=backend)
    elif isinstance(value, pb.Computation):
        if backend is None:
            raise ValueError('Missing backend info for a computation.')
        module = compiler.import_tensorflow_computation(value)
        return runtime.ComputationCallable(module, backend)
    elif isinstance(type_spec, computation_types.TensorType):
        type_spec.shape.assert_is_fully_defined()
        type_analysis.check_type(value, type_spec)
        if type_spec.shape.rank == 0:
            return np.dtype(type_spec.dtype.as_numpy_dtype).type(value)
        elif type_spec.shape.rank > 0:
            return np.array(value, dtype=type_spec.dtype.as_numpy_dtype)
        else:
            raise TypeError('Unsupported tensor shape {}.'.format(
                type_spec.shape))
    else:
        raise TypeError('Unexpected type {}.'.format(type_spec))
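
Once the type has been checked, the tensor branch above reduces to two NumPy conversions: a dtype-typed scalar for rank 0 and an `np.array` for higher ranks. A numpy-only illustration, with `np.float32` standing in for `type_spec.dtype.as_numpy_dtype`:

import numpy as np

scalar = np.dtype(np.float32).type(3)            # rank 0: a NumPy scalar
vector = np.array([1, 2, 3], dtype=np.float32)   # rank > 0: an ndarray

assert scalar.shape == () and scalar.dtype == np.float32
assert vector.shape == (3,) and vector.dtype == np.float32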
Example #4
def to_representation_for_type(value, type_spec, backend=None):
    """Verifies or converts the `value` to executor payload matching `type_spec`.

  The following kinds of `value` are supported:

  * Computations, either `pb.Computation` or `computation_impl.ComputationImpl`.

  * Numpy arrays and scalars, or Python scalars that are converted to Numpy.

  * Nested structures of the above.

  Args:
    value: The raw representation of a value to compare against `type_spec` and
      potentially to be converted.
    type_spec: An instance of `tff.Type`. Can be `None` for values that derive
      from `typed_object.TypedObject`.
    backend: The backend to use; an instance of `xla_client.Client`. Only used
      for functional types. Can be `None` if unused.

  Returns:
    Either `value` itself, or a modified version of it.

  Raises:
    TypeError: If the `value` is not compatible with `type_spec`.
    ValueError: If the arguments are incorrect.
  """
    if backend is not None:
        py_typecheck.check_type(backend, xla_client.Client)
    if type_spec is not None:
        type_spec = computation_types.to_type(type_spec)
    type_spec = executor_utils.reconcile_value_with_type_spec(value, type_spec)
    if isinstance(value, computation_base.Computation):
        return to_representation_for_type(
            computation_impl.ComputationImpl.get_proto(value), type_spec,
            backend)
    if isinstance(value, pb.Computation):
        comp_type = type_serialization.deserialize_type(value.type)
        if type_spec is not None:
            comp_type.check_equivalent_to(type_spec)
        return runtime.ComputationCallable(value, comp_type, backend)
    if isinstance(type_spec, computation_types.StructType):
        return structure.map_structure(
            lambda v, t: to_representation_for_type(v, t, backend),
            structure.from_container(value, recursive=True), type_spec)
    if isinstance(type_spec, computation_types.TensorType):
        return runtime.normalize_tensor_representation(value, type_spec)
    raise TypeError('Unexpected type {}.'.format(type_spec))
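
The `StructType` branch above recurses elementwise, pairing each element of the container with the corresponding element type via `structure.map_structure`. Below is a hedged sketch of that recursion using plain dicts in place of `structure.Struct`, with `normalize` standing in for the tensor-level conversion; none of these names are the TFF API.

import numpy as np


def normalize(value, dtype):
    # Stand-in for the tensor branch (`runtime.normalize_tensor_representation`).
    return np.asarray(value, dtype=dtype)


def to_representation_for_struct(value, element_types):
    # Recurse over matching elements, as `structure.map_structure` does above.
    return {
        name: (to_representation_for_struct(value[name], spec)
               if isinstance(spec, dict) else normalize(value[name], spec))
        for name, spec in element_types.items()
    }


rep = to_representation_for_struct(
    {'a': 1, 'b': {'c': [1.0, 2.0]}},
    {'a': np.int32, 'b': {'c': np.float32}})
assert rep['a'].dtype == np.int32
assert rep['b']['c'].dtype == np.float32
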
 async def create_value(self, value, type_spec=None):
   type_spec = computation_types.to_type(type_spec)
   if isinstance(value, computation_impl.ComputationImpl):
     return await self.create_value(
         computation_impl.ComputationImpl.get_proto(value),
         executor_utils.reconcile_value_with_type_spec(value, type_spec))
   elif isinstance(value, pb.Computation):
     return await self._evaluate(value)
   elif type_spec is not None and type_spec.is_struct():
     v_el = structure.to_elements(structure.from_container(value))
     vals = await asyncio.gather(
         *[self.create_value(val, t) for (_, val), t in zip(v_el, type_spec)])
     return ReferenceResolvingExecutorValue(
         structure.Struct((name, val) for (name, _), val in zip(v_el, vals)))
   else:
     return ReferenceResolvingExecutorValue(
         await self._target_executor.create_value(value, type_spec))
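
The struct branch of the `create_value` above fans out one nested `create_value` call per element, awaits them together with `asyncio.gather`, and then reassembles the named structure. A self-contained sketch of that fan-out, with `embed_leaf` standing in for `self._target_executor.create_value`:

import asyncio


async def embed_leaf(value):
    await asyncio.sleep(0)  # pretend to talk to a target executor
    return ('embedded', value)


async def embed_struct(named_values):
    values = await asyncio.gather(*[embed_leaf(v) for _, v in named_values])
    return [(name, v) for (name, _), v in zip(named_values, values)]


result = asyncio.run(embed_struct([('a', 1), ('b', 2)]))
assert result == [('a', ('embedded', 1)), ('b', ('embedded', 2))]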
Example #6
 async def create_value(self, value, type_spec=None):
     if isinstance(value, computation_impl.ComputationImpl):
         return await self.create_value(
             computation_impl.ComputationImpl.get_proto(value),
             executor_utils.reconcile_value_with_type_spec(
                 value, type_spec))
     elif isinstance(value, pb.Computation):
         return await self.create_value(
             building_blocks.ComputationBuildingBlock.from_proto(value),
             type_spec)
     elif isinstance(value, building_blocks.ComputationBuildingBlock):
         value = self._transformation_fn(value)
         py_typecheck.check_type(value,
                                 building_blocks.ComputationBuildingBlock)
         return await self._target_executor.create_value(
             value.proto, type_spec)
     else:
         return await self._target_executor.create_value(value, type_spec)
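
Example #6 rewrites a computation building block with `self._transformation_fn` before handing it to the target executor, and passes every other value through untouched. A sketch of that transform-then-delegate shape with plain callables standing in for executors (all names here are illustrative, not the TFF classes):

import asyncio


class TransformingWrapper:

    def __init__(self, target_create_value, transformation_fn):
        self._target_create_value = target_create_value
        self._transformation_fn = transformation_fn

    async def create_value(self, value):
        if isinstance(value, str):
            # Rewrite the "computation" first, then hand it to the target,
            # mirroring the building-block branch above.
            value = self._transformation_fn(value)
        return await self._target_create_value(value)


async def _demo():
    async def target(value):
        return ('created', value)

    wrapper = TransformingWrapper(target, str.upper)
    assert await wrapper.create_value('add_one') == ('created', 'ADD_ONE')
    assert await wrapper.create_value(42) == ('created', 42)


asyncio.run(_demo())
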
 def test_reconcile_value_with_type_spec_raises_type_error(
     self, value, type_spec):
   with self.assertRaises(TypeError):
     executor_utils.reconcile_value_with_type_spec(value, type_spec)
 def test_reconcile_value_with_type_spec_returns_type(self, value, type_spec,
                                                      expected_type):
   actual_type = executor_utils.reconcile_value_with_type_spec(
       value, type_spec)
   self.assertEqual(actual_type, expected_type)
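
The two tests above pin down the contract of `executor_utils.reconcile_value_with_type_spec`: it either returns a reconciled type or raises `TypeError`. The following is an illustrative reimplementation of that contract, not the TFF source; the exact reconciliation rules in TFF may differ.

class TypedObject:

    def __init__(self, type_signature):
        self.type_signature = type_signature


def reconcile_value_with_type_spec(value, type_spec):
    # If the value carries its own type, use it; otherwise require a spec.
    value_type = getattr(value, 'type_signature', None)
    if type_spec is None:
        if value_type is None:
            raise TypeError('A type spec is required for untyped values.')
        return value_type
    if value_type is not None and value_type != type_spec:
        raise TypeError('Value type {} does not match type spec {}.'.format(
            value_type, type_spec))
    return type_spec


assert reconcile_value_with_type_spec(TypedObject('int32'), None) == 'int32'
assert reconcile_value_with_type_spec(10, 'int32') == 'int32'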
Example #9
def to_representation_for_type(
    value: Any,
    tf_function_cache: MutableMapping[str, Any],
    type_spec: Optional[computation_types.Type] = None,
    device: Optional[tf.config.LogicalDevice] = None) -> Any:
  """Verifies or converts the `value` to an eager object matching `type_spec`.

  WARNING: This function is only partially implemented. It does not support
  data sets at this point.

  The output of this function is always an eager tensor, eager dataset, a
  representation of a TensorFlow computation, or a nested structure of those
  that matches `type_spec`, and when `device` has been specified, everything
  is placed on that device on a best-effort basis.

  TensorFlow computations are represented here as zero- or one-argument Python
  callables that accept their entire argument bundle as a single Python object.

  Args:
    value: The raw representation of a value to compare against `type_spec` and
      potentially to be converted.
    tf_function_cache: A cache obeying `dict` semantics that can be used to look
      up previously embedded TensorFlow functions.
    type_spec: An instance of `tff.Type`, can be `None` for values that derive
      from `typed_object.TypedObject`.
    device: An optional `tf.config.LogicalDevice` to place the value on (for
      tensor-level values).

  Returns:
    Either `value` itself, or a modified version of it.

  Raises:
    TypeError: If the `value` is not compatible with `type_spec`.
  """
  type_spec = executor_utils.reconcile_value_with_type_spec(value, type_spec)
  if isinstance(value, computation_impl.ConcreteComputation):
    return to_representation_for_type(
        computation_impl.ConcreteComputation.get_proto(value),
        tf_function_cache, type_spec, device)
  elif isinstance(value, pb.Computation):
    computation_oneof = value.WhichOneof('computation')
    if computation_oneof != 'tensorflow':
      raise ValueError('Eager TF Executor can only execute computations of '
                       'TensorFlow flavor; encountered a computation of type '
                       f'{computation_oneof}')
    return _to_computation_internal_rep(
        value=value,
        tf_function_cache=tf_function_cache,
        type_spec=type_spec,
        device=device)
  elif type_spec.is_struct():
    return _to_struct_internal_rep(
        value=value,
        tf_function_cache=tf_function_cache,
        type_spec=type_spec,
        device=device)
  elif device is not None:
    py_typecheck.check_type(device, tf.config.LogicalDevice)
    with tf.device(device.name):
      return to_representation_for_type(
          value, tf_function_cache, type_spec=type_spec, device=None)
  elif isinstance(value, EagerValue):
    return value.internal_representation
  elif isinstance(value, executor_value_base.ExecutorValue):
    raise TypeError(
        'Cannot accept a value embedded within a non-eager executor.')
  elif type_spec.is_tensor():
    return _to_tensor_internal_rep(value=value, type_spec=type_spec)
  elif type_spec.is_sequence():
    return _to_sequence_internal_rep(value=value, type_spec=type_spec)
  else:
    raise TypeError(
        f'Unexpected type {type_spec} for value of type {type(value)}: {value}')
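
When `device` is set, the function above re-enters itself under `tf.device(device.name)` so that tensor embedding lands on that device on a best-effort basis. A minimal TensorFlow sketch of that re-entry follows; it assumes TensorFlow is installed, and `as_tensor` is an illustrative stand-in for `_to_tensor_internal_rep`.

import tensorflow as tf


def as_tensor(value, dtype, device=None):
    if device is not None:
        # Re-enter without a device, but inside the placement scope.
        with tf.device(device.name):
            return as_tensor(value, dtype, device=None)
    return tf.constant(value, dtype=dtype)


cpu = tf.config.list_logical_devices('CPU')[0]
t = as_tensor([1, 2, 3], tf.int32, device=cpu)
assert 'CPU' in t.device
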
  async def create_value(
      self,
      value: Any,
      type_spec: Any = None) -> executor_value_base.ExecutorValue:
    """Creates an embedded value from the given `value` and `type_spec`.

    The kinds of supported `value`s are:

    * An instance of `intrinsic_defs.IntrinsicDef`.

    * An instance of `placements.PlacementLiteral`.

    * An instance of `pb.Computation` if of one of the following kinds:
      intrinsic, lambda, tensorflow, xla, or data.

    * A Python `list` if `type_spec` is a federated type.

      Note: The `value` must be a list even if it is of an `all_equal` type or
      if there is only a single participant associated with the given placement.

    * A Python value if `type_spec` is a non-functional, non-federated type.

    Args:
      value: An object to embed in the executor, one of the supported kinds
        listed above.
      type_spec: An optional type convertible to an instance of `tff.Type` via
        `tff.to_type`, the type of `value`.

    Returns:
      An instance of `executor_value_base.ExecutorValue` representing a value
      embedded in the `FederatingExecutor` using a particular
      `FederatingStrategy`.

    Raises:
      TypeError: If the `value` and `type_spec` do not match.
      ValueError: If `value` is not a kind supported by the
        `FederatingExecutor`.
    """
    type_spec = computation_types.to_type(type_spec)
    if isinstance(value, intrinsic_defs.IntrinsicDef):
      type_analysis.check_concrete_instance_of(type_spec, value.type_signature)
      return self._strategy.ingest_value(value, type_spec)
    elif isinstance(value, placements.PlacementLiteral):
      if type_spec is None:
        type_spec = computation_types.PlacementType()
      type_spec.check_placement()
      return self._strategy.ingest_value(value, type_spec)
    elif isinstance(value, computation_impl.ConcreteComputation):
      return await self.create_value(
          computation_impl.ConcreteComputation.get_proto(value),
          executor_utils.reconcile_value_with_type_spec(value, type_spec))
    elif isinstance(value, pb.Computation):
      deserialized_type = type_serialization.deserialize_type(value.type)
      if type_spec is None:
        type_spec = deserialized_type
      else:
        type_spec.check_assignable_from(deserialized_type)
      which_computation = value.WhichOneof('computation')
      if which_computation in ['lambda', 'tensorflow', 'xla', 'data']:
        return self._strategy.ingest_value(value, type_spec)
      elif which_computation == 'intrinsic':
        if value.intrinsic.uri in FederatingExecutor._FORWARDED_INTRINSICS:
          return self._strategy.ingest_value(value, type_spec)
        intrinsic_def = intrinsic_defs.uri_to_intrinsic_def(value.intrinsic.uri)
        if intrinsic_def is None:
          raise ValueError('Encountered an unrecognized intrinsic "{}".'.format(
              value.intrinsic.uri))
        return await self.create_value(intrinsic_def, type_spec)
      else:
        raise ValueError(
            'Unsupported computation building block of type "{}".'.format(
                which_computation))
    elif type_spec is not None and type_spec.is_federated():
      return await self._strategy.compute_federated_value(value, type_spec)
    else:
      result = await self._unplaced_executor.create_value(value, type_spec)
      return self._strategy.ingest_value(result, type_spec)
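
The docstring above requires federated inputs to arrive as a Python list with one element per participant, even for `all_equal` types. A plain-Python sketch of how a strategy might enforce that convention (the function name and checks are illustrative, not the `FederatingStrategy` API):

def ingest_clients_value(value, num_clients):
    # Federated values are handed over as a list of per-client payloads.
    if not isinstance(value, list):
        raise TypeError(
            'Expected a list of per-client values, found {}.'.format(
                type(value).__name__))
    if len(value) != num_clients:
        raise ValueError('Expected {} per-client values, found {}.'.format(
            num_clients, len(value)))
    return list(value)


assert ingest_clients_value([1.0, 2.0, 3.0], num_clients=3) == [1.0, 2.0, 3.0]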