Example #1
0
def to_representation_for_type(value, type_spec=None, backend=None):
    """Verifies or converts the `value` to executor payload matching `type_spec`.

    Supported kinds of `value`:

    * Computations, either `pb.Computation` or
      `computation_impl.ComputationImpl`; these are compiled and converted
      into `runtime.ComputationCallable`.

    * Numpy arrays and scalars, or Python scalars that are converted to Numpy.

    Args:
      value: The raw representation of a value to compare against `type_spec`
        and potentially to be converted.
      type_spec: An instance of `tff.Type`. Can be `None` for values that
        derive from `typed_object.TypedObject`.
      backend: Optional information about the backend, only required for
        computations. Must be `None` or an instance of
        `backend_info.BackendInfo`.

    Returns:
      Either `value` itself, or a modified version of it.

    Raises:
      TypeError: If the `value` is not compatible with `type_spec`.
      ValueError: If the arguments are incorrect (e.g., missing `backend` for
        a computation-typed `value`).
    """
    type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec)
    if backend is not None:
        py_typecheck.check_type(backend, backend_info.BackendInfo)
    if isinstance(value, computation_base.Computation):
        # Unwrap to the proto representation and recurse.
        proto = computation_impl.ComputationImpl.get_proto(value)
        return to_representation_for_type(
            proto, type_spec=type_spec, backend=backend)
    if isinstance(value, pb.Computation):
        if backend is None:
            raise ValueError('Missing backend info for a computation.')
        module = compiler.import_tensorflow_computation(value)
        return runtime.ComputationCallable(module, backend)
    if isinstance(type_spec, computation_types.TensorType):
        type_spec.shape.assert_is_fully_defined()
        type_analysis.check_type(value, type_spec)
        numpy_dtype = type_spec.dtype.as_numpy_dtype
        rank = type_spec.shape.rank
        if rank == 0:
            return np.dtype(numpy_dtype).type(value)
        if rank > 0:
            return np.array(value, dtype=numpy_dtype)
        raise TypeError('Unsupported tensor shape {}.'.format(type_spec.shape))
    raise TypeError('Unexpected type {}.'.format(type_spec))
Example #2
0
 async def create_value(self, value, type_spec=None):
   """Embeds `value` in the target executor, reusing a cached copy if present.

   Args:
     value: The raw value to embed, or a `computation_impl.ComputationImpl`.
     type_spec: An optional `tff.Type` (or something convertible to it)
       describing `value`; may be `None` only for typed objects.

   Returns:
     A `CachedValue` wrapping the value embedded in the target executor.

   Raises:
     RuntimeError: If the value cannot be hashed for the cache lookup.
   """
   type_spec = computation_types.to_type(type_spec)
   if isinstance(value, computation_impl.ComputationImpl):
     # Unwrap computations to their proto form before caching.
     return await self.create_value(
         computation_impl.ComputationImpl.get_proto(value),
         type_utils.reconcile_value_with_type_spec(value, type_spec))
   py_typecheck.check_type(type_spec, computation_types.Type)
   hashable_key = _get_hashable_key(value, type_spec)
   try:
     identifier = self._cache.get(hashable_key)
   except TypeError as err:
     raise RuntimeError(
         'Failed to perform a hash table lookup with a value of Python '
         'type {} and TFF type {}, and payload {}: {}'.format(
             py_typecheck.type_string(type(value)), type_spec, value, err))
   cached_value = None
   if isinstance(identifier, CachedValueIdentifier):
     cached_value = self._cache.get(identifier)
     # It may be that the same payload appeared with a mismatching type spec,
     # which may be a legitimate use case if (as it happens) the payload alone
     # does not uniquely determine the type, so we simply opt not to reuse the
     # cached value and fall back on the regular behavior. Likewise, if the
     # cached entry itself has been dropped (`cached_value is None`), treat
     # this as a cache miss; otherwise the `target_future` reference below
     # would be undefined.
     if (cached_value is None or
         (type_spec is not None and
          not cached_value.type_signature.is_equivalent_to(type_spec))):
       identifier = None
   else:
     identifier = None
   if identifier is None:
     self._num_values_created = self._num_values_created + 1
     identifier = CachedValueIdentifier(str(self._num_values_created))
     self._cache[hashable_key] = identifier
     target_future = asyncio.ensure_future(
         self._target_executor.create_value(value, type_spec))
     cached_value = None
   if cached_value is None:
     cached_value = CachedValue(identifier, hashable_key, type_spec,
                                target_future)
     self._cache[identifier] = cached_value
   try:
     await cached_value.target_future
   except Exception:
     # Invalidate the entire cache if the inner executor had an exception.
     # TODO(b/145514490): This is a bit heavy handed; there may be caches
     # where only the current cache item needs to be invalidated; however this
     # currently only occurs when an inner RemoteExecutor has the backend go
     # down.
     self._cache = {}
     raise
   # No type check is necessary here; we have either checked
   # `is_equivalent_to` or just constructed `target_value`
   # explicitly with `type_spec`.
   return cached_value
Example #3
0
 async def create_value(self, value, type_spec=None):
   """Embeds `value` in the target executor, transforming computations.

   Computations are normalized to building blocks, passed through the
   configured transformation function, and then handed to the target
   executor; all other payloads are forwarded unchanged.
   """
   if isinstance(value, computation_impl.ComputationImpl):
     proto = computation_impl.ComputationImpl.get_proto(value)
     reconciled = type_utils.reconcile_value_with_type_spec(value, type_spec)
     return await self.create_value(proto, reconciled)
   if isinstance(value, pb.Computation):
     building_block = (
         building_blocks.ComputationBuildingBlock.from_proto(value))
     return await self.create_value(building_block, type_spec)
   if isinstance(value, building_blocks.ComputationBuildingBlock):
     transformed = self._transformation_fn(value)
     py_typecheck.check_type(
         transformed, building_blocks.ComputationBuildingBlock)
     return await self._target_executor.create_value(
         transformed.proto, type_spec)
   return await self._target_executor.create_value(value, type_spec)
Example #4
0
 async def create_value(self, value, type_spec=None):
     """Embeds `value` in the target executor, caching repeated payloads.

     Args:
       value: The raw value to embed, or a `computation_impl.ComputationImpl`.
       type_spec: An optional `tff.Type` (or something convertible to it).

     Returns:
       A `CachedValue` pointing at the value embedded in the target executor.

     Raises:
       RuntimeError: If the value cannot be hashed for the cache lookup.
     """
     type_spec = computation_types.to_type(type_spec)
     if isinstance(value, computation_impl.ComputationImpl):
         return await self.create_value(
             computation_impl.ComputationImpl.get_proto(value),
             type_utils.reconcile_value_with_type_spec(value, type_spec))
     py_typecheck.check_type(type_spec, computation_types.Type)
     hashable_key = _get_hashable_key(value, type_spec)
     try:
         identifier = self._cache[hashable_key]
     except KeyError:
         identifier = None
     except TypeError as err:
         # NOTE: fixed message typo ('has table' -> 'hash table').
         raise RuntimeError(
             'Failed to perform a hash table lookup with a value of Python '
             'type {} and TFF type {}, and payload {}: {}'.format(
                 py_typecheck.type_string(type(value)), type_spec, value,
                 err))
     cached_value = None
     if isinstance(identifier, CachedValueIdentifier):
         try:
             cached_value = self._cache[identifier]
         except KeyError:
             cached_value = None
         # It may be that the same payload appeared with a mismatching type
         # spec, which may be a legitimate use case if (as it happens) the
         # payload alone does not uniquely determine the type, so we simply
         # opt not to reuse the cached value and fall back on the regular
         # behavior. Likewise, if the cached entry itself has been dropped
         # (`cached_value is None`), treat this as a cache miss; otherwise
         # the `target_future` reference below would be undefined.
         if (cached_value is None
                 or (type_spec is not None
                     and not type_utils.are_equivalent_types(
                         cached_value.type_signature, type_spec))):
             identifier = None
     else:
         identifier = None
     if identifier is None:
         self._num_values_created = self._num_values_created + 1
         identifier = CachedValueIdentifier(str(self._num_values_created))
         self._cache[hashable_key] = identifier
         target_future = asyncio.ensure_future(
             self._target_executor.create_value(value, type_spec))
         cached_value = None
     if cached_value is None:
         cached_value = CachedValue(identifier, hashable_key, type_spec,
                                    target_future)
         self._cache[identifier] = cached_value
     await cached_value.target_future
     # No type check is necessary here; we have either checked
     # `type_utils.are_equivalent_types` or just constructed `target_value`
     # explicitly with `type_spec`.
     return cached_value
Example #5
0
 async def create_value(self, value, type_spec=None):
     """Tracks `value` as a `LambdaExecutorValue`, delegating where needed.

     Computations are held symbolically, named tuples are embedded
     element-wise, and anything else is delegated to the target executor.
     """
     type_spec = computation_types.to_type(type_spec)
     if isinstance(value, computation_impl.ComputationImpl):
         proto = computation_impl.ComputationImpl.get_proto(value)
         reconciled = type_utils.reconcile_value_with_type_spec(
             value, type_spec)
         return await self.create_value(proto, reconciled)
     if isinstance(value, pb.Computation):
         result = LambdaExecutorValue(value)
         type_utils.reconcile_value_with_type_spec(result, type_spec)
         return result
     if isinstance(type_spec, computation_types.NamedTupleType):
         elements = anonymous_tuple.to_elements(
             anonymous_tuple.from_container(value))
         embedded = await asyncio.gather(*[
             self.create_value(element, element_type)
             for (_, element), element_type in zip(elements, type_spec)
         ])
         return LambdaExecutorValue(
             anonymous_tuple.AnonymousTuple(
                 (name, element)
                 for (name, _), element in zip(elements, embedded)))
     delegated = await self._target_executor.create_value(value, type_spec)
     return LambdaExecutorValue(delegated)
def serialize_value(
    value: Any,
    type_spec: Optional[computation_types.Type] = None
) -> _SerializeReturnType:
    """Serializes a value into `executor_pb2.Value`.

    We use a switch/function pattern in the body here (and in
    `deserialize_value` below) in order to persist more information in traces
    and profiling.

    Args:
      value: A value to be serialized.
      type_spec: Optional type spec, a `tff.Type` or something convertible to
        it.

    Returns:
      A tuple `(value_proto, ret_type_spec)` where `value_proto` is an
      instance of `executor_pb2.Value` with the serialized content of `value`,
      and the returned `ret_type_spec` is an instance of `tff.Type` that
      represents the TFF type of the serialized value.

    Raises:
      TypeError: If the arguments are of the wrong types.
      ValueError: If the value is malformed.
    """
    type_spec = computation_types.to_type(type_spec)
    if isinstance(value, computation_pb2.Computation):
        return _serialize_computation(value, type_spec)
    elif isinstance(value, computation_impl.ComputationImpl):
        # Unwrap to the proto form and reconcile the declared type.
        return _serialize_computation(
            computation_impl.ComputationImpl.get_proto(value),
            type_utils.reconcile_value_with_type_spec(value, type_spec))
    elif type_spec is None:
        # NOTE: fixed message typos ('serialized value' -> 'serialize value',
        # doubled space before 'of type').
        raise TypeError(
            'A type hint is required when serializing a value which '
            'is not a TFF computation. Asked to serialize value {v} '
            'of type {t} with None type spec.'.format(v=value, t=type(value)))
    elif type_spec.is_tensor():
        return _serialize_tensor_value(value, type_spec)
    elif type_spec.is_sequence():
        return _serialize_sequence_value(value, type_spec)
    elif type_spec.is_struct():
        return _serialize_struct_type(value, type_spec)
    elif type_spec.is_federated():
        return _serialize_federated_value(value, type_spec)
    else:
        raise ValueError(
            'Unable to serialize value with Python type {} and {} TFF type.'.
            format(str(py_typecheck.type_string(type(value))),
                   str(type_spec) if type_spec is not None else 'unknown'))
Example #7
0
def serialize_value(value, type_spec=None):
    """Serializes a value into `executor_pb2.Value`.

    Args:
      value: A value to be serialized.
      type_spec: Optional type spec, a `tff.Type` or something convertible to
        it.

    Returns:
      A tuple `(value_proto, ret_type_spec)` where `value_proto` is an
      instance of `executor_pb2.Value` with the serialized content of `value`,
      and the returned `ret_type_spec` is an instance of `tff.Type` that
      represents the TFF type of the serialized value.

    Raises:
      TypeError: If the arguments are of the wrong types.
      ValueError: If the value is malformed.
    """
    type_spec = computation_types.to_type(type_spec)
    if isinstance(value, computation_pb2.Computation):
        reconciled = type_utils.reconcile_value_type_with_type_spec(
            type_serialization.deserialize_type(value.type), type_spec)
        return executor_pb2.Value(computation=value), reconciled
    if isinstance(value, computation_impl.ComputationImpl):
        return serialize_value(
            computation_impl.ComputationImpl.get_proto(value),
            type_utils.reconcile_value_with_type_spec(value, type_spec))
    if isinstance(type_spec, computation_types.TensorType):
        return serialize_tensor_value(value, type_spec)
    if isinstance(type_spec, computation_types.NamedTupleType):
        element_types = anonymous_tuple.to_elements(type_spec)
        element_values = anonymous_tuple.to_elements(
            anonymous_tuple.from_container(value))
        elements = [
            executor_pb2.Value.Tuple.Element(
                name=name if name else None,
                value=serialize_value(element, element_type)[0])
            for (name, element_type), (_, element) in zip(
                element_types, element_values)
        ]
        tuple_proto = executor_pb2.Value(
            tuple=executor_pb2.Value.Tuple(element=elements))
        return tuple_proto, type_spec
    raise ValueError(
        'Unable to serialize value with Python type {} and {} TFF type.'.
        format(str(py_typecheck.type_string(type(value))),
               str(type_spec) if type_spec is not None else 'unknown'))
Example #8
0
 async def create_value(self, value, type_spec=None):
   """Wraps `value` as a `ReferenceResolvingExecutorValue`.

   Computations are evaluated locally, structs are embedded element-wise,
   and all other payloads are delegated to the target executor.
   """
   type_spec = computation_types.to_type(type_spec)
   if isinstance(value, computation_impl.ComputationImpl):
     proto = computation_impl.ComputationImpl.get_proto(value)
     reconciled = type_utils.reconcile_value_with_type_spec(value, type_spec)
     return await self.create_value(proto, reconciled)
   if isinstance(value, pb.Computation):
     return await self._evaluate(value)
   if type_spec is not None and type_spec.is_struct():
     elements = structure.to_elements(structure.from_container(value))
     embedded = await asyncio.gather(
         *[self.create_value(v, t) for (_, v), t in zip(elements, type_spec)])
     return ReferenceResolvingExecutorValue(
         structure.Struct(
             (name, v) for (name, _), v in zip(elements, embedded)))
   delegated = await self._target_executor.create_value(value, type_spec)
   return ReferenceResolvingExecutorValue(delegated)
Example #9
0
  def test_reconcile_value_with_type_spec(self):
    """Covers reconciliation for plain values and computations."""
    reconciled = type_utils.reconcile_value_with_type_spec(10, tf.int32)
    self.assertEqual(str(reconciled), 'int32')

    @computations.tf_computation(tf.bool)
    def comp(x):
      return x

    self.assertEqual(
        str(type_utils.reconcile_value_with_type_spec(comp, None)),
        '(bool -> bool)')

    expected_fn_type = computation_types.FunctionType(tf.bool, tf.bool)
    self.assertEqual(
        str(type_utils.reconcile_value_with_type_spec(comp, expected_fn_type)),
        '(bool -> bool)')

    self.assertRaises(
        TypeError, type_utils.reconcile_value_with_type_spec, 10, None)
    self.assertRaises(
        TypeError, type_utils.reconcile_value_with_type_spec, comp, tf.int32)
Example #10
0
def serialize_value(value, type_spec=None):
    """Serializes a value into `executor_pb2.Value`.

  Args:
    value: A value to be serialized.
    type_spec: Optional type spec, a `tff.Type` or something convertible to it.

  Returns:
    A tuple `(value_proto, ret_type_spec)` where `value_proto` is an instance
    of `executor_pb2.Value` with the serialized content of `value`, and the
    returned `ret_type_spec` is an instance of `tff.Type` that represents the
    TFF type of the serialized value.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
    type_spec = computation_types.to_type(type_spec)
    if isinstance(value, computation_pb2.Computation):
        # Computation protos carry their own type; reconcile it against the
        # caller's hint and embed the proto directly.
        type_spec = type_utils.reconcile_value_type_with_type_spec(
            type_serialization.deserialize_type(value.type), type_spec)
        return executor_pb2.Value(computation=value), type_spec
    elif isinstance(value, computation_impl.ComputationImpl):
        # Unwrap to the underlying proto and recurse.
        return serialize_value(
            computation_impl.ComputationImpl.get_proto(value),
            type_utils.reconcile_value_with_type_spec(value, type_spec))
    elif isinstance(type_spec, computation_types.TensorType):
        return serialize_tensor_value(value, type_spec)
    elif isinstance(type_spec, computation_types.NamedTupleType):
        # Serialize tuples element-wise, pairing value elements with the
        # declared element types positionally.
        type_elements = anonymous_tuple.to_elements(type_spec)
        val_elements = anonymous_tuple.to_elements(
            anonymous_tuple.from_container(value))
        tup_elems = []
        for (e_name, e_type), (_, e_val) in zip(type_elements, val_elements):
            e_proto, _ = serialize_value(e_val, e_type)
            tup_elems.append(
                executor_pb2.Value.Tuple.Element(
                    name=e_name if e_name else None, value=e_proto))
        result_proto = (executor_pb2.Value(tuple=executor_pb2.Value.Tuple(
            element=tup_elems)))
        return result_proto, type_spec
    elif isinstance(type_spec, computation_types.SequenceType):
        # Only TF dataset representations can be serialized as sequences.
        if not isinstance(value,
                          type_conversions.TF_DATASET_REPRESENTATION_TYPES):
            raise TypeError(
                'Cannot serialize Python type {!s} as TFF type {!s}.'.format(
                    py_typecheck.type_string(type(value)),
                    type_spec if type_spec is not None else 'unknown'))

        value_type = computation_types.SequenceType(
            computation_types.to_type(value.element_spec))
        # The dataset's actual element type must be assignable to the
        # declared sequence element type.
        if not type_analysis.is_assignable_from(type_spec, value_type):
            raise TypeError(
                'Cannot serialize dataset with elements of type {!s} as TFF type {!s}.'
                .format(value_type,
                        type_spec if type_spec is not None else 'unknown'))

        return serialize_sequence_value(value), type_spec
    elif isinstance(type_spec, computation_types.FederatedType):
        # An `all_equal` value is a single payload; otherwise we expect one
        # payload per participant, supplied as a list.
        if type_spec.all_equal:
            value = [value]
        else:
            py_typecheck.check_type(value, list)
        items = []
        for v in value:
            it, it_type = serialize_value(v, type_spec.member)
            type_analysis.check_assignable_from(type_spec.member, it_type)
            items.append(it)
        result_proto = executor_pb2.Value(
            federated=executor_pb2.Value.Federated(
                type=type_serialization.serialize_type(type_spec).federated,
                value=items))
        return result_proto, type_spec
    else:
        raise ValueError(
            'Unable to serialize value with Python type {} and {} TFF type.'.
            format(str(py_typecheck.type_string(type(value))),
                   str(type_spec) if type_spec is not None else 'unknown'))
Example #11
0
def to_representation_for_type(
        value: Any,
        tf_function_cache: MutableMapping[str, Any],
        type_spec: Optional[computation_types.Type] = None,
        device: Optional[tf.config.LogicalDevice] = None) -> Any:
    """Verifies or converts the `value` to an eager object matching `type_spec`.

  WARNING: This function is only partially implemented. It does not support
  data sets at this point.

  The output of this function is always an eager tensor, eager dataset, a
  representation of a TensorFlow computation, or a nested structure of those
  that matches `type_spec`, and when `device` has been specified, everything
  is placed on that device on a best-effort basis.

  TensorFlow computations are represented here as zero- or one-argument Python
  callables that accept their entire argument bundle as a single Python object.

  Args:
    value: The raw representation of a value to compare against `type_spec` and
      potentially to be converted.
    tf_function_cache: A cache obeying `dict` semantics that can be used to look
      up previously embedded TensorFlow functions.
    type_spec: An instance of `tff.Type`, can be `None` for values that derive
      from `typed_object.TypedObject`.
    device: An optional `tf.config.LogicalDevice` to place the value on (for
      tensor-level values).

  Returns:
    Either `value` itself, or a modified version of it.

  Raises:
    TypeError: If the `value` is not compatible with `type_spec`.
  """
    # NOTE(review): the order of the branches below is significant — e.g. the
    # `device` branch must come before the tensor/sequence branches so the
    # value is materialized inside the `tf.device` scope.
    type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec)
    if isinstance(value, computation_base.Computation):
        # Unwrap to the proto form and recurse.
        return to_representation_for_type(
            computation_impl.ComputationImpl.get_proto(value),
            tf_function_cache, type_spec, device)
    elif isinstance(value, pb.Computation):
        return _to_computation_internal_rep(
            value=value,
            tf_function_cache=tf_function_cache,
            type_spec=type_spec,
            device=device)
    elif type_spec.is_struct():
        # Structs are handled element-wise by the helper.
        return _to_struct_internal_rep(value=value,
                                       tf_function_cache=tf_function_cache,
                                       type_spec=type_spec,
                                       device=device)
    elif device is not None:
        py_typecheck.check_type(device, tf.config.LogicalDevice)
        # Recurse with `device=None` inside the device scope so the leaf
        # branches below run under the requested placement.
        with tf.device(device.name):
            return to_representation_for_type(value,
                                              tf_function_cache,
                                              type_spec=type_spec,
                                              device=None)
    elif isinstance(value, EagerValue):
        # Already embedded in this executor; unwrap its payload.
        return value.internal_representation
    elif isinstance(value, executor_value_base.ExecutorValue):
        raise TypeError(
            'Cannot accept a value embedded within a non-eager executor.')
    elif type_spec.is_tensor():
        return _to_tensor_internal_rep(value=value, type_spec=type_spec)
    elif type_spec.is_sequence():
        return _to_sequence_internal_rep(value=value, type_spec=type_spec)
    else:
        raise TypeError('Unexpected type {}.'.format(type_spec))
Example #12
0
 async def create_value(self, value, type_spec=None):
     """Embeds `value` as a `FederatedExecutorValue` in this executor.

     Handles, in order: intrinsic definitions, placement literals,
     `ComputationImpl` wrappers (unwrapped to proto and retried), each kind
     of `pb.Computation` building block, and finally raw payloads which are
     dispatched to the child executor(s) selected by the placement in
     `type_spec`.

     Args:
       value: The value to embed (computation, intrinsic, placement, or a
         raw payload for a child executor).
       type_spec: An optional `tff.Type` describing `value`; required for
         raw payloads.

     Returns:
       A `FederatedExecutorValue`.

     Raises:
       TypeError: If `value` is incompatible with `type_spec`.
       ValueError: If the computation or placement is malformed or not
         configured in this executor.
     """
     type_spec = computation_types.to_type(type_spec)
     if isinstance(value, intrinsic_defs.IntrinsicDef):
         # The concrete type requested must specialize the intrinsic's
         # (possibly polymorphic) signature.
         if not type_utils.is_concrete_instance_of(type_spec,
                                                   value.type_signature):
             raise TypeError(
                 'Incompatible type {} used with intrinsic {}.'.format(
                     type_spec, value.uri))
         else:
             return FederatedExecutorValue(value, type_spec)
     if isinstance(value, placement_literals.PlacementLiteral):
         if type_spec is not None:
             py_typecheck.check_type(type_spec,
                                     computation_types.PlacementType)
         return FederatedExecutorValue(value,
                                       computation_types.PlacementType())
     elif isinstance(value, computation_impl.ComputationImpl):
         # Unwrap to the proto form and recurse.
         return await self.create_value(
             computation_impl.ComputationImpl.get_proto(value),
             type_utils.reconcile_value_with_type_spec(value, type_spec))
     elif isinstance(value, pb.Computation):
         if type_spec is None:
             type_spec = type_serialization.deserialize_type(value.type)
         # Dispatch on the building-block kind carried by the proto.
         which_computation = value.WhichOneof('computation')
         if which_computation in ['tensorflow', 'lambda']:
             return FederatedExecutorValue(value, type_spec)
         elif which_computation == 'reference':
             # References must already be resolved by an outer executor.
             raise ValueError(
                 'Encountered an unexpected unbound references "{}".'.
                 format(value.reference.name))
         elif which_computation == 'intrinsic':
             intr = intrinsic_defs.uri_to_intrinsic_def(value.intrinsic.uri)
             if intr is None:
                 raise ValueError(
                     'Encountered an unrecognized intrinsic "{}".'.format(
                         value.intrinsic.uri))
             py_typecheck.check_type(intr, intrinsic_defs.IntrinsicDef)
             return await self.create_value(intr, type_spec)
         elif which_computation == 'placement':
             return await self.create_value(
                 placement_literals.uri_to_placement_literal(
                     value.placement.uri), type_spec)
         elif which_computation == 'call':
             # Embed function (and argument, if any) concurrently, then call.
             parts = [value.call.function]
             if value.call.argument.WhichOneof('computation'):
                 parts.append(value.call.argument)
             parts = await asyncio.gather(
                 *[self.create_value(x) for x in parts])
             return await self.create_call(
                 parts[0], parts[1] if len(parts) > 1 else None)
         elif which_computation == 'tuple':
             # Embed all elements concurrently, preserving element names.
             element_values = await asyncio.gather(
                 *[self.create_value(x.value) for x in value.tuple.element])
             return await self.create_tuple(
                 anonymous_tuple.AnonymousTuple([
                     (e.name if e.name else None, v)
                     for e, v in zip(value.tuple.element, element_values)
                 ]))
         elif which_computation == 'selection':
             # A selection is either by name or by index, never both.
             which_selection = value.selection.WhichOneof('selection')
             if which_selection == 'name':
                 name = value.selection.name
                 index = None
             elif which_selection != 'index':
                 raise ValueError(
                     'Unrecognized selection type: "{}".'.format(
                         which_selection))
             else:
                 index = value.selection.index
                 name = None
             return await self.create_selection(await self.create_value(
                 value.selection.source),
                                                index=index,
                                                name=name)
         else:
             raise ValueError(
                 'Unsupported computation building block of type "{}".'.
                 format(which_computation))
     else:
         # Raw payload: route to the child executor(s) for its placement.
         py_typecheck.check_type(type_spec, computation_types.Type)
         if isinstance(type_spec, computation_types.FunctionType):
             raise ValueError(
                 'Encountered a value of a functional TFF type {} and Python type '
                 '{} that is not of one of the recognized representations.'.
                 format(type_spec, py_typecheck.type_string(type(value))))
         elif isinstance(type_spec, computation_types.FederatedType):
             children = self._target_executors.get(type_spec.placement)
             if not children:
                 raise ValueError(
                     'Placement "{}" is not configured in this executor.'.
                     format(type_spec.placement))
             py_typecheck.check_type(children, list)
             if not type_spec.all_equal:
                 # Non-all_equal values carry one item per participant.
                 py_typecheck.check_type(value,
                                         (list, tuple, set, frozenset))
                 if not isinstance(value, list):
                     value = list(value)
             elif isinstance(value, list):
                 raise ValueError(
                     'An all_equal value should be passed directly, not as a list.'
                 )
             else:
                 # Replicate the single all_equal payload to every child.
                 value = [value for _ in children]
             if len(value) != len(children):
                 raise ValueError(
                     'Federated value contains {} items, but the placement {} in this '
                     'executor is configured with {} participants.'.format(
                         len(value), type_spec.placement, len(children)))
             child_vals = await asyncio.gather(*[
                 c.create_value(v, type_spec.member)
                 for v, c in zip(value, children)
             ])
             return FederatedExecutorValue(child_vals, type_spec)
         else:
             # Unplaced value: there must be exactly one unplaced child.
             child = self._target_executors.get(None)
             if not child or len(child) > 1:
                 raise RuntimeError(
                     'Executor is not configured for unplaced values.')
             else:
                 return FederatedExecutorValue(
                     await child[0].create_value(value, type_spec),
                     type_spec)
Example #13
0
 def test_reconcile_value_with_type_spec_raises_type_error(
         self, value, type_spec):
     """Reconciling `value` with `type_spec` must raise `TypeError`."""
     self.assertRaises(TypeError,
                       type_utils.reconcile_value_with_type_spec, value,
                       type_spec)
Example #14
0
 def test_reconcile_value_with_type_spec_returns_type(
         self, value, type_spec, expected_type):
     """Reconciling `value` with `type_spec` yields `expected_type`."""
     self.assertEqual(
         type_utils.reconcile_value_with_type_spec(value, type_spec),
         expected_type)
Example #15
0
def to_representation_for_type(value, type_spec=None, device=None):
    """Verifies or converts the `value` to an eager object matching `type_spec`.

  WARNING: This function is only partially implemented. It does not support
  data sets at this point.

  The output of this function is always an eager tensor, eager dataset, a
  representation of a TensorFlow computation, or a nested structure of those
  that matches `type_spec`, and when `device` has been specified, everything
  is placed on that device on a best-effort basis.

  TensorFlow computations are represented here as zero- or one-argument Python
  callables that accept their entire argument bundle as a single Python object.

  Args:
    value: The raw representation of a value to compare against `type_spec` and
      potentially to be converted.
    type_spec: An instance of `tff.Type`, can be `None` for values that derive
      from `typed_object.TypedObject`.
    device: The optional device to place the value on (for tensor-level values).

  Returns:
    Either `value` itself, or a modified version of it.

  Raises:
    TypeError: If the `value` is not compatible with `type_spec`.
  """
    # Fill in a missing `type_spec` from the value itself (when it is a
    # TypedObject), or verify consistency when both are supplied.
    type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec)
    if isinstance(value, computation_base.Computation):
        # Unwrap a computation object into its serialized proto form and
        # re-dispatch through this same function.
        return to_representation_for_type(
            computation_impl.ComputationImpl.get_proto(value), type_spec,
            device)
    elif isinstance(value, pb.Computation):
        # A serialized TF computation becomes an embedded Python callable.
        return embed_tensorflow_computation(value, type_spec, device)
    elif isinstance(type_spec, computation_types.NamedTupleType):
        # Convert element-wise; lengths and element names must match exactly.
        type_elem = anonymous_tuple.to_elements(type_spec)
        value_elem = (anonymous_tuple.to_elements(
            anonymous_tuple.from_container(value)))
        result_elem = []
        if len(type_elem) != len(value_elem):
            raise TypeError(
                'Expected a {}-element tuple, found {} elements.'.format(
                    len(type_elem), len(value_elem)))
        for (t_name, el_type), (v_name, el_val) in zip(type_elem, value_elem):
            if t_name != v_name:
                raise TypeError(
                    'Mismatching element names in type vs. value: {} vs. {}.'.
                    format(t_name, v_name))
            # `device` is threaded through so leaf tensors end up placed on it.
            el_repr = to_representation_for_type(el_val, el_type, device)
            result_elem.append((t_name, el_repr))
        return anonymous_tuple.AnonymousTuple(result_elem)
    elif device is not None:
        # Materialize the value under the requested device scope, recursing
        # with `device=None` so this branch is not re-entered.
        py_typecheck.check_type(device, str)
        with tf.device(device):
            return to_representation_for_type(value,
                                              type_spec=type_spec,
                                              device=None)
    elif isinstance(value, EagerValue):
        # Already embedded in this executor; reuse its internal payload.
        return value.internal_representation
    elif isinstance(value, executor_value_base.ExecutorValue):
        raise TypeError(
            'Cannot accept a value embedded within a non-eager executor.')
    elif isinstance(type_spec, computation_types.TensorType):
        if not tf.is_tensor(value):
            value = tf.convert_to_tensor(value, dtype=type_spec.dtype)
        elif hasattr(value, 'read_value'):
            # a tf.Variable-like result, get a proper tensor.
            value = value.read_value()
        # Verify the tensor's observed dtype/shape is assignable to the
        # declared `type_spec`.
        value_type = (computation_types.TensorType(value.dtype.base_dtype,
                                                   value.shape))
        if not type_utils.is_assignable_from(type_spec, value_type):
            raise TypeError(
                'The apparent type {} of a tensor {} does not match the expected '
                'type {}.'.format(value_type, value, type_spec))
        return value
    elif isinstance(type_spec, computation_types.SequenceType):
        # A Python list is first turned into a dataset of the element type;
        # anything else must already be a recognized dataset representation.
        if isinstance(value, list):
            value = tensorflow_utils.make_data_set_from_elements(
                None, value, type_spec.element)
        py_typecheck.check_type(value,
                                type_utils.TF_DATASET_REPRESENTATION_TYPES)
        element_type = computation_types.to_type(
            tf.data.experimental.get_structure(value))
        value_type = computation_types.SequenceType(element_type)
        type_utils.check_assignable_from(type_spec, value_type)
        return value
    else:
        raise TypeError('Unexpected type {}.'.format(type_spec))
Пример #16
0
    async def create_value(
            self,
            value: Any,
            type_spec: Any = None) -> executor_value_base.ExecutorValue:
        """Creates an embedded value from the given `value` and `type_spec`.

    The kinds of supported `value`s are:

    * An instance of `intrinsic_defs.IntrinsicDef`.

    * An instance of `placement_literals.PlacementLiteral`.

    * An instance of `pb.Computation` if of one of the following kinds:
      intrinsic, lambda, and tensorflow.

    * A Python `list` if `type_spec` is a federated type.

      Note: The `value` must be a list even if it is of an `all_equal` type or
      if there is only a single participant associated with the given placement.

    * A Python value if `type_spec` is a non-functional, non-federated type.

    Args:
      value: An object to embed in the executor, one of the supported types
        defined above.
      type_spec: An optional type convertible to instance of `tff.Type` via
        `tff.to_type`, the type of `value`.

    Returns:
      An instance of `executor_value_base.ExecutorValue` representing a value
      embedded in the `FederatingExecutor` using a particular
      `FederatingStrategy`.

    Raises:
      TypeError: If the `value` and `type_spec` do not match.
      ValueError: If `value` is not a kind supported by the
        `FederatingExecutor`.
    """
        type_spec = computation_types.to_type(type_spec)
        if isinstance(value, intrinsic_defs.IntrinsicDef):
            # Intrinsics are ingested as-is; only their type is validated.
            type_analysis.check_concrete_instance_of(type_spec,
                                                     value.type_signature)
            return self._strategy.ingest_value(value, type_spec)
        elif isinstance(value, placement_literals.PlacementLiteral):
            # Default the type to a placement type if one was not supplied.
            if type_spec is None:
                type_spec = computation_types.PlacementType()
            type_spec.check_placement()
            return self._strategy.ingest_value(value, type_spec)
        elif isinstance(value, computation_impl.ComputationImpl):
            # Unwrap to the serialized proto form and re-dispatch.
            return await self.create_value(
                computation_impl.ComputationImpl.get_proto(value),
                type_utils.reconcile_value_with_type_spec(value, type_spec))
        elif isinstance(value, pb.Computation):
            deserialized_type = type_serialization.deserialize_type(value.type)
            if type_spec is None:
                type_spec = deserialized_type
            else:
                # The declared type must admit the serialized computation's
                # own type.
                type_spec.check_assignable_from(deserialized_type)
            which_computation = value.WhichOneof('computation')
            if which_computation in ['lambda', 'tensorflow']:
                return self._strategy.ingest_value(value, type_spec)
            elif which_computation == 'intrinsic':
                # Resolve the URI and re-dispatch as an IntrinsicDef.
                intrinsic_def = intrinsic_defs.uri_to_intrinsic_def(
                    value.intrinsic.uri)
                if intrinsic_def is None:
                    raise ValueError(
                        'Encountered an unrecognized intrinsic "{}".'.format(
                            value.intrinsic.uri))
                return await self.create_value(intrinsic_def, type_spec)
            else:
                raise ValueError(
                    'Unsupported computation building block of type "{}".'.
                    format(which_computation))
        elif type_spec is not None and type_spec.is_federated():
            # Placement/replication of federated values is delegated to the
            # federating strategy.
            return await self._strategy.compute_federated_value(
                value, type_spec)
        else:
            # Unplaced value: embed it in the unplaced child executor and wrap.
            result = await self._unplaced_executor.create_value(
                value, type_spec)
            return self._strategy.ingest_value(result, type_spec)
Пример #17
0
    async def create_value(self, value, type_spec=None):
        """A coroutine that creates embedded value from `value` of type `type_spec`.

    See the `FederatingExecutorValue` for detailed information about the
    `value`s and `type_spec`s that can be embedded using `create_value`.

    Args:
      value: An object that represents the value to embed within the executor.
      type_spec: An optional `tff.Type` of the value represented by this object,
        or something convertible to it.

    Returns:
      An instance of `FederatingExecutorValue` that represents the embedded
      value.

    Raises:
      TypeError: If the `value` and `type_spec` do not match.
      ValueError: If `value` is not a kind recognized by the
        `FederatingExecutor`.
    """
        type_spec = computation_types.to_type(type_spec)
        # Fail fast when the (possibly functional) federated type requires a
        # placement this executor is not configured for.
        if isinstance(type_spec, computation_types.FederatedType):
            self._check_executor_compatible_with_placement(type_spec.placement)
        elif (isinstance(type_spec, computation_types.FunctionType) and
              isinstance(type_spec.result, computation_types.FederatedType)):
            self._check_executor_compatible_with_placement(
                type_spec.result.placement)
        if isinstance(value, intrinsic_defs.IntrinsicDef):
            # Intrinsics are stored as-is; only their type is validated.
            if not type_analysis.is_concrete_instance_of(
                    type_spec, value.type_signature):
                raise TypeError(
                    'Incompatible type {} used with intrinsic {}.'.format(
                        type_spec, value.uri))
            return FederatingExecutorValue(value, type_spec)
        elif isinstance(value, placement_literals.PlacementLiteral):
            # Default the type to a placement type if one was not supplied.
            if type_spec is None:
                type_spec = computation_types.PlacementType()
            else:
                py_typecheck.check_type(type_spec,
                                        computation_types.PlacementType)
            return FederatingExecutorValue(value, type_spec)
        elif isinstance(value, computation_impl.ComputationImpl):
            # Unwrap to the serialized proto form and re-dispatch.
            return await self.create_value(
                computation_impl.ComputationImpl.get_proto(value),
                type_utils.reconcile_value_with_type_spec(value, type_spec))
        elif isinstance(value, pb.Computation):
            deserialized_type = type_serialization.deserialize_type(value.type)
            if type_spec is None:
                type_spec = deserialized_type
            else:
                # The declared type must admit the serialized computation's
                # own type.
                type_analysis.check_assignable_from(type_spec,
                                                    deserialized_type)
            which_computation = value.WhichOneof('computation')
            if which_computation in ['lambda', 'tensorflow']:
                return FederatingExecutorValue(value, type_spec)
            elif which_computation == 'intrinsic':
                # Resolve the URI and re-dispatch as an IntrinsicDef.
                intrinsic_def = intrinsic_defs.uri_to_intrinsic_def(
                    value.intrinsic.uri)
                if intrinsic_def is None:
                    raise ValueError(
                        'Encountered an unrecognized intrinsic "{}".'.format(
                            value.intrinsic.uri))
                return await self.create_value(intrinsic_def, type_spec)
            else:
                raise ValueError(
                    'Unsupported computation building block of type "{}".'.
                    format(which_computation))
        elif isinstance(type_spec, computation_types.FederatedType):
            self._check_value_compatible_with_placement(
                value, type_spec.placement, type_spec.all_equal)
            children = self._target_executors[type_spec.placement]
            if type_spec.all_equal:
                # Replicate the single all_equal value once per child executor.
                value = [value for _ in children]
            # Embed each member value in its corresponding child concurrently.
            results = await asyncio.gather(*[
                c.create_value(v, type_spec.member)
                for v, c in zip(value, children)
            ])
            return FederatingExecutorValue(results, type_spec)
        else:
            # Unplaced value: embed in the first executor registered under the
            # `None` placement.
            child = self._target_executors[None][0]
            return FederatingExecutorValue(
                await child.create_value(value, type_spec), type_spec)
    async def create_value(self, value, type_spec=None):
        """A coroutine that creates embedded value from `value` of type `type_spec`.

    See the `FederatingExecutorValue` for detailed information about the
    `value`s and `type_spec`s that can be embedded using `create_value`.

    Args:
      value: An object that represents the value to embed within the executor.
      type_spec: An optional `tff.Type` of the value represented by this object,
        or something convertible to it.

    Returns:
      An instance of `FederatingExecutorValue` that represents the embedded
      value.

    Raises:
      TypeError: If the `value` and `type_spec` do not match.
      ValueError: If `value` is not a kind recognized by the
        `FederatingExecutor`.
    """
        type_spec = computation_types.to_type(type_spec)
        # Fail fast when the (possibly functional) federated type requires a
        # placement this executor is not configured for.
        if isinstance(type_spec, computation_types.FederatedType):
            self._check_executor_compatible_with_placement(type_spec.placement)
        elif (isinstance(type_spec, computation_types.FunctionType) and
              isinstance(type_spec.result, computation_types.FederatedType)):
            self._check_executor_compatible_with_placement(
                type_spec.result.placement)
        if isinstance(value, intrinsic_defs.IntrinsicDef):
            # Intrinsics are stored as-is; only their type is validated.
            if not type_analysis.is_concrete_instance_of(
                    type_spec, value.type_signature):
                raise TypeError(
                    'Incompatible type {} used with intrinsic {}.'.format(
                        type_spec, value.uri))
            return FederatingExecutorValue(value, type_spec)
        elif isinstance(value, placement_literals.PlacementLiteral):
            # Default the type to a placement type if one was not supplied.
            if type_spec is None:
                type_spec = computation_types.PlacementType()
            else:
                py_typecheck.check_type(type_spec,
                                        computation_types.PlacementType)
            return FederatingExecutorValue(value, type_spec)
        elif isinstance(value, computation_impl.ComputationImpl):
            # Unwrap to the serialized proto form and re-dispatch.
            return await self.create_value(
                computation_impl.ComputationImpl.get_proto(value),
                type_utils.reconcile_value_with_type_spec(value, type_spec))
        elif isinstance(value, pb.Computation):
            deserialized_type = type_serialization.deserialize_type(value.type)
            if type_spec is None:
                type_spec = deserialized_type
            else:
                # The declared type must admit the serialized computation's
                # own type.
                type_analysis.check_assignable_from(type_spec,
                                                    deserialized_type)
            # Dispatch on the kind of computation building block.
            which_computation = value.WhichOneof('computation')
            if which_computation in ['lambda', 'tensorflow']:
                return FederatingExecutorValue(value, type_spec)
            elif which_computation == 'reference':
                # Unbound references cannot be evaluated in isolation.
                raise ValueError(
                    'Encountered an unexpected unbound references "{}".'.
                    format(value.reference.name))
            elif which_computation == 'intrinsic':
                # Resolve the URI and re-dispatch as an IntrinsicDef.
                intr = intrinsic_defs.uri_to_intrinsic_def(value.intrinsic.uri)
                if intr is None:
                    raise ValueError(
                        'Encountered an unrecognized intrinsic "{}".'.format(
                            value.intrinsic.uri))
                py_typecheck.check_type(intr, intrinsic_defs.IntrinsicDef)
                return await self.create_value(intr, type_spec)
            elif which_computation == 'placement':
                # Resolve the URI and re-dispatch as a PlacementLiteral.
                return await self.create_value(
                    placement_literals.uri_to_placement_literal(
                        value.placement.uri), type_spec)
            elif which_computation == 'call':
                # Embed the function (and argument, if any) concurrently, then
                # invoke the call.
                parts = [value.call.function]
                if value.call.argument.WhichOneof('computation'):
                    parts.append(value.call.argument)
                parts = await asyncio.gather(
                    *[self.create_value(x) for x in parts])
                return await self.create_call(
                    parts[0], parts[1] if len(parts) > 1 else None)
            elif which_computation == 'tuple':
                # Embed all tuple elements concurrently, preserving names.
                element_values = await asyncio.gather(
                    *[self.create_value(x.value) for x in value.tuple.element])
                return await self.create_tuple(
                    anonymous_tuple.AnonymousTuple(
                        (e.name if e.name else None, v)
                        for e, v in zip(value.tuple.element, element_values)))
            elif which_computation == 'selection':
                # A selection is addressed either by name or by index.
                which_selection = value.selection.WhichOneof('selection')
                if which_selection == 'name':
                    name = value.selection.name
                    index = None
                elif which_selection != 'index':
                    raise ValueError(
                        'Unrecognized selection type: "{}".'.format(
                            which_selection))
                else:
                    index = value.selection.index
                    name = None
                return await self.create_selection(await self.create_value(
                    value.selection.source),
                                                   index=index,
                                                   name=name)
            else:
                raise ValueError(
                    'Unsupported computation building block of type "{}".'.
                    format(which_computation))
        else:
            py_typecheck.check_type(type_spec, computation_types.Type)
            if isinstance(type_spec, computation_types.FunctionType):
                raise ValueError(
                    'Encountered a value of a functional TFF type {} and Python type '
                    '{} that is not of one of the recognized representations.'.
                    format(type_spec, py_typecheck.type_string(type(value))))
            elif isinstance(type_spec, computation_types.FederatedType):
                # NOTE(review): `.get()` can return None for an unknown
                # placement, which would fail in the zip below — presumably the
                # compatibility check at the top guarantees the key exists;
                # confirm.
                children = self._target_executors.get(type_spec.placement)
                self._check_value_compatible_with_placement(
                    value, type_spec.placement, type_spec.all_equal)
                if type_spec.all_equal:
                    # Replicate the single all_equal value once per child.
                    value = [value for _ in children]
                # Embed each member value in its corresponding child
                # concurrently.
                child_vals = await asyncio.gather(*[
                    c.create_value(v, type_spec.member)
                    for v, c in zip(value, children)
                ])
                return FederatingExecutorValue(child_vals, type_spec)
            else:
                # Unplaced value: requires exactly one executor registered
                # under the `None` placement.
                child = self._target_executors.get(None)
                if not child or len(child) > 1:
                    raise ValueError(
                        'Executor is not configured for unplaced values.')
                else:
                    return FederatingExecutorValue(
                        await child[0].create_value(value, type_spec),
                        type_spec)