Example #1
def create_dummy_value_unplaced():
    """Returns a Python value and unplaced type."""
    value = 10.0
    type_signature = computation_types.TensorType(tf.float32)
    return value, type_signature
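A small sketch (not from the original source) of how the returned pair might be checked, assuming `create_dummy_value_unplaced` is in scope and that the `computation_types` import path shown in Example #23 matches your TFF version:

import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

value, type_signature = create_dummy_value_unplaced()
# The dummy value is a plain Python float; its type signature is a scalar float32 tensor type.
assert isinstance(value, float)
assert type_signature == computation_types.TensorType(tf.float32)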
Example #2
 def test_make_dummy_element_TensorType(self):
   type_spec = computation_types.TensorType(tf.float32,
                                            [None, 10, None, 10, 10])
   elem = tensorflow_utils.make_dummy_element_for_type_spec(type_spec)
   correct_elem = np.zeros([0, 10, 0, 10, 10], np.float32)
   self.assertAllClose(elem, correct_elem)
Example #3
        ip = computation_utils.IterativeProcess(init_fn, next_fn)
        cf = canonical_form_utils.get_canonical_form_for_iterative_process(ip)
        self.assertIsInstance(cf, canonical_form.CanonicalForm)


INIT_TYPE = computation_types.FederatedType(tf.float32, placements.SERVER)
S1_TYPE = INIT_TYPE
C1_TYPE = computation_types.FederatedType(tf.float32, placements.CLIENTS)
S6_TYPE = computation_types.FederatedType(tf.float64, placements.SERVER)
S7_TYPE = computation_types.FederatedType(tf.bool, placements.SERVER)
C6_TYPE = computation_types.FederatedType(tf.int64, placements.CLIENTS)
S2_TYPE = computation_types.FederatedType([tf.float32], placements.SERVER)
C2_TYPE = computation_types.FederatedType(S2_TYPE.member, placements.CLIENTS)
C5_TYPE = computation_types.FederatedType([tf.float64], placements.CLIENTS)
ZERO_TYPE = computation_types.TensorType(tf.int64)
ACCUMULATE_TYPE = computation_types.FunctionType([ZERO_TYPE, C5_TYPE.member],
                                                 ZERO_TYPE)
MERGE_TYPE = computation_types.FunctionType([ZERO_TYPE, ZERO_TYPE], ZERO_TYPE)
REPORT_TYPE = computation_types.FunctionType(ZERO_TYPE, tf.int64)
S3_TYPE = computation_types.FederatedType(REPORT_TYPE.result,
                                          placements.SERVER)


def _create_next_type_with_s1_type(x):
    param_type = computation_types.NamedTupleType([x, C1_TYPE])
    result_type = computation_types.NamedTupleType([S6_TYPE, S7_TYPE, C6_TYPE])
    return computation_types.FunctionType(param_type, result_type)


def _create_before_broadcast_type_with_s1_type(x):
Example #4
def create_dummy_computation_intrinsic():
    value = pb.Computation(
        type=type_serialization.serialize_type(tf.int32),
        intrinsic=pb.Intrinsic(uri=intrinsic_defs.GENERIC_ZERO.uri))
    type_signature = computation_types.TensorType(tf.int32)
    return value, type_signature
Example #5
 def transform_to_scalar_type_spec(t):
     """Converts all `tff.TensorType` to scalar shapes."""
     if not t.is_tensor():
         return t, False
     return computation_types.TensorType(dtype=t.dtype, shape=[]), True
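A minimal usage sketch (added here for illustration, assuming the function above and the `computation_types` import from Example #23 are in scope): tensor types collapse to a scalar shape and report a change, while non-tensor types pass through untouched.

import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

# A partially defined tensor type is rewritten to a scalar of the same dtype.
scalar_type, changed = transform_to_scalar_type_spec(
    computation_types.TensorType(tf.float32, [None, 3]))
assert changed
assert scalar_type == computation_types.TensorType(tf.float32, shape=[])

# Non-tensor types are returned unchanged.
seq_type, changed = transform_to_scalar_type_spec(
    computation_types.SequenceType(tf.float32))
assert not changed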
Example #6
    def test_init_does_not_raise_type_error_with_unknown_dimensions(self):
        server_state_type = computation_types.TensorType(shape=[None],
                                                         dtype=tf.int32)

        @computations.tf_computation
        def initialize():
            # Return a value of a type assignable to, but not equal to
            # `server_state_type`
            return tf.constant([1, 2, 3])

        @computations.tf_computation(server_state_type)
        def prepare(server_state):
            del server_state  # Unused
            return tf.constant(1.0)

        @computations.tf_computation(computation_types.SequenceType(
            tf.float32), tf.float32)
        def work(client_data, client_input):
            del client_data  # Unused
            del client_input  # Unused
            return True, []

        @computations.tf_computation
        def zero():
            return tf.constant([], dtype=tf.string)

        @computations.tf_computation(
            computation_types.TensorType(shape=[None], dtype=tf.string),
            tf.bool)
        def accumulate(accumulator, client_update):
            del accumulator  # Unused
            del client_update  # Unused
            return tf.constant(['abc'])

        @computations.tf_computation(
            computation_types.TensorType(shape=[None], dtype=tf.string),
            computation_types.TensorType(shape=[None], dtype=tf.string))
        def merge(accumulator1, accumulator2):
            del accumulator1  # Unused
            del accumulator2  # Unused
            return tf.constant(['abc'])

        @computations.tf_computation(
            computation_types.TensorType(shape=[None], dtype=tf.string))
        def report(accumulator):
            del accumulator  # Unused
            return tf.constant(1.0)

        @computations.tf_computation
        def bitwidth():
            return []

        @computations.tf_computation(
            server_state_type, (tf.float32, computation_types.StructType([])))
        def update(server_state, global_update):
            del server_state  # Unused
            del global_update  # Unused
            # Return a new server state value whose type is assignable but not equal
            # to `server_state_type`, and which is different from the type returned
            # by `initialize`.
            return tf.constant([1]), []

        try:
            forms.MapReduceForm(initialize, prepare, work, zero, accumulate,
                                merge, report, bitwidth, update)
        except TypeError:
            self.fail('Raised TypeError unexpectedly.')
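The test above hinges on type assignability rather than equality. A rough sketch of that relationship (not part of the original test), using the `is_assignable_from` method that the newer type API exposes, as seen in Example #22: a tensor type with an unknown dimension accepts any concrete size, but not the other way around.

import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

unknown_dim = computation_types.TensorType(tf.int32, [None])
fixed_dim = computation_types.TensorType(tf.int32, [3])

# A shape with an unknown dimension can accept a fully specified one...
assert unknown_dim.is_assignable_from(fixed_dim)
# ...but the reverse assignment fails, and the two types are not equal.
assert not fixed_dim.is_assignable_from(unknown_dim)
assert unknown_dim != fixed_dim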
Example #7
def create_dummy_unplaced_type():
    value = 10
    type_signature = computation_types.TensorType(tf.int32)
    return value, type_signature
Example #8
def _convert_federated_to_tensor(type_spec):
    if isinstance(type_spec, computation_types.FederatedType):
        return computation_types.TensorType(tf.float32), True
    return type_spec, False
Example #9
def _convert_sequence_to_tensor(type_spec):
    if isinstance(type_spec, computation_types.SequenceType):
        return computation_types.TensorType(tf.float32), True
    return type_spec, False
Example #10
def _convert_abstract_type_to_tensor(type_spec):
    if isinstance(type_spec, computation_types.AbstractType):
        return computation_types.TensorType(tf.float32), True
    return type_spec, False
Example #11
def _convert_placement_type_to_tensor(type_spec):
    if isinstance(type_spec, computation_types.PlacementType):
        return computation_types.TensorType(tf.float32), True
    return type_spec, False
Example #12
def _convert_tensor_to_float(type_spec):
    if isinstance(type_spec, computation_types.TensorType):
        return computation_types.TensorType(tf.float32,
                                            shape=type_spec.shape), True
    return type_spec, False
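Examples #8 through #12 all follow the same `(type_spec, changed)` convention. A short illustrative sketch (assumptions as above: the helper and `computation_types` are importable) of applying the last of them directly:

import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

# The dtype is rewritten to float32 while the original shape is preserved.
new_type, changed = _convert_tensor_to_float(
    computation_types.TensorType(tf.int32, [5]))
assert changed
assert new_type == computation_types.TensorType(tf.float32, shape=[5])

# Anything that is not a TensorType passes through untouched.
same_type, changed = _convert_tensor_to_float(
    computation_types.SequenceType(tf.int32))
assert not changed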
Example #13
def to_representation_for_type(value, type_spec=None, device=None):
    """Verifies or converts the `value` to an eager objct matching `type_spec`.

  WARNING: This function is only partially implemented. It does not support
  data sets at this point.

  The output of this function is always an eager tensor, eager dataset, a
  representation of a TensorFlow computation, or a nested structure of those
  that matches `type_spec`, and when `device` has been specified, everything
  is placed on that device on a best-effort basis.

  TensorFlow computations are represented here as zero- or one-argument Python
  callables that accept their entire argument bundle as a single Python object.

  Args:
    value: The raw representation of a value to compare against `type_spec` and
      potentially to be converted.
    type_spec: An instance of `tff.Type`, can be `None` for values that derive
      from `typed_object.TypedObject`.
    device: The optional device to place the value on (for tensor-level values).

  Returns:
    Either `value` itself, or a modified version of it.

  Raises:
    TypeError: If the `value` is not compatible with `type_spec`.
  """
    if device is not None:
        py_typecheck.check_type(device, six.string_types)
        with tf.device(device):
            return to_representation_for_type(value,
                                              type_spec=type_spec,
                                              device=None)
    type_spec = computation_types.to_type(type_spec)
    if isinstance(value, typed_object.TypedObject):
        if type_spec is not None:
            if not type_utils.are_equivalent_types(value.type_signature,
                                                   type_spec):
                raise TypeError(
                    'Expected a value of type {}, found {}.'.format(
                        str(type_spec), str(value.type_signature)))
        else:
            type_spec = value.type_signature
    if type_spec is None:
        raise ValueError(
            'Cannot derive an eager representation for a value of an unknown type.'
        )
    if isinstance(value, EagerValue):
        return value.internal_representation
    if isinstance(value, executor_value_base.ExecutorValue):
        raise TypeError(
            'Cannot accept a value embedded within a non-eager executor.')
    if isinstance(value, computation_base.Computation):
        return to_representation_for_type(
            computation_impl.ComputationImpl.get_proto(value), type_spec,
            device)
    if isinstance(value, pb.Computation):
        return embed_tensorflow_computation(value, type_spec, device)
    if isinstance(type_spec, computation_types.TensorType):
        if not isinstance(value, tf.Tensor):
            value = tf.constant(value, type_spec.dtype, type_spec.shape)
        value_type = (computation_types.TensorType(value.dtype.base_dtype,
                                                   value.shape))
        if not type_utils.is_assignable_from(type_spec, value_type):
            raise TypeError(
                'The apparent type {} of a tensor {} does not match the expected '
                'type {}.'.format(str(value_type), str(value), str(type_spec)))
        return value
    elif isinstance(type_spec, computation_types.NamedTupleType):
        type_elem = anonymous_tuple.to_elements(type_spec)
        value_elem = (anonymous_tuple.to_elements(
            anonymous_tuple.from_container(value)))
        result_elem = []
        if len(type_elem) != len(value_elem):
            raise TypeError(
                'Expected a {}-element tuple, found {} elements.'.format(
                    str(len(type_elem)), str(len(value_elem))))
        for (t_name, el_type), (v_name, el_val) in zip(type_elem, value_elem):
            if t_name != v_name:
                raise TypeError(
                    'Mismatching element names in type vs. value: {} vs. {}.'.
                    format(t_name, v_name))
            el_repr = to_representation_for_type(el_val, el_type, device)
            result_elem.append((t_name, el_repr))
        return anonymous_tuple.AnonymousTuple(result_elem)
    else:
        raise TypeError('Unexpected type {}.'.format(str(type_spec)))
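A hedged usage sketch for the function above (not from the original source; it assumes the function and its module-level dependencies are importable and that eager execution is enabled): a raw Python scalar is converted to an eager tensor of the requested type, and a tensor whose apparent type is not assignable to `type_spec` is rejected.

import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

# A plain Python float becomes an eager tensor matching the requested tensor type.
tensor = to_representation_for_type(10.0, computation_types.TensorType(tf.float32))
assert tf.is_tensor(tensor)
assert tensor.dtype == tf.float32

# An already-constructed tensor of the wrong dtype fails the assignability check.
try:
    to_representation_for_type(tf.constant(1, tf.int32),
                               computation_types.TensorType(tf.float32))
    raise AssertionError('Expected a TypeError.')
except TypeError:
    pass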
Example #14
def capture_result_from_graph(result, graph):
  """Captures a result stamped into a tf.Graph as a type signature and binding.

  Args:
    result: The result to capture, a Python object that is composed of tensors,
      possibly nested within Python structures such as dictionaries, lists,
      tuples, or named tuples.
    graph: The instance of tf.Graph to use.

  Returns:
    A tuple (type_spec, binding), where 'type_spec' is an instance of
    computation_types.Type that describes the type of the result, and 'binding'
    is an instance of TensorFlow.Binding that indicates how parts of the result
    type relate to the tensors and ops that appear in the result.

  Raises:
    TypeError: If the argument or any of its parts are of an unexpected type.
  """

  def _get_bindings_for_elements(name_value_pairs, graph, type_fn):
    """Build `(type_spec, binding)` tuple for name value pairs."""
    element_name_type_binding_triples = [
        ((k,) + capture_result_from_graph(v, graph))
        for k, v in name_value_pairs
    ]
    type_spec = type_fn([((e[0], e[1]) if e[0] else e[1])
                         for e in element_name_type_binding_triples])
    binding = pb.TensorFlow.Binding(
        struct=pb.TensorFlow.StructBinding(
            element=[e[2] for e in element_name_type_binding_triples]))
    return type_spec, binding

  # TODO(b/113112885): The emerging extensions for serializing SavedModels may
  # end up introducing similar concepts of bindings, etc., we should look here
  # into the possibility of reusing some of that code when it's available.
  if isinstance(result, TENSOR_REPRESENTATION_TYPES):
    with graph.as_default():
      result = tf.constant(result)
  if tf.is_tensor(result):
    if hasattr(result, 'read_value'):
      # We have a tf.Variable-like result, get a proper tensor to fetch.
      with graph.as_default():
        result = result.read_value()
    return (computation_types.TensorType(result.dtype.base_dtype, result.shape),
            pb.TensorFlow.Binding(
                tensor=pb.TensorFlow.TensorBinding(tensor_name=result.name)))
  elif py_typecheck.is_named_tuple(result):
    # Special handling needed for collections.namedtuples since they do not have
    # anything in the way of a shared base class. Note we don't want to rely on
    # the fact that collections.namedtuples inherit from 'tuple' because we'd be
    # failing to retain the information about naming of tuple members.
    # pylint: disable=protected-access
    name_value_pairs = result._asdict().items()
    # pylint: enable=protected-access
    return _get_bindings_for_elements(
        name_value_pairs, graph,
        functools.partial(
            computation_types.StructWithPythonType,
            container_type=type(result)))
  elif py_typecheck.is_attrs(result):
    name_value_pairs = attr.asdict(
        result, dict_factory=collections.OrderedDict, recurse=False)
    return _get_bindings_for_elements(
        name_value_pairs.items(), graph,
        functools.partial(
            computation_types.StructWithPythonType,
            container_type=type(result)))
  elif isinstance(result, structure.Struct):
    return _get_bindings_for_elements(
        structure.to_elements(result), graph, computation_types.StructType)
  elif isinstance(result, collections.abc.Mapping):
    if isinstance(result, collections.OrderedDict):
      name_value_pairs = result.items()
    else:
      name_value_pairs = sorted(result.items())
    return _get_bindings_for_elements(
        name_value_pairs, graph,
        functools.partial(
            computation_types.StructWithPythonType,
            container_type=type(result)))
  elif isinstance(result, (list, tuple)):
    element_type_binding_pairs = [
        capture_result_from_graph(e, graph) for e in result
    ]
    return (computation_types.StructWithPythonType(
        [e[0] for e in element_type_binding_pairs], type(result)),
            pb.TensorFlow.Binding(
                struct=pb.TensorFlow.StructBinding(
                    element=[e[1] for e in element_type_binding_pairs])))
  elif isinstance(result, type_conversions.TF_DATASET_REPRESENTATION_TYPES):
    variant_tensor = tf.data.experimental.to_variant(result)
    element_structure = result.element_spec
    try:
      element_type = computation_types.to_type(element_structure)
    except TypeError as e:
      raise TypeError(
          'TFF does not support Datasets that yield elements of structure {!s}'
          .format(element_structure)) from e
    return (computation_types.SequenceType(element_type),
            pb.TensorFlow.Binding(
                sequence=pb.TensorFlow.SequenceBinding(
                    variant_tensor_name=variant_tensor.name)))
  else:
    raise TypeError('Cannot capture a result of an unsupported type {}.'.format(
        py_typecheck.type_string(type(result))))
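A brief illustrative sketch of the tensor path through the function above (added here, not from the original source; it assumes `capture_result_from_graph` is importable): a constant stamped into a graph is captured as a `TensorType` plus a tensor binding that records the graph tensor's name.

import tensorflow as tf

# Stamp a constant into a graph and capture its TFF type and TensorFlow binding.
graph = tf.Graph()
with graph.as_default():
    result = tf.constant([1.0, 2.0, 3.0])
type_spec, binding = capture_result_from_graph(result, graph)
print(type_spec)                   # expected to print: float32[3]
print(binding.tensor.tensor_name)  # the graph name of the captured tensor, e.g. 'Const:0'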
Example #15
 def test_tensor_type(self):
     s = computation_types.TensorType(tf.int32)
     t = computation_types.to_type(s)
     self.assertIsInstance(t, computation_types.TensorType)
     self.assertEqual(str(t), 'int32')
Example #16
def _convert_tuple_to_tensor(type_spec):
    if isinstance(type_spec, computation_types.NamedTupleType):
        return computation_types.TensorType(tf.float32), True
    return type_spec, False
Example #17
 def test_dtype_and_shape(self):
     t = computation_types.TensorType(tf.int32, [10])
     self.assertEqual(t.dtype, tf.int32)
     self.assertEqual(t.shape, tf.TensorShape([10]))
Example #18
 def foo():
     return intrinsic_utils.zero_for(
         computation_types.TensorType(tf.float32, [2, 3]),
         context_stack_impl.context_stack)
Example #19
def create_dummy_computation_reference():
    value = pb.Computation(type=type_serialization.serialize_type(tf.int32),
                           reference=pb.Reference(name='a'))
    type_signature = computation_types.TensorType(tf.int32)
    return value, type_signature
Example #20
class TypeUtilsTest(test_case.TestCase, parameterized.TestCase):
    def test_to_canonical_value_with_none(self):
        self.assertIsNone(type_utils.to_canonical_value(None))

    def test_to_canonical_value_with_int(self):
        self.assertEqual(type_utils.to_canonical_value(1), 1)

    def test_to_canonical_value_with_float(self):
        self.assertEqual(type_utils.to_canonical_value(1.0), 1.0)

    def test_to_canonical_value_with_bool(self):
        self.assertEqual(type_utils.to_canonical_value(True), True)
        self.assertEqual(type_utils.to_canonical_value(False), False)

    def test_to_canonical_value_with_string(self):
        self.assertEqual(type_utils.to_canonical_value('a'), 'a')

    def test_to_canonical_value_with_list_of_ints(self):
        self.assertEqual(type_utils.to_canonical_value([1, 2, 3]), [1, 2, 3])

    def test_to_canonical_value_with_list_of_floats(self):
        self.assertEqual(type_utils.to_canonical_value([1.0, 2.0, 3.0]),
                         [1.0, 2.0, 3.0])

    def test_to_canonical_value_with_list_of_bools(self):
        self.assertEqual(type_utils.to_canonical_value([True, False]),
                         [True, False])

    def test_to_canonical_value_with_list_of_strings(self):
        self.assertEqual(type_utils.to_canonical_value(['a', 'b', 'c']),
                         ['a', 'b', 'c'])

    def test_to_canonical_value_with_list_of_dict(self):
        self.assertEqual(type_utils.to_canonical_value([{
            'a': 1,
            'b': 0.1,
        }]), [structure.Struct([
            ('a', 1),
            ('b', 0.1),
        ])])

    def test_to_canonical_value_with_list_of_ordered_dict(self):
        self.assertEqual(
            type_utils.to_canonical_value(
                [collections.OrderedDict([
                    ('a', 1),
                    ('b', 0.1),
                ])]), [structure.Struct([
                    ('a', 1),
                    ('b', 0.1),
                ])])

    def test_to_canonical_value_with_dict(self):
        self.assertEqual(type_utils.to_canonical_value({
            'a': 1,
            'b': 0.1,
        }), structure.Struct([
            ('a', 1),
            ('b', 0.1),
        ]))
        self.assertEqual(type_utils.to_canonical_value({
            'b': 0.1,
            'a': 1,
        }), structure.Struct([
            ('a', 1),
            ('b', 0.1),
        ]))

    def test_to_canonical_value_with_ordered_dict(self):
        self.assertEqual(
            type_utils.to_canonical_value(
                collections.OrderedDict([
                    ('a', 1),
                    ('b', 0.1),
                ])), structure.Struct([
                    ('a', 1),
                    ('b', 0.1),
                ]))
        self.assertEqual(
            type_utils.to_canonical_value(
                collections.OrderedDict([
                    ('b', 0.1),
                    ('a', 1),
                ])), structure.Struct([
                    ('b', 0.1),
                    ('a', 1),
                ]))

    # pyformat: disable
    @parameterized.named_parameters([
        ('building_block_and_type_spec',
         building_block_factory.create_compiled_identity(
             computation_types.TensorType(tf.int32)),
         computation_types.FunctionType(tf.int32, tf.int32),
         computation_types.FunctionType(tf.int32, tf.int32)),
        ('building_block_and_none',
         building_block_factory.create_compiled_identity(
             computation_types.TensorType(tf.int32)), None,
         computation_types.FunctionType(tf.int32, tf.int32)),
        ('int_and_type_spec', 10, computation_types.TensorType(tf.int32),
         computation_types.TensorType(tf.int32)),
    ])
    # pyformat: enable
    def test_reconcile_value_with_type_spec_returns_type(
            self, value, type_spec, expected_type):
        actual_type = type_utils.reconcile_value_with_type_spec(
            value, type_spec)
        self.assertEqual(actual_type, expected_type)

    # pyformat: disable
    @parameterized.named_parameters([
        ('building_block_and_bad_type_spec',
         building_block_factory.create_compiled_identity(
             computation_types.TensorType(tf.int32)),
         computation_types.TensorType(tf.int32)),
        ('int_and_none', 10, None),
    ])
    # pyformat: enable
    def test_reconcile_value_with_type_spec_raises_type_error(
            self, value, type_spec):
        with self.assertRaises(TypeError):
            type_utils.reconcile_value_with_type_spec(value, type_spec)

    # pyformat: disable
    @parameterized.named_parameters([
        ('value_type_and_type_spec', computation_types.TensorType(tf.int32),
         computation_types.TensorType(tf.int32),
         computation_types.TensorType(tf.int32)),
        ('value_type_and_none', computation_types.TensorType(tf.int32), None,
         computation_types.TensorType(tf.int32)),
    ])
    # pyformat: enable
    def test_reconcile_value_type_with_type_spec_returns_type(
            self, value_type, type_spec, expected_type):
        actual_type = type_utils.reconcile_value_type_with_type_spec(
            value_type, type_spec)
        self.assertEqual(actual_type, expected_type)

    def test_reconcile_value_type_with_type_spec_raises_type_error_value_type_and_bad_type_spec(
            self):
        value_type = computation_types.TensorType(tf.int32)
        type_spec = computation_types.TensorType(tf.string)

        with self.assertRaises(TypeError):
            type_utils.reconcile_value_type_with_type_spec(
                value_type, type_spec)
Example #21
    def test_raises_type_error_with_value_and_bad_type(self, value, _):
        executor = create_test_executor(num_clients=3)
        bad_type_signature = computation_types.TensorType(tf.string)

        with self.assertRaises(TypeError):
            self.run_sync(executor.create_value(value, bad_type_signature))
Example #22
def to_representation_for_type(
        value: Any,
        tf_function_cache: MutableMapping[str, Any],
        type_spec: Optional[computation_types.Type] = None,
        device: Optional[tf.config.LogicalDevice] = None) -> Any:
    """Verifies or converts the `value` to an eager object matching `type_spec`.

  WARNING: This function is only partially implemented. It does not support
  data sets at this point.

  The output of this function is always an eager tensor, eager dataset, a
  representation of a TensorFlow computation, or a nested structure of those
  that matches `type_spec`, and when `device` has been specified, everything
  is placed on that device on a best-effort basis.

  TensorFlow computations are represented here as zero- or one-argument Python
  callables that accept their entire argument bundle as a single Python object.

  Args:
    value: The raw representation of a value to compare against `type_spec` and
      potentially to be converted.
    tf_function_cache: A cache obeying `dict` semantics that can be used to look
      up previously embedded TensorFlow functions.
    type_spec: An instance of `tff.Type`, can be `None` for values that derive
      from `typed_object.TypedObject`.
    device: An optional `tf.config.LogicalDevice` to place the value on (for
      tensor-level values).

  Returns:
    Either `value` itself, or a modified version of it.

  Raises:
    TypeError: If the `value` is not compatible with `type_spec`.
  """
    type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec)
    if isinstance(value, computation_base.Computation):
        return to_representation_for_type(
            computation_impl.ComputationImpl.get_proto(value),
            tf_function_cache, type_spec, device)
    elif isinstance(value, pb.Computation):
        key = (value.SerializeToString(), str(type_spec),
               device.name if device else None)
        cached_fn = tf_function_cache.get(key)
        if cached_fn is not None:
            return cached_fn
        embedded_fn = embed_tensorflow_computation(value, type_spec, device)
        tf_function_cache[key] = embedded_fn
        return embedded_fn
    elif type_spec.is_struct():
        type_elem = structure.to_elements(type_spec)
        value_elem = (structure.to_elements(structure.from_container(value)))
        result_elem = []
        if len(type_elem) != len(value_elem):
            raise TypeError(
                'Expected a {}-element tuple, found {} elements.'.format(
                    len(type_elem), len(value_elem)))
        for (t_name, el_type), (v_name, el_val) in zip(type_elem, value_elem):
            if t_name != v_name:
                raise TypeError(
                    'Mismatching element names in type vs. value: {} vs. {}.'.
                    format(t_name, v_name))
            el_repr = to_representation_for_type(el_val, tf_function_cache,
                                                 el_type, device)
            result_elem.append((t_name, el_repr))
        return structure.Struct(result_elem)
    elif device is not None:
        py_typecheck.check_type(device, tf.config.LogicalDevice)
        with tf.device(device.name):
            return to_representation_for_type(value,
                                              tf_function_cache,
                                              type_spec=type_spec,
                                              device=None)
    elif isinstance(value, EagerValue):
        return value.internal_representation
    elif isinstance(value, executor_value_base.ExecutorValue):
        raise TypeError(
            'Cannot accept a value embedded within a non-eager executor.')
    elif type_spec.is_tensor():
        if not tf.is_tensor(value):
            value = tf.convert_to_tensor(value, dtype=type_spec.dtype)
        elif hasattr(value, 'read_value'):
            # a tf.Variable-like result, get a proper tensor.
            value = value.read_value()
        value_type = (computation_types.TensorType(value.dtype.base_dtype,
                                                   value.shape))
        if not type_spec.is_assignable_from(value_type):
            raise TypeError(
                'The apparent type {} of a tensor {} does not match the expected '
                'type {}.'.format(value_type, value, type_spec))
        return value
    elif type_spec.is_sequence():
        if isinstance(value, list):
            value = tensorflow_utils.make_data_set_from_elements(
                None, value, type_spec.element)
        py_typecheck.check_type(
            value, type_conversions.TF_DATASET_REPRESENTATION_TYPES)
        element_type = computation_types.to_type(value.element_spec)
        value_type = computation_types.SequenceType(element_type)
        type_spec.check_assignable_from(value_type)
        return value
    else:
        raise TypeError('Unexpected type {}.'.format(type_spec))
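A hedged sketch of this newer variant (added for illustration; it assumes the function and `computation_types` are importable and eager execution is on): any dict-like object can serve as `tf_function_cache`, scalars become eager tensors, and struct types are converted element-wise.

import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

tf_function_cache = {}  # any object with dict semantics works as the cache

# Scalars are converted to eager tensors of the requested type.
tensor = to_representation_for_type(
    10, tf_function_cache, computation_types.TensorType(tf.int32))
assert tensor.dtype == tf.int32

# Struct types are converted element-wise and returned as a structure.Struct.
pair = to_representation_for_type(
    [1, 2.0], tf_function_cache, computation_types.to_type([tf.int32, tf.float32]))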
Example #23
import collections

import attr
import tensorflow as tf

from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.impl.context_stack import context_base
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.utils import function_utils
from tensorflow_federated.python.core.impl.wrappers import computation_wrapper

tffint32 = computation_types.TensorType(tf.int32)

tffstring = computation_types.TensorType(tf.string)


def build_zero_argument(parameter_type):
  if parameter_type is None:
    return None
  elif parameter_type.is_struct():
    return structure.map_structure(build_zero_argument, parameter_type)
  elif parameter_type == tffint32:
    return 0
  elif parameter_type == tffstring:
    return ''
  else:
    raise NotImplementedError(f'Unsupported type: {parameter_type}')
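A rough usage sketch for the helper above (not part of the original test file; it assumes the module has been imported so that `build_zero_argument`, `tffint32`, `tffstring`, and `computation_types` are in scope):

# Leaves of the two supported scalar types map to their zero Python values.
assert build_zero_argument(tffint32) == 0
assert build_zero_argument(tffstring) == ''
assert build_zero_argument(None) is None

# Struct types are handled recursively through structure.map_structure,
# yielding a structure of per-element zeros.
zeros = build_zero_argument(computation_types.StructType([tffint32, tffstring]))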
Example #24
def capture_result_from_graph(result, graph):
  """Captures a result stamped into a tf.Graph as a type signature and binding.

  Args:
    result: The result to capture, a Python object that is composed of tensors,
      possibly nested within Python structures such as dictionaries, lists,
      tuples, or named tuples.
    graph: The instance of tf.Graph to use.

  Returns:
    A tuple (type_spec, binding), where 'type_spec' is an instance of
    computation_types.Type that describes the type of the result, and 'binding'
    is an instance of TensorFlow.Binding that indicates how parts of the result
    type relate to the tensors and ops that appear in the result.

  Raises:
    TypeError: If the argument or any of its parts are of an unexpected type.
  """

  def _get_bindings_for_elements(name_value_pairs, graph, type_fn):
    """Build `(type_spec, binding)` tuple for name value pairs."""
    element_name_type_binding_triples = [
        ((k,) + capture_result_from_graph(v, graph))
        for k, v in name_value_pairs
    ]
    type_spec = type_fn([((e[0], e[1]) if e[0] else e[1])
                         for e in element_name_type_binding_triples])
    binding = pb.TensorFlow.Binding(
        tuple=pb.TensorFlow.NamedTupleBinding(
            element=[e[2] for e in element_name_type_binding_triples]))
    return type_spec, binding

  # TODO(b/113112885): The emerging extensions for serializing SavedModels may
  # end up introducing similar concepts of bindings, etc., we should look here
  # into the possibility of reusing some of that code when it's available.
  if isinstance(result, dtype_utils.TENSOR_REPRESENTATION_TYPES):
    with graph.as_default():
      result = tf.constant(result)
  if tf.is_tensor(result):
    if hasattr(result, 'read_value'):
      # We have a tf.Variable-like result, get a proper tensor to fetch.
      with graph.as_default():
        result = result.read_value()
    return (computation_types.TensorType(result.dtype.base_dtype, result.shape),
            pb.TensorFlow.Binding(
                tensor=pb.TensorFlow.TensorBinding(tensor_name=result.name)))
  elif py_typecheck.is_named_tuple(result):
    # Special handling needed for collections.namedtuples since they do not have
    # anything in the way of a shared base class. Note we don't want to rely on
    # the fact that collections.namedtuples inherit from 'tuple' because we'd be
    # failing to retain the information about naming of tuple members.
    # pylint: disable=protected-access
    name_value_pairs = six.iteritems(result._asdict())
    # pylint: enable=protected-access
    return _get_bindings_for_elements(
        name_value_pairs, graph,
        functools.partial(
            computation_types.NamedTupleTypeWithPyContainerType,
            container_type=type(result)))
  elif py_typecheck.is_attrs(result):
    name_value_pairs = attr.asdict(
        result, dict_factory=collections.OrderedDict, recurse=False)
    return _get_bindings_for_elements(
        six.iteritems(name_value_pairs), graph,
        functools.partial(
            computation_types.NamedTupleTypeWithPyContainerType,
            container_type=type(result)))
  elif isinstance(result, anonymous_tuple.AnonymousTuple):
    return _get_bindings_for_elements(
        anonymous_tuple.to_elements(result), graph,
        computation_types.NamedTupleType)
  elif isinstance(result, collections.Mapping):
    if isinstance(result, collections.OrderedDict):
      name_value_pairs = six.iteritems(result)
    else:
      name_value_pairs = sorted(six.iteritems(result))
    return _get_bindings_for_elements(
        name_value_pairs, graph,
        functools.partial(
            computation_types.NamedTupleTypeWithPyContainerType,
            container_type=type(result)))
  elif isinstance(result, (list, tuple)):
    element_type_binding_pairs = [
        capture_result_from_graph(e, graph) for e in result
    ]
    return (computation_types.NamedTupleTypeWithPyContainerType(
        [e[0] for e in element_type_binding_pairs], type(result)),
            pb.TensorFlow.Binding(
                tuple=pb.TensorFlow.NamedTupleBinding(
                    element=[e[1] for e in element_type_binding_pairs])))
  elif isinstance(result,
                  (tf.compat.v1.data.Dataset, tf.compat.v2.data.Dataset)):
    variant_tensor = tf.data.experimental.to_variant(result)
    # TODO(b/130032140): Switch to TF2.0 way of doing it while cleaning up the
    # legacy structures all over the code base and replacing them with the new
    # tf.data.experimental.Structure variants.
    element_type = type_utils.tf_dtypes_and_shapes_to_type(
        tf.compat.v1.data.get_output_types(result),
        tf.compat.v1.data.get_output_shapes(result))
    return (computation_types.SequenceType(element_type),
            pb.TensorFlow.Binding(
                sequence=pb.TensorFlow.SequenceBinding(
                    variant_tensor_name=variant_tensor.name)))
  elif isinstance(result, OneShotDataset):
    # TODO(b/129956296): Eventually delete this deprecated code path.
    element_type = type_utils.tf_dtypes_and_shapes_to_type(
        tf.compat.v1.data.get_output_types(result),
        tf.compat.v1.data.get_output_shapes(result))
    handle_name = result.make_one_shot_iterator().string_handle().name
    return (computation_types.SequenceType(element_type),
            pb.TensorFlow.Binding(
                sequence=pb.TensorFlow.SequenceBinding(
                    iterator_string_handle_name=handle_name)))
  else:
    raise TypeError('Cannot capture a result of an unsupported type {}.'.format(
        py_typecheck.type_string(type(result))))
Example #25
 def test_coerce_dataset_elements_noop(self):
   x = tf.data.Dataset.range(5)
   y = tensorflow_utils.coerce_dataset_elements_to_tff_type_spec(
       x, computation_types.TensorType(tf.int64))
   self.assertEqual(x.element_spec, y.element_spec)
Example #26
 def test_make_dummy_element_TensorType(self):
     type_spec = computation_types.TensorType(tf.float32,
                                              [None, 10, None, 10, 10])
     elem = graph_utils._make_dummy_element_for_type_spec(type_spec)
     correct_elem = np.zeros([0, 10, 0, 10, 10], np.float32)
     self.assertTrue(np.array_equal(elem, correct_elem))
Example #27
def deserialize_type(
    type_proto: Optional[pb.Type]) -> Optional[computation_types.Type]:
  """Deserializes 'type_proto' as a computation_types.Type.

  Note: Currently only deserialization for tensor, named tuple, sequence, and
  function types is implemented.

  Args:
    type_proto: An instance of pb.Type or None.

  Returns:
    The corresponding instance of computation_types.Type (or None if the
    argument was None).

  Raises:
    TypeError: if the argument is of the wrong type.
    NotImplementedError: for type variants for which deserialization is not
      implemented.
  """
  if type_proto is None:
    return None
  py_typecheck.check_type(type_proto, pb.Type)
  type_variant = type_proto.WhichOneof('type')
  if type_variant is None:
    return None
  elif type_variant == 'tensor':
    tensor_proto = type_proto.tensor
    return computation_types.TensorType(
        dtype=tf.dtypes.as_dtype(tensor_proto.dtype),
        shape=_to_tensor_shape(tensor_proto))
  elif type_variant == 'sequence':
    return computation_types.SequenceType(
        deserialize_type(type_proto.sequence.element))
  elif type_variant == 'struct':

    def empty_str_to_none(s):
      if s == '':  # pylint: disable=g-explicit-bool-comparison
        return None
      return s

    return computation_types.StructType(
        [(empty_str_to_none(e.name), deserialize_type(e.value))
         for e in type_proto.struct.element],
        convert=False)
  elif type_variant == 'function':
    return computation_types.FunctionType(
        parameter=deserialize_type(type_proto.function.parameter),
        result=deserialize_type(type_proto.function.result))
  elif type_variant == 'placement':
    return computation_types.PlacementType()
  elif type_variant == 'federated':
    placement_oneof = type_proto.federated.placement.WhichOneof('placement')
    if placement_oneof == 'value':
      return computation_types.FederatedType(
          member=deserialize_type(type_proto.federated.member),
          placement=placements.uri_to_placement_literal(
              type_proto.federated.placement.value.uri),
          all_equal=type_proto.federated.all_equal)
    else:
      raise NotImplementedError(
          'Deserialization of federated types with placement spec as {} '
          'is not currently implemented yet.'.format(placement_oneof))
  else:
    raise NotImplementedError('Unknown type variant {}.'.format(type_variant))
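A hedged round-trip sketch for the function above (added for illustration; `serialize_type` is assumed to be the companion serializer from the same module, as used in Examples #4 and #19):

import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types

# Round-trip a structured type through its proto representation.
original = computation_types.StructType([
    ('a', computation_types.TensorType(tf.int32, [10])),
    ('b', computation_types.SequenceType(tf.float32)),
])
proto = serialize_type(original)  # assumed companion to deserialize_type
assert deserialize_type(proto) == original
assert deserialize_type(None) is None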
Example #28
class CheckWellFormedTest(parameterized.TestCase):

  # pyformat: disable
  @parameterized.named_parameters([
      ('abstract_type',
       computation_types.AbstractType('T')),
      ('federated_type',
       computation_types.FederatedType(tf.int32, placement_literals.CLIENTS)),
      ('function_type',
       computation_types.FunctionType(tf.int32, tf.int32)),
      ('named_tuple_type',
       computation_types.NamedTupleType([tf.int32] * 3)),
      ('placement_type',
       computation_types.PlacementType()),
      ('sequence_type',
       computation_types.SequenceType(tf.int32)),
      ('tensor_type',
       computation_types.TensorType(tf.int32)),
  ])
  # pyformat: enable
  def test_does_not_raise_type_error(self, type_signature):
    try:
      type_analysis.check_well_formed(type_signature)
    except TypeError:
      self.fail('Raised TypeError unexpectedly.')

  @parameterized.named_parameters([
      ('federated_function_type',
       computation_types.FederatedType(
           computation_types.FunctionType(tf.int32, tf.int32),
           placement_literals.CLIENTS)),
      ('federated_federated_type',
       computation_types.FederatedType(
           computation_types.FederatedType(tf.int32,
                                           placement_literals.CLIENTS),
           placement_literals.CLIENTS)),
      ('sequence_sequence_type',
       computation_types.SequenceType(
           computation_types.SequenceType([tf.int32]))),
      ('sequence_federated_type',
       computation_types.SequenceType(
           computation_types.FederatedType(tf.int32,
                                           placement_literals.CLIENTS))),
      ('tuple_federated_function_type',
       computation_types.NamedTupleType([
           computation_types.FederatedType(
               computation_types.FunctionType(tf.int32, tf.int32),
               placement_literals.CLIENTS)
       ])),
      ('tuple_federated_federated_type',
       computation_types.NamedTupleType([
           computation_types.FederatedType(
               computation_types.FederatedType(tf.int32,
                                               placement_literals.CLIENTS),
               placement_literals.CLIENTS)
       ])),
      ('federated_tuple_function_type',
       computation_types.FederatedType(
           computation_types.NamedTupleType(
               [computation_types.FunctionType(tf.int32, tf.int32)]),
           placement_literals.CLIENTS)),
  ])
  # pyformat: enable
  def test_raises_type_error(self, type_signature):
    with self.assertRaises(TypeError):
      type_analysis.check_well_formed(type_signature)
Example #29
 def test_with_int_vector(self):
   type_signature = computation_types.TensorType(tf.int32, [10])
   dtypes, shapes = type_conversions.type_to_tf_dtypes_and_shapes(
       type_signature)
   self.assert_nested_struct_eq(dtypes, tf.int32)
   self.assert_nested_struct_eq(shapes, tf.TensorShape([10]))
Example #30
def create_dummy_computation_tensorflow_identity():
    """Returns a tensorflow computation and type `(float32 -> float32)`."""
    tensor_type = computation_types.TensorType(tf.float32)
    value = tensorflow_computation_factory.create_identity(tensor_type)
    type_signature = computation_types.FunctionType(tensor_type, tensor_type)
    return value, type_signature