Example #1
    def test_serialize_tensorflow_with_data_set_sum_lambda(self):
        def _legacy_dataset_reducer_example(ds):
            return ds.reduce(np.int64(0), lambda x, y: x + y)

        comp = tensorflow_serialization.serialize_py_fn_as_tf_computation(
            _legacy_dataset_reducer_example,
            computation_types.SequenceType(tf.int64),
            context_stack_impl.context_stack)
        self.assertEqual(str(type_serialization.deserialize_type(comp.type)),
                         '(int64* -> int64)')
        self.assertEqual(comp.WhichOneof('computation'), 'tensorflow')
        parameter = tf.data.Dataset.range(5)
        results = tf.Session().run(
            tf.import_graph_def(
                serialization_utils.unpack_graph_def(
                    comp.tensorflow.graph_def),
                {
                    comp.tensorflow.parameter.sequence.iterator_string_handle_name:
                    (parameter.make_one_shot_iterator().string_handle())
                }, [comp.tensorflow.result.tensor.tensor_name]))
        self.assertEqual(results, [10])
Example #2
    def test_federated_collect_with_map_call(self):
        @computations.tf_computation()
        def make_dataset():
            return tf.data.Dataset.range(5)

        @computations.tf_computation(computation_types.SequenceType(tf.int64))
        def foo(x):
            return x.reduce(tf.constant(0, dtype=tf.int64), lambda a, b: a + b)

        @computations.federated_computation()
        def bar():
            x = intrinsics.federated_value(make_dataset(),
                                           placement_literals.CLIENTS)
            return intrinsics.federated_map(
                foo,
                intrinsics.federated_collect(intrinsics.federated_map(foo, x)))

        result = _run_test_comp_produces_federated_value(self,
                                                         bar,
                                                         num_clients=5)
        self.assertEqual(result.numpy(), 50)
Example #3
 def test_assemble_result_from_graph_with_sequence_of_odicts(self):
     type_spec = computation_types.SequenceType(
         collections.OrderedDict([('X', tf.int32), ('Y', tf.int32)]))
     binding = pb.TensorFlow.Binding(sequence=pb.TensorFlow.SequenceBinding(
         iterator_string_handle_name='foo'))
     data_set = tf.data.Dataset.from_tensors({
         'X': tf.constant(1),
         'Y': tf.constant(2)
     })
     it = data_set.make_one_shot_iterator()
     output_map = {'foo': it.string_handle()}
     result = graph_utils.assemble_result_from_graph(
         type_spec, binding, output_map)
     self.assertIsInstance(result, graph_utils.DATASET_REPRESENTATION_TYPES)
     self.assertEqual(
         str(result.output_types),
         'OrderedDict([(\'X\', tf.int32), (\'Y\', tf.int32)])')
     self.assertEqual(
         str(result.output_shapes),
         'OrderedDict([(\'X\', TensorShape([])), (\'Y\', TensorShape([]))])'
     )
Example #4
def _create_called_sequence_map(fn, arg):
    r"""Creates a to call a sequence map.

            Call
           /    \
  Intrinsic      Tuple
                /     \
            Comp       Comp

  Args:
    fn: A functional `computation_building_blocks.ComputationBuildingBlock` to
      use as the function.
    arg: A `computation_building_blocks.ComputationBuildingBlock` to use as the
      argument.

  Returns:
    A `computation_building_blocks.Call`.

  Raises:
    TypeError: If `fn` or `arg` is not a
    `computation_building_blocks.ComputationBuildingBlock` or if `fn` has a
    parameter type that is not assignable from `arg` type.
  """
    py_typecheck.check_type(
        fn, computation_building_blocks.ComputationBuildingBlock)
    py_typecheck.check_type(
        arg, computation_building_blocks.ComputationBuildingBlock)
    if not type_utils.is_assignable_from(fn.parameter_type,
                                         arg.type_signature.element):
        raise TypeError(
            'The parameter of the function is of type {}, and the argument is of '
            'an incompatible type {}.'.format(str(fn.parameter_type),
                                              str(arg.type_signature.element)))
    result_type = computation_types.SequenceType(fn.type_signature.result)
    intrinsic_type = computation_types.FunctionType(
        [fn.type_signature, arg.type_signature], result_type)
    intrinsic = computation_building_blocks.Intrinsic(
        intrinsic_defs.SEQUENCE_MAP.uri, intrinsic_type)
    tup = computation_building_blocks.Tuple((fn, arg))
    return computation_building_blocks.Call(intrinsic, tup)
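Below is a small illustrative sketch (not from the original source) of how the helper above might be exercised; the identity Lambda and the 'ds' Reference are hypothetical building blocks, and the modules are assumed to be the same ones used in the snippet:

int32_type = computation_types.TensorType(tf.int32)
# Hypothetical identity function (int32 -> int32) and a reference to an
# int32* sequence.
fn = computation_building_blocks.Lambda(
    'x', int32_type, computation_building_blocks.Reference('x', int32_type))
arg = computation_building_blocks.Reference(
    'ds', computation_types.SequenceType(tf.int32))
called_map = _create_called_sequence_map(fn, arg)
# The resulting Call should have type signature 'int32*'.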
Example #5
 def test_infer_cardinalities_success_structure(self):
     foo = cardinalities_utils.infer_cardinalities(
         structure.Struct([('A', [1, 2, 3]),
                           ('B',
                            structure.Struct([('C', [[1, 2], [3, 4], [5,
                                                                      6]]),
                                              ('D', [True, False,
                                                     True])]))]),
         computation_types.StructType([
             ('A',
              computation_types.FederatedType(tf.int32,
                                              placements.CLIENTS)),
             ('B',
              [('C',
                computation_types.FederatedType(
                    computation_types.SequenceType(tf.int32),
                    placements.CLIENTS)),
               ('D',
                computation_types.FederatedType(tf.bool,
                                                placements.CLIENTS))])
         ]))
     self.assertDictEqual(foo, {placements.CLIENTS: 3})
Example #6
    def test_graph_mode_dataset_fails_well(self):
        sequence_type = computation_types.SequenceType(tf.int32)
        federated_type = computation_types.FederatedType(
            sequence_type, placements.CLIENTS)

        with tf.Graph().as_default():

            @computations.tf_computation(sequence_type)
            def foo(z):
                value1 = z.reduce(0, lambda x, y: x + y)
                return value1

            @computations.federated_computation(federated_type)
            def bar(x):
                return intrinsics.federated_map(foo, x)

            ds1 = tf.data.Dataset.from_tensor_slices([10, 20])
            ds2 = tf.data.Dataset.from_tensor_slices([30, 40])
            with self.assertRaisesRegexp(
                    ValueError,
                    'outside of eager mode is not currently supported.'):
                bar([ds1, ds2])
Example #7
class IsStructureOfIntegersTest(parameterized.TestCase):
    @parameterized.named_parameters(
        ('int', tf.int32),
        ('ints', ([tf.int32, tf.int32], )),
        ('federated_int_at_clients',
         computation_types.FederatedType(tf.int32,
                                         placement_literals.CLIENTS)),
    )
    def test_returns_true(self, type_spec):
        self.assertTrue(type_analysis.is_structure_of_integers(type_spec))

    @parameterized.named_parameters(
        ('bool', tf.bool),
        ('string', tf.string),
        ('int_and_bool', ([tf.int32, tf.bool], )),
        ('sequence_of_ints', computation_types.SequenceType(tf.int32)),
        ('placement', computation_types.PlacementType()),
        ('function', computation_types.FunctionType(tf.int32, tf.int32)),
        ('abstract', computation_types.AbstractType('T')),
    )
    def test_returns_false(self, type_spec):
        self.assertFalse(type_analysis.is_structure_of_integers(type_spec))
Example #8
 def test_infer_cardinalities_success_anonymous_tuple(self):
     foo = cardinalities_utils.infer_cardinalities(
         anonymous_tuple.AnonymousTuple([
             ('A', [1, 2, 3]),
             ('B',
              anonymous_tuple.AnonymousTuple([('C', [[1, 2], [3, 4], [5,
                                                                      6]]),
                                              ('D', [True, False, True])]))
         ]),
         computation_types.to_type([
             ('A',
              computation_types.FederatedType(tf.int32,
                                              placement_literals.CLIENTS)),
             ('B', [('C',
                     computation_types.FederatedType(
                         computation_types.SequenceType(tf.int32),
                         placement_literals.CLIENTS)),
                    ('D',
                     computation_types.FederatedType(
                         tf.bool, placement_literals.CLIENTS))])
         ]))
     self.assertDictEqual(foo, {placement_literals.CLIENTS: 3})
Example #9
  def test_serialize_tensorflow_with_data_set_sum_lambda(self):

    def _legacy_dataset_reducer_example(ds):
      return ds.reduce(np.int64(0), lambda x, y: x + y)

    comp, extra_type_spec = tensorflow_serialization.serialize_py_fn_as_tf_computation(
        _legacy_dataset_reducer_example,
        computation_types.SequenceType(tf.int64),
        context_stack_impl.context_stack)
    self.assertEqual(
        str(type_serialization.deserialize_type(comp.type)),
        '(int64* -> int64)')
    self.assertEqual(str(extra_type_spec), '(int64* -> int64)')
    self.assertEqual(comp.WhichOneof('computation'), 'tensorflow')
    parameter = tf.data.Dataset.range(5)
    results = tf.compat.v1.Session().run(
        tf.import_graph_def(
            serialization_utils.unpack_graph_def(comp.tensorflow.graph_def), {
                comp.tensorflow.parameter.sequence.variant_tensor_name:
                    tf.data.experimental.to_variant(parameter)
            }, [comp.tensorflow.result.tensor.tensor_name]))
    self.assertEqual(results, [10])
Example #10
  def test_computation_with_int_sequence_raises(self):
    ds1_shape = tf.TensorShape([None])
    sequence_type = computation_types.SequenceType(
        computation_types.TensorType(tf.int32, ds1_shape))
    federated_type = computation_types.FederatedType(sequence_type,
                                                     placements.CLIENTS)

    @computations.tf_computation(sequence_type)
    def foo(z):
      value1 = z.reduce(0, lambda x, y: x + y)
      return value1

    @computations.federated_computation(federated_type)
    def bar(x):
      return intrinsics.federated_map(foo, x)

    ds1 = tf.data.Dataset.from_tensor_slices([10, 20]).batch(1)
    ds2 = tf.data.Dataset.from_tensor_slices([30, 40]).batch(1)

    with self.assertRaisesRegexp(ValueError, 'Please pass a list'):
      bar(ds1)
      bar(ds2)
Example #11
def _serialize_sequence_value(
    value: Union[type_conversions.TF_DATASET_REPRESENTATION_TYPES],
    type_spec: computation_types.SequenceType) -> _SerializeReturnType:
  """Serializes a `tf.data.Dataset` value into `executor_pb2.Value`.

  Args:
    value: A `tf.data.Dataset`, or equivalent.
    type_spec: A `computation_types.Type` specifying the TFF sequence type of
      `value`.

  Returns:
    A tuple `(value_proto, type_spec)` in which `value_proto` is an instance
    of `executor_pb2.Value` with the serialized content of `value`, and
    `type_spec` is the type of the serialized value.
  """
  if not isinstance(value, type_conversions.TF_DATASET_REPRESENTATION_TYPES):
    raise TypeError(
        'Cannot serialize Python type {!s} as TFF type {!s}.'.format(
            py_typecheck.type_string(type(value)),
            type_spec if type_spec is not None else 'unknown'))

  value_type = computation_types.SequenceType(
      computation_types.to_type(value.element_spec))
  if not type_spec.is_assignable_from(value_type):
    raise TypeError(
        'Cannot serialize dataset with elements of type {!s} as TFF type {!s}.'
        .format(value_type, type_spec if type_spec is not None else 'unknown'))

  # TFF must store the type spec here because TF will lose the ordering of the
  # names for `tf.data.Dataset` that return elements of
  # `collections.abc.Mapping` type. This allows TFF to preserve and restore the
  # key ordering upon deserialization.
  element_type = computation_types.to_type(value.element_spec)
  return executor_pb2.Value(
      sequence=executor_pb2.Value.Sequence(
          zipped_saved_model=_serialize_dataset(value),
          element_type=type_serialization.serialize_type(
              element_type))), type_spec
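As a hedged round-trip sketch of what this serializer produces, the higher-level serialize_value/deserialize_value pair shown in Example #15 below can be used (module name taken from that example; exact behavior may differ by version):

ds = tf.data.Dataset.range(5)
seq_type = computation_types.SequenceType(tf.int64)
value_proto, value_type = executor_serialization.serialize_value(ds, seq_type)
# `value_proto.sequence` holds the zipped SavedModel bytes plus the serialized
# element type, which is what lets deserialization restore dict key ordering.
restored, restored_type = executor_serialization.deserialize_value(value_proto)
# Iterating `restored` should yield [0, 1, 2, 3, 4].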
Example #12
class CreateIdentityTest(parameterized.TestCase):

    # pyformat: disable
    @parameterized.named_parameters(
        ('int', computation_types.TensorType(tf.int32), 10),
        ('unnamed_tuple', computation_types.StructType([tf.int32, tf.float32]),
         structure.Struct([(None, 10), (None, 10.0)])),
        ('named_tuple',
         computation_types.StructType([
             ('a', tf.int32), ('b', tf.float32)
         ]), structure.Struct([('a', 10), ('b', 10.0)])),
        ('sequence', computation_types.SequenceType(tf.int32), [10] * 3),
    )
    # pyformat: enable
    def test_returns_computation(self, type_signature, value):
        proto, _ = tensorflow_computation_factory.create_identity(
            type_signature)

        self.assertIsInstance(proto, pb.Computation)
        actual_type = type_serialization.deserialize_type(proto.type)
        expected_type = type_factory.unary_op(type_signature)
        self.assertEqual(actual_type, expected_type)
        actual_result = test_utils.run_tensorflow(proto, value)
        self.assertEqual(actual_result, value)

    @parameterized.named_parameters(
        ('none', None),
        ('federated_type', computation_types.at_server(tf.int32)),
    )
    def test_raises_type_error(self, type_signature):
        with self.assertRaises(TypeError):
            tensorflow_computation_factory.create_identity(type_signature)

    def test_feeds_and_fetches_different(self):
        proto, _ = tensorflow_computation_factory.create_identity(
            computation_types.TensorType(tf.int32))
        self.assertNotEqual(proto.tensorflow.parameter,
                            proto.tensorflow.result)
Example #13
  def test_with_incomplete_temperature_sensor_example(self):

    @computations.federated_computation(
        type_constructors.at_clients(
            computation_types.SequenceType(tf.float32)),
        type_constructors.at_server(tf.float32))
    def comp(temperatures, threshold):

      @computations.tf_computation(
          computation_types.SequenceType(tf.float32), tf.float32)
      def count(ds, t):
        return ds.reduce(
            np.int32(0), lambda n, x: n + tf.cast(tf.greater(x, t), tf.int32))

      return intrinsics.federated_map(
          count,
          intrinsics.federated_zip(
              [temperatures,
               intrinsics.federated_broadcast(threshold)]))

    num_clients = 10

    set_default_executor.set_default_executor(
        executor_stacks.create_local_executor(num_clients))

    temperatures = [
        tf.data.Dataset.range(1000).map(lambda x: tf.cast(x, tf.float32))
        for _ in range(num_clients)
    ]

    threshold = 100.0

    result = comp(temperatures, threshold)

    self.assertCountEqual([x.numpy() for x in result],
                          [899 for _ in range(num_clients)])

    set_default_executor.set_default_executor()
Example #14
 def test_sequence_map_tf_dataset(self):
     ds = tf.data.Dataset.range(3)
     map_fn = computations.tf_computation(lambda x: x + 2, tf.int64)
     sequence_map_val = _make_sequence_map_value(self._sequence_executor,
                                                 tf.int64, tf.int64)
     ds_val = _run_sync(
         self._sequence_executor.create_value(
             ds, computation_types.SequenceType(tf.int64)))
     map_fn_val = _run_sync(
         self._sequence_executor.create_value(
             computation_impl.ComputationImpl.get_proto(map_fn),
             map_fn.type_signature))
     arg_val = _run_sync(
         self._sequence_executor.create_struct([map_fn_val, ds_val]))
     result_val = _run_sync(
         self._sequence_executor.create_call(sequence_map_val, arg_val))
     self.assertIsInstance(result_val,
                           sequence_executor.SequenceExecutorValue)
     self.assertEqual(str(result_val.type_signature), 'int64*')
     self.assertIsInstance(result_val.internal_representation,
                           sequence_executor._SequenceFromMap)
     result = list(_run_sync(result_val.compute()))
     self.assertListEqual(result, [2, 3, 4])
Example #15
  def test_serialize_deserialize_sequence_of_namedtuples(self):
    test_tuple_type = collections.namedtuple('TestTuple', ['a', 'b', 'c'])

    def make_test_tuple(x):
      return test_tuple_type(
          a=x * 2, b=tf.cast(x, tf.int32), c=tf.cast(x - 1, tf.float32))

    ds = tf.data.Dataset.range(5).map(make_test_tuple)
    element_type = computation_types.to_type(
        test_tuple_type(tf.int64, tf.int32, tf.float32))
    sequence_type = computation_types.SequenceType(element=element_type)
    value_proto, value_type = executor_serialization.serialize_value(
        ds, sequence_type)
    self.assertIsInstance(value_proto, executor_pb2.Value)
    self.assertEqual(value_type, sequence_type)
    y, type_spec = executor_serialization.deserialize_value(value_proto)
    self.assert_types_equivalent(type_spec, sequence_type)
    actual_values = list(y)
    expected_values = [
        test_tuple_type(a=x * 2, b=x, c=x - 1.) for x in range(5)
    ]
    for actual, expected in zip(actual_values, expected_values):
      self.assertAllClose(actual, expected)
Example #16
  def test_get_size_info(self, num_clients):

    @computations.federated_computation(
        type_factory.at_clients(computation_types.SequenceType(tf.float32)),
        type_factory.at_server(tf.float32))
    def comp(temperatures, threshold):
      client_data = [temperatures, intrinsics.federated_broadcast(threshold)]
      result_map = intrinsics.federated_map(
          count_over, intrinsics.federated_zip(client_data))
      count_map = intrinsics.federated_map(count_total, temperatures)
      return intrinsics.federated_mean(result_map, count_map)

    factory = executor_stacks.sizing_executor_factory(num_clients=num_clients)
    default_executor.set_default_executor(factory)

    to_float = lambda x: tf.cast(x, tf.float32)
    temperatures = [tf.data.Dataset.range(10).map(to_float)] * num_clients
    threshold = 15.0
    comp(temperatures, threshold)

    # Each client receives a tf.float32 and uploads two tf.float32 values.
    expected_broadcast_bits = [num_clients * 32]
    expected_aggregate_bits = [num_clients * 32 * 2]
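    # For example, with num_clients=3 these work out to broadcast_bits == [96]
    # (one 32-bit float sent down to each of 3 clients) and
    # aggregate_bits == [192] (two 32-bit floats sent back up by each client).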
    expected_broadcast_history = {
        (('CLIENTS', num_clients),): [[1, tf.float32]] * num_clients
    }
    expected_aggregate_history = {
        (('CLIENTS', num_clients),): [[1, tf.float32]] * num_clients * 2
    }

    size_info = factory.get_size_info()

    self.assertEqual(expected_broadcast_history, size_info.broadcast_history)
    self.assertEqual(expected_aggregate_history, size_info.aggregate_history)
    self.assertEqual(expected_broadcast_bits, size_info.broadcast_bits)
    self.assertEqual(expected_aggregate_bits, size_info.aggregate_bits)
Example #17
    def test_batching_namedtuple_dataset(self):
        batch_type = collections.namedtuple('Batch', ['x', 'y'])
        federated_sequence_type = computation_types.FederatedType(
            computation_types.SequenceType(
                batch_type(
                    x=computation_types.TensorType(tf.float32, [None, 2]),
                    y=computation_types.TensorType(tf.float32, [None, 1]))),
            placements.CLIENTS,
            all_equal=False)

        @computations.tf_computation(federated_sequence_type.member)
        def test_batch_select_and_reduce(z):
            i = z.map(lambda x: x.y)
            return i.reduce(0., lambda x, y: x + tf.reduce_sum(y))

        @computations.federated_computation(federated_sequence_type)
        def map_y_sum(x):
            return intrinsics.federated_map(test_batch_select_and_reduce, x)

        ds = tf.data.Dataset.from_tensor_slices({
            'x': [[1., 2.], [3., 4.]],
            'y': [[5.], [6.]]
        }).batch(1)
        self.assertEqual(map_y_sum([ds] * 5), [np.array([[11.]])] * 5)
Example #18
class IsSumCompatibleTest(parameterized.TestCase):

  @parameterized.named_parameters([
      ('tensor_type', computation_types.TensorType(tf.int32)),
      ('tuple_type_int', computation_types.StructType([tf.int32, tf.int32],)),
      ('tuple_type_float',
       computation_types.StructType([tf.complex128, tf.float32, tf.float64])),
      ('federated_type',
       computation_types.FederatedType(tf.int32, placement_literals.CLIENTS)),
  ])
  def test_positive_examples(self, type_spec):
    self.assertTrue(type_analysis.is_sum_compatible(type_spec))

  @parameterized.named_parameters([
      ('tensor_type_bool', computation_types.TensorType(tf.bool)),
      ('tensor_type_string', computation_types.TensorType(tf.string)),
      ('tuple_type', computation_types.StructType([tf.int32, tf.bool])),
      ('sequence_type', computation_types.SequenceType(tf.int32)),
      ('placement_type', computation_types.PlacementType()),
      ('function_type', computation_types.FunctionType(tf.int32, tf.int32)),
      ('abstract_type', computation_types.AbstractType('T')),
  ])
  def test_negative_examples(self, type_spec):
    self.assertFalse(type_analysis.is_sum_compatible(type_spec))
Example #19
  def test_dataset_computation_where_client_data_is_ordered_dicts(self):
    client_data = from_tensor_slices_client_data.TestClientData(
        TEST_DATA_WITH_ORDEREDDICTS)

    dataset_computation = client_data.dataset_computation
    self.assertIsInstance(dataset_computation, computation_base.Computation)

    expected_dataset_comp_type_signature = computation_types.FunctionType(
        computation_types.to_type(tf.string),
        computation_types.SequenceType(
            collections.OrderedDict([
                ('x',
                 computation_types.TensorType(
                     client_data.element_type_structure['x'].dtype,
                     tf.TensorShape(2))),
                ('y',
                 computation_types.TensorType(
                     client_data.element_type_structure['y'].dtype, None)),
                ('z',
                 computation_types.TensorType(
                     client_data.element_type_structure['z'].dtype, None))
            ])))

    self.assertTrue(
        dataset_computation.type_signature.is_equivalent_to(
            expected_dataset_comp_type_signature))

    # Iterate over each client, invoking the computation and ensuring
    # we received a tf.data.Dataset with the correct data.
    for client_id in TEST_DATA_WITH_ORDEREDDICTS:
      dataset = dataset_computation(client_id)
      self.assertIsInstance(dataset, tf.data.Dataset)

      expected_dataset = tf.data.Dataset.from_tensor_slices(
          TEST_DATA_WITH_ORDEREDDICTS[client_id])
      self.assertSameDatasetsOfDicts(expected_dataset, dataset)
Example #20
class CreateReplicateInputTest(parameterized.TestCase):
    @parameterized.named_parameters(
        ('int', computation_types.TensorType(tf.int32), 3, 10),
        ('float', computation_types.TensorType(tf.float32), 3, 10.0),
        ('unnamed_tuple',
         computation_types.NamedTupleType([tf.int32, tf.float32]), 3,
         anonymous_tuple.AnonymousTuple([(None, 10), (None, 10.0)])),
        ('named_tuple',
         computation_types.NamedTupleType([
             ('a', tf.int32), ('b', tf.float32)
         ]), 3, anonymous_tuple.AnonymousTuple([('a', 10), ('b', 10.0)])),
        ('sequence', computation_types.SequenceType(tf.int32), 3, [10] * 3),
    )
    def test_returns_computation(self, type_signature, count, value):
        proto = tensorflow_computation_factory.create_replicate_input(
            type_signature, count)

        self.assertIsInstance(proto, pb.Computation)
        actual_type = type_serialization.deserialize_type(proto.type)
        expected_type = computation_types.FunctionType(
            type_signature, [type_signature] * count)
        self.assertEqual(actual_type, expected_type)
        actual_result = test_utils.run_tensorflow(proto, value)
        expected_result = anonymous_tuple.AnonymousTuple([(None, value)] *
                                                         count)
        self.assertEqual(actual_result, expected_result)

    @parameterized.named_parameters(
        ('none_type', None, 3),
        ('none_count', computation_types.TensorType(tf.int32), None),
        ('federated_type', type_factory.at_server(tf.int32), 3),
    )
    def test_raises_type_error(self, type_signature, count):
        with self.assertRaises(TypeError):
            tensorflow_computation_factory.create_replicate_input(
                type_signature, count)
Example #21
    def test_call_returned_directly_creates_canonical_form(self):
        @tff.federated_computation
        def init_fn():
            return tff.federated_value(42, tff.SERVER)

        @tff.federated_computation(tff.FederatedType(tf.int32, tff.SERVER),
                                   tff.FederatedType(
                                       tff.SequenceType(tf.float32),
                                       tff.CLIENTS))
        def next_fn(server_state, client_data):
            broadcast_state = tff.federated_broadcast(server_state)

            @tff.tf_computation(tf.int32, tff.SequenceType(tf.float32))
            @tf.function
            def some_transform(x, y):
                del y  # Unused
                return x + 1

            client_update = tff.federated_map(some_transform,
                                              (broadcast_state, client_data))
            aggregate_update = tff.federated_sum(client_update)
            server_output = tff.federated_value(1234, tff.SERVER)
            return aggregate_update, server_output

        @tff.federated_computation(
            tff.FederatedType(tf.int32, tff.SERVER),
            tff.FederatedType(computation_types.SequenceType(tf.float32),
                              tff.CLIENTS))
        def nested_next_fn(server_state, client_data):
            return next_fn(server_state, client_data)

        iterative_process = computation_utils.IterativeProcess(
            init_fn, nested_next_fn)
        cf = canonical_form_utils.get_canonical_form_for_iterative_process(
            iterative_process)
        self.assertIsInstance(cf, canonical_form.CanonicalForm)
Example #22
 def test_stamp_parameter_in_graph_with_bool_sequence(self):
   with tf.Graph().as_default():
     x = self._checked_stamp_parameter('foo',
                                       computation_types.SequenceType(tf.bool))
     self.assertIsInstance(x, type_conversions.TF_DATASET_REPRESENTATION_TYPES)
     self.assertEqual(x.element_spec, tf.TensorSpec(shape=(), dtype=tf.bool))
Example #23
 def test_make_whimsy_element_for_type_spec_raises_SequenceType(self):
   type_spec = computation_types.SequenceType(tf.float32)
   with self.assertRaisesRegex(ValueError,
                               'Cannot construct array for TFF type'):
     tensorflow_utils.make_whimsy_element_for_type_spec(type_spec)
Example #24
def serialize_value(value, type_spec=None):
    """Serializes a value into `executor_pb2.Value`.

  Args:
    value: A value to be serialized.
    type_spec: Optional type spec, a `tff.Type` or something convertible to it.

  Returns:
    A tuple `(value_proto, ret_type_spec)` where `value_proto` is an instance
    of `executor_pb2.Value` with the serialized content of `value`, and the
    returned `ret_type_spec` is an instance of `tff.Type` that represents the
    TFF type of the serialized value.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
    type_spec = computation_types.to_type(type_spec)
    if isinstance(value, computation_pb2.Computation):
        type_spec = type_utils.reconcile_value_type_with_type_spec(
            type_serialization.deserialize_type(value.type), type_spec)
        return executor_pb2.Value(computation=value), type_spec
    elif isinstance(value, computation_impl.ComputationImpl):
        return serialize_value(
            computation_impl.ComputationImpl.get_proto(value),
            type_utils.reconcile_value_with_type_spec(value, type_spec))
    elif isinstance(type_spec, computation_types.TensorType):
        return serialize_tensor_value(value, type_spec)
    elif isinstance(type_spec, computation_types.NamedTupleType):
        type_elements = anonymous_tuple.to_elements(type_spec)
        val_elements = anonymous_tuple.to_elements(
            anonymous_tuple.from_container(value))
        tup_elems = []
        for (e_name, e_type), (_, e_val) in zip(type_elements, val_elements):
            e_proto, _ = serialize_value(e_val, e_type)
            tup_elems.append(
                executor_pb2.Value.Tuple.Element(
                    name=e_name if e_name else None, value=e_proto))
        result_proto = (executor_pb2.Value(tuple=executor_pb2.Value.Tuple(
            element=tup_elems)))
        return result_proto, type_spec
    elif isinstance(type_spec, computation_types.SequenceType):
        if not isinstance(value,
                          type_conversions.TF_DATASET_REPRESENTATION_TYPES):
            raise TypeError(
                'Cannot serialize Python type {!s} as TFF type {!s}.'.format(
                    py_typecheck.type_string(type(value)),
                    type_spec if type_spec is not None else 'unknown'))

        value_type = computation_types.SequenceType(
            computation_types.to_type(value.element_spec))
        if not type_analysis.is_assignable_from(type_spec, value_type):
            raise TypeError(
                'Cannot serialize dataset with elements of type {!s} as TFF type {!s}.'
                .format(value_type,
                        type_spec if type_spec is not None else 'unknown'))

        return serialize_sequence_value(value), type_spec
    elif isinstance(type_spec, computation_types.FederatedType):
        if type_spec.all_equal:
            value = [value]
        else:
            py_typecheck.check_type(value, list)
        items = []
        for v in value:
            it, it_type = serialize_value(v, type_spec.member)
            type_analysis.check_assignable_from(type_spec.member, it_type)
            items.append(it)
        result_proto = executor_pb2.Value(
            federated=executor_pb2.Value.Federated(
                type=type_serialization.serialize_type(type_spec).federated,
                value=items))
        return result_proto, type_spec
    else:
        raise ValueError(
            'Unable to serialize value with Python type {} and {} TFF type.'.
            format(str(py_typecheck.type_string(type(value))),
                   str(type_spec) if type_spec is not None else 'unknown'))
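A brief usage sketch, assuming this is the executor_service_utils.serialize_value exercised in Example #27 further below; the literal values are illustrative only:

# Tensor values are handled by the TensorType branch.
tensor_proto, tensor_type = executor_service_utils.serialize_value(
    10, computation_types.TensorType(tf.int32))
# A tf.data.Dataset goes through the SequenceType branch above.
ds_proto, ds_type = executor_service_utils.serialize_value(
    tf.data.Dataset.range(5), computation_types.SequenceType(tf.int64))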
Example #25
 def test_to_representation_for_type_with_sequence_type(self):
   foo = [1, 2, 3]
   self.assertEqual(
       reference_executor.to_representation_for_type(
           foo, computation_types.SequenceType(tf.int32)), foo)
Example #26

@computations.tf_computation(tf.string)
def float_dataset_computation(x):
    del x  # Unused
    return tf.data.Dataset.range(5, output_type=tf.float32)


@computations.tf_computation(tf.int32)
def int_identity(x):
    return x


@computations.federated_computation(
    tf.int32,
    computation_types.FederatedType(computation_types.SequenceType(tf.int64),
                                    placements.CLIENTS),
    tf.float32,
)
def test_int64_sequence_struct_computation(a, dataset, b):
    return a, dataset, b


@computations.federated_computation(
    computation_types.FederatedType(computation_types.SequenceType(tf.int64),
                                    placements.CLIENTS))
def test_int64_sequence_computation(dataset):
    del dataset
    return intrinsics.federated_value(5, placements.SERVER)

Example #27
 def test_serialize_sequence_not_a_dataset(self):
     with self.assertRaisesRegex(
             TypeError,
             r'Cannot serialize Python type int as .* float32\*'):
         _ = executor_service_utils.serialize_value(
             5, computation_types.SequenceType(tf.float32))
Example #28
def create_preprocess_fn(
    num_epochs: int,
    batch_size: int,
    shuffle_buffer_size: int = NUM_EXAMPLES_PER_CLIENT,
    crop_shape: Tuple[int, int, int] = CIFAR_SHAPE,
    distort_image=False,
    num_parallel_calls: int = tf.data.experimental.AUTOTUNE
) -> computation_base.Computation:
    """Creates a preprocessing function for CIFAR-100 client datasets.

  Args:
    num_epochs: An integer representing the number of epochs to repeat the
      client datasets.
    batch_size: An integer representing the batch size on clients.
    shuffle_buffer_size: An integer representing the shuffle buffer size on
      clients. If set to a number <= 1, no shuffling occurs.
    crop_shape: A tuple (crop_height, crop_width, num_channels) specifying the
      desired crop shape for pre-processing. This tuple cannot have elements
      exceeding (32, 32, 3), element-wise. The element in the last index should
      be set to 3 to maintain the RGB image structure of the elements.
    distort_image: A boolean indicating whether to perform preprocessing that
      includes image distortion, including random crops and flips.
    num_parallel_calls: An integer representing the number of parallel calls
      used when performing `tf.data.Dataset.map`.

  Returns:
    A `tff.Computation` performing the preprocessing described above.

  Raises:
    TypeError: If `crop_shape` is not an iterable.
    ValueError: If `num_epochs` is a non-positive integer, or if `crop_shape`
      is iterable but does not have length 3.
  """
    if num_epochs < 1:
        raise ValueError('num_epochs must be a positive integer.')
    if not isinstance(crop_shape, collections.abc.Iterable):
        raise TypeError('Argument crop_shape must be an iterable.')
    crop_shape = tuple(crop_shape)
    if len(crop_shape) != 3:
        raise ValueError(
            'The crop_shape must have length 3, corresponding to a '
            'tensor of shape [height, width, channels].')

    feature_dtypes = collections.OrderedDict(
        coarse_label=computation_types.TensorType(tf.int64),
        image=computation_types.TensorType(tf.uint8, shape=(32, 32, 3)),
        label=computation_types.TensorType(tf.int64))

    image_map_fn = build_image_map(crop_shape, distort_image)

    @computations.tf_computation(
        computation_types.SequenceType(feature_dtypes))
    def preprocess_fn(dataset):
        if shuffle_buffer_size > 1:
            dataset = dataset.shuffle(shuffle_buffer_size)
        return (
            dataset.repeat(num_epochs)
            # We map before batching to ensure that the cropping occurs
            # at an image level (e.g., we do not perform the same crop on
            # every image within a batch)
            .map(image_map_fn,
                 num_parallel_calls=num_parallel_calls).batch(batch_size))

    return preprocess_fn
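A hypothetical invocation sketch of the returned computation, applied to a single synthetic element matching the feature structure declared above (the data and argument values here are illustrative only):

preprocess_fn = create_preprocess_fn(num_epochs=1, batch_size=10)
synthetic_dataset = tf.data.Dataset.from_tensors(
    collections.OrderedDict(
        coarse_label=tf.constant(0, dtype=tf.int64),
        image=tf.zeros([32, 32, 3], dtype=tf.uint8),
        label=tf.constant(0, dtype=tf.int64)))
preprocessed_dataset = preprocess_fn(synthetic_dataset)
# Each batch should contain images transformed by `image_map_fn` (cropped to
# `crop_shape`), batched into groups of at most `batch_size`.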
Example #29
def infer_type(arg: Any) -> Optional[computation_types.Type]:
    """Infers the TFF type of the argument (a `computation_types.Type` instance).

  WARNING: This function is only partially implemented.

  The kinds of arguments that are currently correctly recognized:
  - tensors, variables, and data sets,
  - things that are convertible to tensors (including numpy arrays, builtin
    types, as well as lists and tuples of any of the above, etc.),
  - nested lists, tuples, namedtuples, anonymous tuples, dict, and OrderedDicts.

  Args:
    arg: The argument, the TFF type of which to infer.

  Returns:
    Either an instance of `computation_types.Type`, or `None` if the argument is
    `None`.
  """
    # TODO(b/113112885): Implement the remaining cases here on the need basis.
    if arg is None:
        return None
    elif isinstance(arg, typed_object.TypedObject):
        return arg.type_signature
    elif tf.is_tensor(arg):
        return computation_types.TensorType(arg.dtype.base_dtype, arg.shape)
    elif isinstance(arg, TF_DATASET_REPRESENTATION_TYPES):
        element_type = computation_types.to_type(arg.element_spec)
        return computation_types.SequenceType(element_type)
    elif isinstance(arg, structure.Struct):
        return computation_types.StructType([
            (k, infer_type(v)) if k else infer_type(v)
            for k, v in structure.iter_elements(arg)
        ])
    elif py_typecheck.is_attrs(arg):
        items = attr.asdict(arg,
                            dict_factory=collections.OrderedDict,
                            recurse=False)
        return computation_types.StructWithPythonType(
            [(k, infer_type(v)) for k, v in items.items()], type(arg))
    elif py_typecheck.is_named_tuple(arg):
        # In Python 3.8 and later `_asdict` no longer returns an `OrderedDict`,
        # but rather a regular `dict`.
        items = collections.OrderedDict(arg._asdict())
        return computation_types.StructWithPythonType(
            [(k, infer_type(v)) for k, v in items.items()], type(arg))
    elif isinstance(arg, dict):
        if isinstance(arg, collections.OrderedDict):
            items = arg.items()
        else:
            items = sorted(arg.items())
        return computation_types.StructWithPythonType([(k, infer_type(v))
                                                       for k, v in items],
                                                      type(arg))
    elif isinstance(arg, (tuple, list)):
        elements = []
        all_elements_named = True
        for element in arg:
            all_elements_named &= py_typecheck.is_name_value_pair(element)
            elements.append(infer_type(element))
        # If this is a tuple of (name, value) pairs, the caller most likely intended
        # this to be a StructType, so we avoid storing the Python container.
        if elements and all_elements_named:
            return computation_types.StructType(elements)
        else:
            return computation_types.StructWithPythonType(elements, type(arg))
    elif isinstance(arg, str):
        return computation_types.TensorType(tf.string)
    elif isinstance(arg, (np.generic, np.ndarray)):
        return computation_types.TensorType(tf.dtypes.as_dtype(arg.dtype),
                                            arg.shape)
    else:
        dtype = {
            bool: tf.bool,
            int: tf.int32,
            float: tf.float32
        }.get(type(arg))
        if dtype:
            return computation_types.TensorType(dtype)
        else:
            # Now fall back onto the heavier-weight processing, as all else failed.
            # Use make_tensor_proto() to make sure to handle it consistently with
            # how TensorFlow is handling values (e.g., recognizing int as int32, as
            # opposed to int64 as in NumPy).
            try:
                # TODO(b/113112885): Find something more lightweight we could use here.
                tensor_proto = tf.make_tensor_proto(arg)
                return computation_types.TensorType(
                    tf.dtypes.as_dtype(tensor_proto.dtype),
                    tf.TensorShape(tensor_proto.tensor_shape))
            except TypeError as err:
                raise TypeError(
                    'Could not infer the TFF type of {}: {}'.format(
                        py_typecheck.type_string(type(arg)), err))
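A few illustrative calls with the results one would expect from the branches above (compact TFF type notation; exact rendering may vary across versions):

infer_type(10)                        # int32
infer_type('abc')                     # string
infer_type(tf.data.Dataset.range(3))  # int64* (a sequence type)
infer_type([1, 2.0])                  # <int32,float32> with Python container `list`
infer_type(collections.OrderedDict(a=1, b=True))  # <a=int32,b=bool>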
Example #30
# Type signature: T@SERVER -> T@CLIENTS
FEDERATED_BROADCAST = IntrinsicDef(
    'FEDERATED_BROADCAST', 'federated_broadcast',
    computation_types.FunctionType(parameter=type_constructors.at_server(
        computation_types.AbstractType('T')),
                                   result=type_constructors.at_clients(
                                       computation_types.AbstractType('T'),
                                       True)))

# Materializes client items as a sequence on the server.
#
# Type signature: {T}@CLIENTS -> T*@SERVER
FEDERATED_COLLECT = IntrinsicDef(
    'FEDERATED_COLLECT', 'federated_collect',
    computation_types.FunctionType(
        parameter=type_constructors.at_clients(
            computation_types.AbstractType('T')),
        result=type_constructors.at_server(
            computation_types.SequenceType(
                computation_types.AbstractType('T')))))

# Maps member constituents of a client value pointwise using a given mapping
# function that operates independently on each client.
#
# Type signature: <(T->U),{T}@CLIENTS> -> {U}@CLIENTS
FEDERATED_MAP = IntrinsicDef(
    'FEDERATED_MAP', 'federated_map',
    computation_types.FunctionType(parameter=[
        computation_types.FunctionType(computation_types.AbstractType('T'),
                                       computation_types.AbstractType('U')),
        type_constructors.at_clients(computation_types.AbstractType('T')),
    ],
                                   result=type_constructors.at_clients(
                                       computation_types.AbstractType('U'))))
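A concrete sketch of the FEDERATED_MAP signature above, instantiated with T=int32 and U=float32 using the decorator-based API from the earlier examples (function names here are illustrative):

@computations.tf_computation(tf.int32)
def to_float(x):
    return tf.cast(x, tf.float32)


@computations.federated_computation(
    computation_types.FederatedType(tf.int32, placements.CLIENTS))
def map_to_float(values):
    # <(int32 -> float32), {int32}@CLIENTS> -> {float32}@CLIENTS
    return intrinsics.federated_map(to_float, values)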