Example #1
 def type_signature(self):
     return computation_types.TensorType(tf.bool)
    async def compute_federated_select(
        self, arg: FederatedResolvingStrategyValue
    ) -> FederatedResolvingStrategyValue:
        client_keys_type, max_key_type, server_val_type, select_fn_type = (
            arg.type_signature)
        py_typecheck.check_type(arg.internal_representation, structure.Struct)
        client_keys, max_key, server_val, select_fn = arg.internal_representation
        # We slice up the value as-needed, so `max_key` is not used.
        del max_key, max_key_type
        del server_val_type  # unused
        py_typecheck.check_type(client_keys, list)
        py_typecheck.check_type(server_val, list)
        server_val_at_server = server_val[0]
        py_typecheck.check_type(server_val_at_server,
                                executor_value_base.ExecutorValue)
        py_typecheck.check_type(select_fn, pb.Computation)
        server = self._target_executors[placements.SERVER][0]
        clients = self._target_executors[placements.CLIENTS]
        single_key_type = computation_types.TensorType(tf.int32)
        client_keys_type.member.check_tensor()
        if (client_keys_type.member.dtype != tf.int32
                or client_keys_type.member.shape.rank != 1):
            raise TypeError(
                f'Unexpected `client_keys_type`: {client_keys_type}')
        num_keys_per_client: int = client_keys_type.member.shape.dims[0].value
        unplaced_result_type = computation_types.SequenceType(
            select_fn_type.result)
        select_fn_at_server = await server.create_value(
            select_fn, select_fn_type)
        index_fn_at_server = await executor_utils.embed_indexing_operator(
            server, client_keys_type.member, single_key_type)

        async def select_single_key(keys_at_server, key_index):
            # Grab the `key_index`th key from the keys tensor.
            index_arg = await server.create_struct(
                structure.Struct([
                    (None, keys_at_server),
                    (None, await server.create_value(key_index,
                                                     single_key_type)),
                ]))
            key_at_server = await server.create_call(index_fn_at_server,
                                                     index_arg)
            select_fn_arg = await server.create_struct(
                structure.Struct([
                    (None, server_val_at_server),
                    (None, key_at_server),
                ]))
            selected = await server.create_call(select_fn_at_server,
                                                select_fn_arg)
            return await selected.compute()

        async def select_single_client(client, keys_at_client):
            keys_at_server = await server.create_value(
                await keys_at_client.compute(), client_keys_type.member)
            unplaced_values = await asyncio.gather(*[
                select_single_key(keys_at_server, i)
                for i in range(num_keys_per_client)
            ])
            return await client.create_value(unplaced_values,
                                             unplaced_result_type)

        return FederatedResolvingStrategyValue(
            list(await asyncio.gather(*[
                select_single_client(client, keys_at_client)
                for client, keys_at_client in zip(clients, client_keys)
            ])), computation_types.at_clients(unplaced_result_type))
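For context, a minimal sketch of the `federated_select` intrinsic that this strategy resolves, written against the public `tff` API. The computation below is illustrative only (it assumes the standard `tff.federated_select` signature) and is not part of the strategy code above.

import tensorflow as tf
import tensorflow_federated as tff

@tff.tf_computation(tf.int32, tf.int32)
def select_fn(server_val, key):
    # Hypothetical per-key selection: scale the requested key by the server value.
    return server_val * key

@tff.federated_computation(
    tff.type_at_clients(tff.TensorType(tf.int32, [2])),
    tff.type_at_server(tf.int32))
def run_select(client_keys, server_val):
    max_key = tff.federated_value(2, tff.SERVER)
    # Each client receives a sequence with one selected value per requested key.
    return tff.federated_select(client_keys, max_key, server_val, select_fn)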
Example #3
    def test_init_does_not_raise_type_error_with_unknown_dimensions(self):
        server_state_type = computation_types.TensorType(shape=[None],
                                                         dtype=tf.int32)

        @computations.tf_computation
        def initialize():
            # Return a value of a type assignable to, but not equal to
            # `server_state_type`
            return tf.constant([1, 2, 3])

        @computations.tf_computation(server_state_type)
        def prepare(server_state):
            del server_state  # Unused
            return tf.constant(1.0)

        @computations.tf_computation(computation_types.SequenceType(
            tf.float32), tf.float32)
        def work(client_data, client_input):
            del client_data  # Unused
            del client_input  # Unused
            return True, []

        @computations.tf_computation
        def zero():
            return tf.constant([], dtype=tf.string)

        @computations.tf_computation(
            computation_types.TensorType(shape=[None], dtype=tf.string),
            tf.bool)
        def accumulate(accumulator, client_update):
            del accumulator  # Unused
            del client_update  # Unused
            return tf.constant(['abc'])

        @computations.tf_computation(
            computation_types.TensorType(shape=[None], dtype=tf.string),
            computation_types.TensorType(shape=[None], dtype=tf.string))
        def merge(accumulator1, accumulator2):
            del accumulator1  # Unused
            del accumulator2  # Unused
            return tf.constant(['abc'])

        @computations.tf_computation(
            computation_types.TensorType(shape=[None], dtype=tf.string))
        def report(accumulator):
            del accumulator  # Unused
            return tf.constant(1.0)

        @computations.tf_computation
        def bitwidth():
            return []

        @computations.tf_computation(
            server_state_type, (tf.float32, computation_types.StructType([])))
        def update(server_state, global_update):
            del server_state  # Unused
            del global_update  # Unused
            # Return a new server state value whose type is assignable but not equal
            # to `server_state_type`, and which is different from the type returned
            # by `initialize`.
            return tf.constant([1]), []

        try:
            forms.MapReduceForm(initialize, prepare, work, zero, accumulate,
                                merge, report, bitwidth, update)
        except TypeError:
            self.fail('Raised TypeError unexpectedly.')
Example #4
 def test_serialize_tensor_type(self, dtype, shape):
     type_signature = computation_types.TensorType(dtype, shape)
     actual_proto = type_serialization.serialize_type(type_signature)
     expected_proto = pb.Type(tensor=pb.TensorType(
         dtype=dtype.as_datatype_enum, dims=_shape_to_dims(shape)))
     self.assertEqual(actual_proto, expected_proto)
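The test above relies on a `_shape_to_dims` helper from its module. A plausible sketch of it (an assumption, not the library's own implementation) encodes each dimension as an integer, with -1 standing in for unknown dimensions:

def _shape_to_dims(shape):
    # Convert a shape (list, tuple, or tf.TensorShape) into the flat list of
    # integer dimensions expected by `pb.TensorType`, using -1 for unknowns.
    return [d if d is not None else -1 for d in tf.TensorShape(shape).as_list()]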
Example #5
def create_preprocess_fn(
    num_epochs: int,
    batch_size: int,
    shuffle_buffer_size: int = NUM_EXAMPLES_PER_CLIENT,
    crop_shape: Tuple[int, int, int] = CIFAR_SHAPE,
    distort_image=False,
    num_parallel_calls: int = tf.data.experimental.AUTOTUNE
) -> computation_base.Computation:
    """Creates a preprocessing function for CIFAR-100 client datasets.

  Args:
    num_epochs: An integer representing the number of epochs to repeat the
      client datasets.
    batch_size: An integer representing the batch size on clients.
    shuffle_buffer_size: An integer representing the shuffle buffer size on
      clients. If set to a number <= 1, no shuffling occurs.
    crop_shape: A tuple (crop_height, crop_width, num_channels) specifying the
      desired crop shape for pre-processing. This tuple cannot have elements
      exceeding (32, 32, 3), element-wise. The element in the last index should
      be set to 3 to maintain the RGB image structure of the elements.
    distort_image: A boolean indicating whether to perform preprocessing that
      includes image distortion, including random crops and flips.
    num_parallel_calls: An integer representing the number of parallel calls
      used when performing `tf.data.Dataset.map`.

  Returns:
    A `tff.Computation` performing the preprocessing described above.

  Raises:
    TypeError: If `crop_shape` is not an iterable.
    ValueError: If `num_epochs` is a non-positive integer, or if `crop_shape` is
      an iterable that does not have length 3.
  """
    if num_epochs < 1:
        raise ValueError('num_epochs must be a positive integer.')
    if not isinstance(crop_shape, collections.abc.Iterable):
        raise TypeError('Argument crop_shape must be an iterable.')
    crop_shape = tuple(crop_shape)
    if len(crop_shape) != 3:
        raise ValueError(
            'The crop_shape must have length 3, corresponding to a '
            'tensor of shape [height, width, channels].')

    # Features are intentionally sorted lexicographically by key for consistency
    # across datasets.
    feature_dtypes = collections.OrderedDict(
        coarse_label=computation_types.TensorType(tf.int64),
        image=computation_types.TensorType(tf.uint8, shape=(32, 32, 3)),
        label=computation_types.TensorType(tf.int64))

    image_map_fn = build_image_map(crop_shape, distort_image)

    @computations.tf_computation(
        computation_types.SequenceType(feature_dtypes))
    def preprocess_fn(dataset):
        if shuffle_buffer_size > 1:
            dataset = dataset.shuffle(shuffle_buffer_size)
        return (
            dataset.repeat(num_epochs)
            # We map before batching to ensure that the cropping occurs
            # at an image level (e.g., we do not perform the same crop on
            # every image within a batch).
            .map(image_map_fn,
                 num_parallel_calls=num_parallel_calls).batch(batch_size))

    return preprocess_fn
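A short usage sketch of the function above; the synthetic client dataset is illustrative, and calling the computation directly assumes the default native execution context is installed.

import collections
import tensorflow as tf

preprocess_fn = create_preprocess_fn(num_epochs=1, batch_size=2, crop_shape=(24, 24, 3))
example = collections.OrderedDict(
    coarse_label=tf.constant(1, tf.int64),
    image=tf.zeros([32, 32, 3], tf.uint8),
    label=tf.constant(5, tf.int64))
synthetic_client_data = tf.data.Dataset.from_tensors(example).repeat(4)
preprocessed = preprocess_fn(synthetic_client_data)  # Two batches of cropped images.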
Example #6
 def test_returns_tf_computation_compiled_comp(self):
     concrete_int_type = computation_types.TensorType(tf.int32)
     tf_identity = building_block_factory.create_compiled_identity(
         concrete_int_type)
     self.assert_compiles_to_tensorflow(tf_identity)
 def test_returns_empty_dict_none_value(self):
   type_signature = computation_types.TensorType(tf.int32)
   self.assertEqual(
       cardinalities_utils.infer_cardinalities(None, type_signature), {})
Example #8
 def test_int_ranges_beyond_2_pow_32(self):
     secure_sum_f = secure.SecureSumFactory(2**33, -2**33)
     # Bounds this large should be provided only with tf.int64 value_type.
     process = secure_sum_f.create(computation_types.TensorType(tf.int64))
     self.assertEqual(
         process.next.type_signature.result.result.member.dtype, tf.int64)
Example #9
 def test_value_type_incompatible_with_config_mode_raises_float(
         self, upper, lower):
     secure_sum_f = secure.SecureSumFactory(upper, lower)
     with self.assertRaises(TypeError):
         secure_sum_f.create(computation_types.TensorType(tf.int32))
Example #10
 def test_normalize_tensor_representation_int32(self):
   result = runtime.normalize_tensor_representation(
       10, computation_types.TensorType(np.int32))
   self.assertIsInstance(result, np.int32)
   self.assertEqual(result, 10)
class MemoryReleaseManagerTest(parameterized.TestCase,
                               unittest.IsolatedAsyncioTestCase,
                               tf.test.TestCase):

    # pyformat: disable
    @parameterized.named_parameters(
        # materialized values
        ('none', None, computation_types.StructType([]), None),
        ('bool', True, computation_types.TensorType(tf.bool), True),
        ('int', 1, computation_types.TensorType(tf.int32), 1),
        ('str', 'a', computation_types.TensorType(tf.string), 'a'),
        ('tensor_int', tf.constant(1), computation_types.TensorType(
            tf.int32), tf.constant(1)),
        ('tensor_str', tf.constant('a'), computation_types.TensorType(
            tf.string), tf.constant('a')),
        ('tensor_array', tf.ones([3], tf.int32),
         computation_types.TensorType(tf.int32, [3]), tf.ones([3], tf.int32)),
        ('numpy_int', np.int32(1), computation_types.TensorType(
            tf.int32), np.int32(1)),
        ('numpy_array', np.ones([3], int),
         computation_types.TensorType(tf.int32, [3]), np.ones([3], int)),

        # value references
        ('value_reference_tensor',
         program_test_utils.TestMaterializableValueReference(1),
         computation_types.TensorType(tf.int32), 1),
        ('value_reference_sequence',
         program_test_utils.TestMaterializableValueReference(
             tf.data.Dataset.from_tensor_slices(
                 [1, 2, 3])), computation_types.SequenceType(
                     tf.int32), tf.data.Dataset.from_tensor_slices([1, 2, 3])),

        # structures
        ('list', [
            True,
            program_test_utils.TestMaterializableValueReference(1), 'a'
        ], computation_types.SequenceType([tf.bool, tf.int32, tf.string
                                           ]), [True, 1, 'a']),
        ('list_empty', [], computation_types.SequenceType([]), []),
        ('list_nested', [
            [True,
             program_test_utils.TestMaterializableValueReference(1)], ['a']
        ], computation_types.SequenceType([[tf.bool, tf.int32], [tf.string]
                                           ]), [[True, 1], ['a']]),
        ('dict', {
            'a': True,
            'b': program_test_utils.TestMaterializableValueReference(1),
            'c': 'a'
        },
         computation_types.SequenceType([('a', tf.bool), ('b', tf.int32),
                                         ('c', tf.string)]), {
                                             'a': True,
                                             'b': 1,
                                             'c': 'a'
                                         }),
        ('dict_empty', {}, computation_types.SequenceType([]), {}),
        ('dict_nested', {
            'x': {
                'a': True,
                'b': program_test_utils.TestMaterializableValueReference(1)
            },
            'y': {
                'c': 'a'
            }
        },
         computation_types.SequenceType([('x', [('a', tf.bool),
                                                ('b', tf.int32)]),
                                         ('y', [('c', tf.string)])]), {
                                             'x': {
                                                 'a': True,
                                                 'b': 1
                                             },
                                             'y': {
                                                 'c': 'a'
                                             }
                                         }),
        ('attr',
         program_test_utils.TestAttrObject2(
             True, program_test_utils.TestMaterializableValueReference(1)),
         computation_types.SequenceType([
             ('a', tf.bool), ('b', tf.int32)
         ]), program_test_utils.TestAttrObject2(True, 1)),
        ('attr_nested',
         program_test_utils.TestAttrObject2(
             program_test_utils.TestAttrObject2(
                 True, program_test_utils.TestMaterializableValueReference(1)),
             program_test_utils.TestAttrObject1('a')),
         computation_types.SequenceType([('a', [('a', tf.bool),
                                                ('b', tf.int32)]),
                                         ('b', [('c', tf.string)])]),
         program_test_utils.TestAttrObject2(
             program_test_utils.TestAttrObject2(True, 1),
             program_test_utils.TestAttrObject1('a'))),
    )
    # pyformat: enable
    async def test_release_saves_value(self, value, type_signature,
                                       expected_value):
        release_mngr = memory_release_manager.MemoryReleaseManager()

        await release_mngr.release(value, type_signature, 1)

        self.assertLen(release_mngr._values, 1)
        actual_value = release_mngr._values[1]
        if isinstance(actual_value, tf.data.Dataset):
            actual_value = list(actual_value)
        if isinstance(expected_value, tf.data.Dataset):
            expected_value = list(expected_value)
        self.assertAllEqual(actual_value, expected_value)

    @parameterized.named_parameters(
        ('none', None),
        ('bool', True),
        ('int', 1),
        ('str', 'a'),
    )
    async def test_release_does_not_raise_type_error_with_key(self, key):
        release_mngr = memory_release_manager.MemoryReleaseManager()
        value = 1
        type_signature = computation_types.TensorType(tf.int32)

        try:
            await release_mngr.release(value, type_signature, key)
        except TypeError:
            self.fail('Raised TypeError unexpectedly.')

    @parameterized.named_parameters(
        ('list', []), )
    async def test_release_raises_type_error_with_key(self, key):
        release_mngr = memory_release_manager.MemoryReleaseManager()
        value = 1
        type_signature = computation_types.TensorType(tf.int32)

        with self.assertRaises(TypeError):
            await release_mngr.release(value, type_signature, key)

    @parameterized.named_parameters(
        ('0', 0),
        ('1', 1),
        ('10', 10),
    )
    def test_values_returns_values(self, count):
        release_mngr = memory_release_manager.MemoryReleaseManager()
        for i in range(count):
            release_mngr._values[i] = i * 10

        values = release_mngr.values()

        self.assertEqual(values, {i: i * 10 for i in range(count)})

    def test_values_returns_copy(self):
        release_mngr = memory_release_manager.MemoryReleaseManager()

        values_1 = release_mngr.values()
        values_2 = release_mngr.values()
        self.assertIsNot(values_1, values_2)
Example #12
import tensorflow as tf

from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import factory_utils
from tensorflow_federated.python.aggregators import mean
from tensorflow_federated.python.aggregators import sum_factory
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.core.impl.types import computation_types

_TEST_VALUE_TYPE = computation_types.TensorType(tf.float32, (2,))
_TEST_WEIGHT_TYPE = computation_types.TensorType(tf.float32)


class UnweightedAsWeightedAggregationTest(tf.test.TestCase):

  def test_returns_weighted_factory(self):
    wrapped_factory = factory_utils.as_weighted_aggregator(
        sum_factory.SumFactory())
    self.assertIsInstance(wrapped_factory, factory.WeightedAggregationFactory)

  def test_wrapped_aggregator_same_as_unweighted_aggregator(self):
    unweighted_factory = sum_factory.SumFactory()
    wrapped_factory = factory_utils.as_weighted_aggregator(unweighted_factory)

    unweighted_aggregator = unweighted_factory.create(_TEST_VALUE_TYPE)
Example #13
 def expect_tfint32_return_5(tensor_type):
     self.assert_types_identical(tensor_type,
                                 computation_types.TensorType(tf.int32))
     return 5
Example #14
 def test_with_int_vector(self):
     type_signature = computation_types.TensorType(tf.int32, [10])
     tensor_specs = type_conversions.type_to_tf_tensor_specs(type_signature)
     self.assert_nested_struct_eq(tensor_specs, tf.TensorSpec([10],
                                                              tf.int32))
Example #15
 def test_feeds_and_fetches_different(self):
     proto, _ = tensorflow_computation_factory.create_identity(
         computation_types.TensorType(tf.int32))
     self.assertNotEqual(proto.tensorflow.parameter,
                         proto.tensorflow.result)
Example #16
 def test_value_type_incompatible_with_config_mode_raises_two_processes(
         self):
     secure_sum_f = secure.SecureSumFactory(_test_estimation_process(1),
                                            _test_estimation_process(-1))
     with self.assertRaises(TypeError):
         secure_sum_f.create(computation_types.TensorType(tf.int32))
Example #17
 def test_build_encoded_broadcast_process_raises_bad_encoder(
         self, bad_encoder):
     value_type = computation_types.TensorType(tf.float32, shape=[2])
     with self.assertRaises(TypeError):
         encoding_utils.build_encoded_broadcast_process(
             value_type, bad_encoder)
Example #18
class SecureModularSumFactoryComputationTest(tf.test.TestCase,
                                             parameterized.TestCase):
    @parameterized.named_parameters(
        ('scalar_non_symmetric_int32', 8, tf.int32, False),
        ('scalar_non_symmetric_int64', 8, tf.int64, False),
        ('struct_non_symmetric', 8, _test_struct_type(tf.int32), False),
        ('scalar_symmetric_int32', 8, tf.int32, True),
        ('scalar_symmetric_int64', 8, tf.int64, True),
        ('struct_symmetric', 8, _test_struct_type(tf.int32), True),
        ('numpy_modulus_non_symmetric', np.int32(8), tf.int32, False),
        ('numpy_modulus_symmetric', np.int32(8), tf.int32, True),
    )
    def test_type_properties(self, modulus, value_type, symmetric_range):
        factory_ = secure.SecureModularSumFactory(
            modulus=modulus, symmetric_range=symmetric_range)
        self.assertIsInstance(factory_, factory.UnweightedAggregationFactory)
        value_type = computation_types.to_type(value_type)
        process = factory_.create(value_type)
        self.assertIsInstance(process, aggregation_process.AggregationProcess)

        expected_state_type = computation_types.at_server(
            computation_types.to_type(()))
        expected_measurements_type = expected_state_type

        expected_initialize_type = computation_types.FunctionType(
            parameter=None, result=expected_state_type)
        self.assertTrue(
            process.initialize.type_signature.is_equivalent_to(
                expected_initialize_type))

        expected_next_type = computation_types.FunctionType(
            parameter=collections.OrderedDict(
                state=expected_state_type,
                value=computation_types.at_clients(value_type)),
            result=measured_process.MeasuredProcessOutput(
                state=expected_state_type,
                result=computation_types.at_server(value_type),
                measurements=expected_measurements_type))
        self.assertTrue(
            process.next.type_signature.is_equivalent_to(expected_next_type))
        try:
            static_assert.assert_not_contains_unsecure_aggregation(
                process.next)
        except:  # pylint: disable=bare-except
            self.fail('Factory returned an AggregationProcess containing '
                      'non-secure aggregation.')

    def test_float_modulus_raises(self):
        with self.assertRaises(TypeError):
            secure.SecureModularSumFactory(modulus=8.0)
        with self.assertRaises(TypeError):
            secure.SecureModularSumFactory(modulus=np.float32(8.0))

    def test_modulus_not_positive_raises(self):
        with self.assertRaises(ValueError):
            secure.SecureModularSumFactory(modulus=0)
        with self.assertRaises(ValueError):
            secure.SecureModularSumFactory(modulus=-1)

    def test_symmetric_range_not_bool_raises(self):
        with self.assertRaises(TypeError):
            secure.SecureModularSumFactory(modulus=8, symmetric_range='True')

    @parameterized.named_parameters(
        ('float_type', computation_types.TensorType(tf.float32)),
        ('mixed_type', computation_types.to_type([tf.float32, tf.int32])),
        ('federated_type',
         computation_types.FederatedType(tf.int32, placements.SERVER)),
        ('function_type', computation_types.FunctionType(None, ())),
        ('sequence_type', computation_types.SequenceType(tf.float32)))
    def test_incorrect_value_type_raises(self, bad_value_type):
        with self.assertRaises(TypeError):
            secure.SecureModularSumFactory(8).create(bad_value_type)
Example #19
 def test_passes_on_tf(self):
     tf_comp = building_block_factory.create_compiled_identity(
         computation_types.TensorType(tf.int32))
     transformed = compiler.compile_local_computation_to_tensorflow(tf_comp)
     self.assertEqual(tf_comp, transformed)
Example #20
class ContainsOnlyServerPlacedDataTest(parameterized.TestCase):

  # pyformat: disable
  @parameterized.named_parameters(
      ('struct_unnamed', computation_types.StructType([
          (None, computation_types.TensorType(tf.bool)),
          (None, computation_types.TensorType(tf.int32)),
          (None, computation_types.TensorType(tf.string)),
      ])),
      ('struct_named', computation_types.StructType([
          ('a', computation_types.TensorType(tf.bool)),
          ('b', computation_types.TensorType(tf.int32)),
          ('c', computation_types.TensorType(tf.string)),
      ])),
      ('struct_nested', computation_types.StructType([
          ('x', computation_types.StructType([
              ('a', computation_types.TensorType(tf.bool)),
              ('b', computation_types.TensorType(tf.int32)),
          ])),
          ('y', computation_types.StructType([
              ('c', computation_types.TensorType(tf.string)),
          ])),
      ])),
      ('federated_struct', computation_types.FederatedType(
          computation_types.StructType([
              ('a', computation_types.TensorType(tf.bool)),
              ('b', computation_types.TensorType(tf.int32)),
              ('c', computation_types.TensorType(tf.string)),
          ]),
          placements.SERVER)),
      ('federated_sequence', computation_types.FederatedType(
          computation_types.SequenceType(
              computation_types.TensorType(tf.int32)),
          placements.SERVER)),
      ('federated_tensor', computation_types.FederatedType(
          computation_types.TensorType(tf.int32),
          placements.SERVER)),
      ('sequence', computation_types.SequenceType(
          computation_types.TensorType(tf.int32))),
      ('tensor', computation_types.TensorType(tf.int32)),
  )
  # pyformat: enable
  def test_returns_true(self, type_signature):
    result = federated_context.contains_only_server_placed_data(type_signature)

    self.assertTrue(result)

  # pyformat: disable
  @parameterized.named_parameters(
      ('federated', computation_types.FederatedType(
          computation_types.TensorType(tf.int32),
          placements.CLIENTS)),
      ('function', computation_types.FunctionType(
          computation_types.TensorType(tf.int32),
          computation_types.TensorType(tf.int32))),
      ('placement', computation_types.PlacementType()),
  )
  # pyformat: enable
  def test_returns_false(self, type_signature):
    result = federated_context.contains_only_server_placed_data(type_signature)

    self.assertFalse(result)

  @parameterized.named_parameters(
      ('none', None),
      ('bool', True),
      ('int', 1),
      ('str', 'a'),
      ('list', []),
  )
  def test_raises_type_error_with_type_signature(self, type_signature):
    with self.assertRaises(TypeError):
      federated_context.contains_only_server_placed_data(type_signature)
Example #21
 def test_noops_on_int(self):
   type_signature = computation_types.TensorType(tf.int32)
   cardinalities = cardinalities_utils.infer_cardinalities(1, type_signature)
   self.assertEmpty(cardinalities)
Example #22
 def _mapping_fn(x):
     if not tf.is_tensor(x):
         x = tf.convert_to_tensor(x)
     return computation_types.TensorType(x.dtype.base_dtype, x.shape)
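An illustrative application of the helper above (the nested value below is an assumption about how it might be used): deriving the `TensorType` structure of a nested value with `tf.nest`.

import numpy as np
import tensorflow as tf

value = {'step': 1, 'weights': np.zeros([3, 2], np.float32)}
types = tf.nest.map_structure(_mapping_fn, value)
# -> {'step': int32 TensorType, 'weights': float32[3, 2] TensorType}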
Example #23
from absl.testing import parameterized
import tensorflow as tf

from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.learning import model_update_aggregator

_float_type = computation_types.TensorType(tf.float32)


class ModelUpdateAggregatorTest(test_case.TestCase, parameterized.TestCase):
    @parameterized.named_parameters(
        ('simple', False, False),
        ('zeroing', True, False),
        ('clipping', False, True),
        ('zeroing_and_clipping', True, True),
    )
    def test_robust_aggregator_weighted(self, zeroing, clipping):
        factory_ = model_update_aggregator.robust_aggregator(zeroing=zeroing,
                                                             clipping=clipping)

        self.assertIsInstance(factory_, factory.WeightedAggregationFactory)
        process = factory_.create(_float_type, _float_type)
Example #24
def infer_type(arg: Any) -> Optional[computation_types.Type]:
    """Infers the TFF type of the argument (a `computation_types.Type` instance).

  Warning: This function is only partially implemented.

  The kinds of arguments that are currently correctly recognized:
  * tensors, variables, and data sets
  * things that are convertible to tensors (including `numpy` arrays, builtin
    types, as well as `list`s and `tuple`s of any of the above, etc.)
  * nested lists, `tuple`s, `namedtuple`s, anonymous `tuple`s, `dict`,
    `OrderedDict`s, `dataclasses`, `attrs` classes, and `tff.TypedObject`s

  Args:
    arg: The argument, the TFF type of which to infer.

  Returns:
    Either an instance of `computation_types.Type`, or `None` if the argument is
    `None`.
  """
    if arg is None:
        return None
    elif isinstance(arg, typed_object.TypedObject):
        return arg.type_signature
    elif tf.is_tensor(arg):
        # `tf.is_tensor` returns true for some things that are not actually single
        # `tf.Tensor`s, including `tf.SparseTensor`s and `tf.RaggedTensor`s.
        if isinstance(arg, tf.RaggedTensor):
            return computation_types.StructWithPythonType(
                (('flat_values', infer_type(arg.flat_values)),
                 ('nested_row_splits', infer_type(arg.nested_row_splits))),
                tf.RaggedTensor)
        elif isinstance(arg, tf.SparseTensor):
            return computation_types.StructWithPythonType(
                (('indices', infer_type(arg.indices)),
                 ('values', infer_type(arg.values)),
                 ('dense_shape', infer_type(arg.dense_shape))),
                tf.SparseTensor)
        else:
            return computation_types.TensorType(arg.dtype.base_dtype,
                                                arg.shape)
    elif isinstance(arg, TF_DATASET_REPRESENTATION_TYPES):
        element_type = computation_types.to_type(arg.element_spec)
        return computation_types.SequenceType(element_type)
    elif isinstance(arg, structure.Struct):
        return computation_types.StructType([
            (k, infer_type(v)) if k else infer_type(v)
            for k, v in structure.iter_elements(arg)
        ])
    elif py_typecheck.is_attrs(arg):
        items = named_containers.attrs_class_to_odict(arg).items()
        return computation_types.StructWithPythonType([(k, infer_type(v))
                                                       for k, v in items],
                                                      type(arg))
    elif py_typecheck.is_dataclass(arg):
        items = named_containers.dataclass_to_odict(arg).items()
        return computation_types.StructWithPythonType([(k, infer_type(v))
                                                       for k, v in items],
                                                      type(arg))
    elif py_typecheck.is_named_tuple(arg):
        # In Python 3.8 and later, `_asdict` no longer returns an `OrderedDict`,
        # but rather a regular `dict`.
        items = collections.OrderedDict(arg._asdict())
        return computation_types.StructWithPythonType(
            [(k, infer_type(v)) for k, v in items.items()], type(arg))
    elif isinstance(arg, dict):
        if isinstance(arg, collections.OrderedDict):
            items = arg.items()
        else:
            items = sorted(arg.items())
        return computation_types.StructWithPythonType([(k, infer_type(v))
                                                       for k, v in items],
                                                      type(arg))
    elif isinstance(arg, (tuple, list)):
        elements = []
        all_elements_named = True
        for element in arg:
            all_elements_named &= py_typecheck.is_name_value_pair(element)
            elements.append(infer_type(element))
        # If this is a tuple of (name, value) pairs, the caller most likely intended
        # this to be a StructType, so we avoid storing the Python container.
        if elements and all_elements_named:
            return computation_types.StructType(elements)
        else:
            return computation_types.StructWithPythonType(elements, type(arg))
    elif isinstance(arg, str):
        return computation_types.TensorType(tf.string)
    elif isinstance(arg, (np.generic, np.ndarray)):
        return computation_types.TensorType(tf.dtypes.as_dtype(arg.dtype),
                                            arg.shape)
    else:
        arg_type = type(arg)
        if arg_type is bool:
            return computation_types.TensorType(tf.bool)
        elif arg_type is int:
            # Choose the integral type based on the value.
            if arg > tf.int64.max or arg < tf.int64.min:
                raise TypeError(
                    'No integral type support for values outside range '
                    f'[{tf.int64.min}, {tf.int64.max}]. Got: {arg}')
            elif arg > tf.int32.max or arg < tf.int32.min:
                return computation_types.TensorType(tf.int64)
            else:
                return computation_types.TensorType(tf.int32)
        elif arg_type is float:
            return computation_types.TensorType(tf.float32)
        else:
            # Now fall back onto the heavier-weight processing, as all else failed.
            # Use make_tensor_proto() to make sure to handle it consistently with
            # how TensorFlow is handling values (e.g., recognizing int as int32, as
            # opposed to int64 as in NumPy).
            try:
                # TODO(b/113112885): Find something more lightweight we could use here.
                tensor_proto = tf.make_tensor_proto(arg)
                return computation_types.TensorType(
                    tf.dtypes.as_dtype(tensor_proto.dtype),
                    tf.TensorShape(tensor_proto.tensor_shape))
            except TypeError as e:
                raise TypeError('Could not infer the TFF type of {}.'.format(
                    py_typecheck.type_string(type(arg)))) from e
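A few representative inferences, as a usage sketch assuming the module-level names shown above; each result follows from the corresponding branch of the function.

import numpy as np
import tensorflow as tf

print(infer_type(10))                   # int32
print(infer_type(1 << 40))              # int64, since the value exceeds the int32 range
print(infer_type('abc'))                # string
print(infer_type(np.zeros([2, 3])))     # float64[2,3]
print(infer_type(tf.constant([1, 2])))  # int32[2]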
Example #25
import collections

import attr
import tensorflow as tf

from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.impl.computation import function_utils
from tensorflow_federated.python.core.impl.context_stack import context_base
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.wrappers import computation_wrapper

tffint32 = computation_types.TensorType(tf.int32)

tffstring = computation_types.TensorType(tf.string)


def build_zero_argument(parameter_type):
    if parameter_type is None:
        return None
    elif parameter_type.is_struct():
        return structure.map_structure(build_zero_argument, parameter_type)
    elif parameter_type == tffint32:
        return 0
    elif parameter_type == tffstring:
        return ''
    else:
        raise NotImplementedError(f'Unsupported type: {parameter_type}')
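A short usage sketch (the parameter type below is illustrative): the helper maps each leaf of a struct type to its "zero" value.

param_type = computation_types.StructType([('x', tffint32), ('y', tffstring)])
zero_arg = build_zero_argument(param_type)
# -> a `structure.Struct` with 0 for the int32 leaf and '' for the string leaf.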
Example #26
class CreateBinaryOperatorWithUpcastTest(parameterized.TestCase):

    # pyformat: disable
    @parameterized.named_parameters(
        ('add_int_same_shape', tf.math.add,
         computation_types.StructType([
             computation_types.TensorType(tf.int32),
             computation_types.TensorType(tf.int32),
         ]), [1, 2], 3),
        ('add_int_different_shape', tf.math.add,
         computation_types.StructType([
             computation_types.TensorType(tf.int32, shape=[1]),
             computation_types.TensorType(tf.int32),
         ]), [np.array([1]), 2], 3),
        ('add_int_different_types', tf.math.add,
         computation_types.StructType([
             computation_types.StructType(
                 [computation_types.TensorType(tf.int32, shape=[1])]),
             computation_types.TensorType(tf.int32),
         ]), [[np.array([1])], 2], structure.Struct([(None, 3)])),
        ('multiply_int_same_shape', tf.math.multiply,
         computation_types.StructType([
             computation_types.TensorType(tf.int32),
             computation_types.TensorType(tf.int32),
         ]), [1, 2], 2),
        ('multiply_int_different_shape', tf.math.multiply,
         computation_types.StructType([
             computation_types.TensorType(tf.int32, shape=[1]),
             computation_types.TensorType(tf.int32),
         ]), [np.array([1]), 2], 2),
        ('multiply_int_different_types', tf.math.multiply,
         computation_types.StructType([
             computation_types.StructType(
                 [computation_types.TensorType(tf.int32, shape=[1])]),
             computation_types.TensorType(tf.int32)
         ]), [[np.array([1])], 2], structure.Struct([(None, 2)])),
        ('divide_int_same_shape', tf.math.divide,
         computation_types.StructType([
             computation_types.TensorType(tf.int32),
             computation_types.TensorType(tf.int32),
         ]), [1, 2], 0.5),
        ('divide_int_different_shape', tf.math.divide,
         computation_types.StructType([
             computation_types.TensorType(tf.int32, shape=[1]),
             computation_types.TensorType(tf.int32),
         ]), [np.array([1]), 2], 0.5),
        ('divide_int_different_types', tf.math.divide,
         computation_types.StructType([
             computation_types.StructType(
                 [computation_types.TensorType(tf.int32, shape=[1])]),
             computation_types.TensorType(tf.int32),
         ]), [[np.array([1])], 2], structure.Struct([(None, 0.5)])),
        ('divide_int_same_structure', tf.math.divide,
         computation_types.StructType([
             computation_types.StructType([
                 computation_types.TensorType(tf.int32, shape=[1]),
                 computation_types.TensorType(tf.int32, shape=[1]),
             ]),
             computation_types.StructType([
                 computation_types.TensorType(tf.int32),
                 computation_types.TensorType(tf.int32),
             ]),
         ]), [[np.array([1]), np.array([2])], [2, 8]
              ], structure.Struct([(None, 0.5), (None, 0.25)])),
    )
    # pyformat: enable
    def test_returns_computation(self, operator, type_signature, operands,
                                 expected_result):
        # TODO(b/142795960): arguments in parameterized are called before test main.
        # `tf.constant` will error out on GPU and TPU without proper initialization.
        # A suggested workaround is to use numpy as argument and transform to TF
        # tensor inside the function.
        operands = tf.nest.map_structure(tf.constant, operands)
        proto, _ = tensorflow_computation_factory.create_binary_operator_with_upcast(
            type_signature, operator)

        self.assertIsInstance(proto, pb.Computation)
        actual_type = type_serialization.deserialize_type(proto.type)
        self.assertIsInstance(actual_type, computation_types.FunctionType)
        # Note: It is only useful to test the parameter type; the result type
        # depends on the `operator` used, not on the implementation of
        # `create_binary_operator_with_upcast`.
        expected_parameter_type = computation_types.StructType(type_signature)
        self.assertEqual(actual_type.parameter, expected_parameter_type)
        actual_result = test_utils.run_tensorflow(proto, operands)
        self.assertEqual(actual_result, expected_result)

    @parameterized.named_parameters(
        ('different_structures', tf.math.add,
         computation_types.StructType([
             computation_types.StructType([
                 computation_types.TensorType(tf.int32),
             ]),
             computation_types.StructType([
                 computation_types.TensorType(tf.int32),
                 computation_types.TensorType(tf.int32)
             ]),
         ]), [1, [2, 3]]))
    def test_fails(self, operator, type_signature, operands):
        operands = tf.nest.map_structure(tf.constant, operands)
        with self.assertRaises(TypeError):
            tensorflow_computation_factory.create_binary_operator_with_upcast(
                type_signature, operator)
Example #27
from tensorflow_federated.python.aggregators import sampling
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.core.impl.types import computation_types

# Convenience type aliases.
FunctionType = computation_types.FunctionType
SequenceType = computation_types.SequenceType
StructType = computation_types.StructType
StructWithPythonType = computation_types.StructWithPythonType
TensorType = computation_types.TensorType

# The type of the random seed used in sampling is an int64 tensor with shape [2].
SEED_TYPE = computation_types.TensorType(tf.int64, shape=[2])
TEST_SEED = 42
RANDOM_VALUE_TYPE = computation_types.TensorType(tf.int32, [None])


def python_container_coercion(structure, type_spec):
    @computations.tf_computation(type_spec)
    def identity(s):
        return tf.nest.map_structure(tf.identity, s)

    return identity(structure)


class BuildReservoirTypeTest(test_case.TestCase):
    def test_scalar(self):
        self.assertEqual(
Example #28
class CreateConstantTest(parameterized.TestCase, test_case.TestCase):

    # pyformat: disable
    @parameterized.named_parameters(
        ('scalar_int', 10, computation_types.TensorType(tf.int32,
                                                        [3]), [10] * 3),
        ('scalar_float', 10.0, computation_types.TensorType(tf.float32,
                                                            [3]), [10.0] * 3),
        ('scalar_with_unnamed_struct_type', 10,
         computation_types.StructType(
             [tf.int32] * 3), structure.Struct([(None, 10)] * 3)),
        ('scalar_with_named_struct_type', 10,
         computation_types.StructType([
             ('a', tf.int32), ('b', tf.int32), ('c', tf.int32)
         ]), structure.Struct([('a', 10), ('b', 10), ('c', 10)])),
        ('scalar_with_nested_struct_type', 10,
         computation_types.StructType([[tf.int32] * 3] * 3),
         structure.Struct([(None, structure.Struct([(None, 10)] * 3))] * 3)),
        ('tuple_with_struct_type', (10, 11, 12),
         computation_types.StructType([tf.int32, tf.int32, tf.int32]),
         structure.Struct([(None, 10), (None, 11), (None, 12)])),
        ('nested_struct_with_nested_struct_type', (10, (11, 12)),
         computation_types.StructType([tf.int32, [tf.int32, tf.int32]]),
         structure.Struct([(None, 10),
                           (None, structure.Struct([(None, 11),
                                                    (None, 12)]))])),
        ('nested_named_struct_with_nested_struct_type',
         collections.OrderedDict(a=10, b=collections.OrderedDict(c=11, d=12)),
         computation_types.StructType(
             collections.OrderedDict(a=tf.int32,
                                     b=collections.OrderedDict(c=tf.int32,
                                                               d=tf.int32))),
         structure.Struct([('a', 10),
                           ('b', structure.Struct([('c', 11), ('d', 12)]))])),
        ('unnamed_value_named_type',
         (10.0, ), computation_types.StructType(
             [('a', tf.float32)]), structure.Struct([('a', 10.0)])),
    )
    # pyformat: enable
    def test_returns_computation(self, value, type_signature, expected_result):
        proto, _ = tensorflow_computation_factory.create_constant(
            value, type_signature)

        self.assertIsInstance(proto, pb.Computation)
        actual_type = type_serialization.deserialize_type(proto.type)
        expected_type = computation_types.FunctionType(None, type_signature)
        expected_type.check_assignable_from(actual_type)
        actual_result = test_utils.run_tensorflow(proto)
        if isinstance(expected_result, list):
            self.assertCountEqual(actual_result, expected_result)
        else:
            self.assertEqual(actual_result, expected_result)

    @parameterized.named_parameters(
        ('non_scalar_value', np.zeros(
            [1]), computation_types.TensorType(tf.int32)),
        ('none_type', 10, None),
        ('federated_type', 10, computation_types.at_server(tf.int32)),
        ('bad_type', 10.0, computation_types.TensorType(tf.int32)),
        ('value_structure_larger_than_type_structure',
         (10.0, 11.0), computation_types.StructType([tf.float32])),
        ('value_structure_smaller_than_type_structure', (10.0, ),
         computation_types.StructType([(None, tf.float32),
                                       (None, tf.float32)])),
        ('named_value_unnamed_type', collections.OrderedDict(a=10.0),
         computation_types.StructType([(None, tf.float32)])),
    )
    def test_raises_type_error(self, value, type_signature):
        with self.assertRaises(TypeError):
            tensorflow_computation_factory.create_constant(
                value, type_signature)
Example #29
 def test_raises_with_server_cardinality_specified(self):
     with self.assertRaises(TypeError):
         data_descriptor.DataDescriptor(
             federated_computation.federated_computation(
                 lambda x: intrinsics.federated_value(x, placements.SERVER),
                 tf.int32), 1000, computation_types.TensorType(tf.int32), 3)
Example #30
 def test_with_int_vector(self):
     type_signature = computation_types.TensorType(tf.int32, [10])
     dtypes, shapes = type_conversions.type_to_tf_dtypes_and_shapes(
         type_signature)
     self.assert_nested_struct_eq(dtypes, tf.int32)
     self.assert_nested_struct_eq(shapes, tf.TensorShape([10]))