class ExecutorsTest(parameterized.TestCase):
  """Exercises `executor_test_utils.executors` in every decoration form."""

  @executor_test_utils.executors
  def test_without_arguments(self):
    # Bare decoration, no call parentheses.

    @computations.tf_computation(tf.int32)
    def increment(value):
      return value + 1

    self.assertEqual(increment(5), 6)

  @executor_test_utils.executors()
  def test_with_no_arguments(self):
    # Decorator invoked with an empty argument list.

    @computations.tf_computation(tf.int32)
    def increment(value):
      return value + 1

    self.assertEqual(increment(5), 6)

  @executor_test_utils.executors(
      ('local', executor_stacks.local_executor_factory()),)
  def test_with_one_argument(self):
    # Decorator invoked with a single (name, factory) pair.

    @computations.tf_computation(tf.int32)
    def increment(value):
      return value + 1

    self.assertEqual(increment(5), 6)

  @executor_test_utils.executors(
      ('local', executor_stacks.local_executor_factory()),
      ('sizing', executor_stacks.sizing_executor_factory()),
  )
  def test_with_two_argument(self):
    # Decorator invoked with two (name, factory) pairs.

    @computations.tf_computation(tf.int32)
    def increment(value):
      return value + 1

    self.assertEqual(increment(5), 6)
def test_context(rpc_mode='REQUEST_REPLY'):
    """Yields a remote executor wired to an in-process gRPC executor service.

    NOTE(review): this is a generator function — presumably decorated with
    `@contextlib.contextmanager` at its definition site; confirm.

    Args:
      rpc_mode: RPC mode string forwarded to `remote_executor.RemoteExecutor`.

    Yields:
      A namedtuple with fields `executor` (a reference-resolving executor over
      the remote stub) and `tracer` (the `TracingExecutor` wrapping the local
      target stack), torn down when the caller exits the context.
    """
    port = portpicker.pick_unused_port()
    server_pool = logging_pool.pool(max_workers=1)
    server = grpc.server(server_pool)
    server.add_insecure_port('[::]:{}'.format(port))
    # Three-client local stack; `{}` passes empty cardinalities.
    target_executor = executor_stacks.local_executor_factory(
        num_clients=3).create_executor({})
    tracer = executor_test_utils.TracingExecutor(target_executor)
    service = executor_service.ExecutorService(tracer)
    executor_pb2_grpc.add_ExecutorServicer_to_server(service, server)
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    remote_exec = remote_executor.RemoteExecutor(channel, rpc_mode)
    executor = reference_resolving_executor.ReferenceResolvingExecutor(
        remote_exec)
    try:
        yield collections.namedtuple('_', 'executor tracer')(executor, tracer)
    finally:
        # Tear down in reverse order of construction.
        executor.close()
        tracer.close()
        try:
            channel.close()
        except AttributeError:
            pass  # Public gRPC channel doesn't support close()
        finally:
            server.stop(None)
Ejemplo n.º 3
0
    def test_in_executor_stack(self):
        """A `DataExecutor`-backed local stack resolves a `tff_data.data` URI."""
        type_spec = computation_types.SequenceType(tf.int64)
        ex = data_executor.DataExecutor(
            eager_tf_executor.EagerTFExecutor(),
            TestDataBackend(self, 'foo://bar', tf.data.Dataset.range(5),
                            type_spec))
        # Install the data-aware executor as the leaf of the local stack.
        ex_fn = lambda device: ex
        factory = executor_stacks.local_executor_factory(
            leaf_executor_fn=ex_fn)
        context = execution_context.ExecutionContext(executor_fn=factory)

        @computations.tf_computation(type_spec)
        def foo(ds):
            return tf.cast(ds.reduce(np.int64(0), lambda p, q: p + q),
                           tf.int32)

        @computations.federated_computation
        def bar():
            ds = tff_data.data('foo://bar', type_spec)
            return foo(ds)

        with context_stack_impl.context_stack.install(context):
            result = bar()

        # 0 + 1 + 2 + 3 + 4 = 10.
        self.assertEqual(result, 10)
Ejemplo n.º 4
0
    def test_one_arg_tf_computation_with_int_param_and_result(self):
        """A unary TF computation adds 10 to its int32 argument."""

        @computations.tf_computation(tf.int32)
        def add_ten(value):
            return tf.add(value, 10)

        factory = executor_stacks.local_executor_factory()
        with executor_test_utils.install_executor(factory):
            outcome = add_ten(3)

        self.assertEqual(outcome, 13)
Ejemplo n.º 5
0
    def test_simple_no_arg_tf_computation_with_int_result(self):
        """A nullary TF computation returns the constant 10."""

        @computations.tf_computation
        def make_ten():
            return tf.constant(10)

        factory = executor_stacks.local_executor_factory()
        with executor_test_utils.install_executor(factory):
            outcome = make_ten()

        self.assertEqual(outcome, 10)
Ejemplo n.º 6
0
    def test_three_arg_tf_computation_with_int_params_and_result(self):
        """Computes (a + b) * c over three int32 arguments."""

        @computations.tf_computation(tf.int32, tf.int32, tf.int32)
        def add_then_multiply(a, b, c):
            return tf.multiply(tf.add(a, b), c)

        factory = executor_stacks.local_executor_factory()
        with executor_test_utils.install_executor(factory):
            outcome = add_then_multiply(3, 4, 5)

        self.assertEqual(outcome, 35)
Ejemplo n.º 7
0
    def test_tf_computation_with_dataset_params_and_int_result(self):
        """Reducing the dataset 0..9 yields the sum 45."""

        @computations.tf_computation(computation_types.SequenceType(tf.int32))
        def sum_elements(sequence):
            return sequence.reduce(np.int32(0), lambda total, elem: total + elem)

        factory = executor_stacks.local_executor_factory()
        with executor_test_utils.install_executor(factory):
            dataset = tf.data.Dataset.range(10).map(
                lambda value: tf.cast(value, tf.int32))
            outcome = sum_elements(dataset)

        self.assertEqual(outcome, 45)
def _create_concurrent_maxthread_tuples():
  """Builds (name, factory, concurrency) tuples for concurrencies 1..4."""
  specs = [
      ('local_executor_{}_client_thread',
       executor_stacks.local_executor_factory),
      ('sizing_executor_{}_client_thread',
       executor_stacks.sizing_executor_factory),
  ]
  tuples = []
  for concurrency in range(1, 5):
    # One local and one sizing factory per concurrency level, in that order.
    for name_template, factory_fn in specs:
      ex_factory = factory_fn(num_client_executors=concurrency)
      tuples.append(
          (name_template.format(concurrency), ex_factory, concurrency))
  return tuples
Ejemplo n.º 9
0
    def test_tuple_argument_can_accept_unnamed_elements(self):
        """An unnamed `Struct` binds positionally to a two-arg computation."""

        @computations.tf_computation(tf.int32, tf.int32)
        def add_pair(a, b):
            return a + b

        factory = executor_stacks.local_executor_factory()
        with executor_test_utils.install_executor(factory):
            # pylint:disable=no-value-for-parameter
            outcome = add_pair(structure.Struct([(None, 2), (None, 3)]))
            # pylint:enable=no-value-for-parameter

        self.assertEqual(outcome, 5)
Ejemplo n.º 10
0
 def decorator(fn, *named_executors):
   """Construct a custom `parameterized.named_parameter` decorator for `fn`."""
   # Default to a reference executor plus a local stack when the caller
   # supplies no (name, executor) pairs.
   if not named_executors:
     named_executors = [
         ('reference', reference_executor.ReferenceExecutor(compiler=None)),
         ('local', executor_stacks.local_executor_factory()),
     ]
   named_parameters_decorator = parameterized.named_parameters(
       *named_executors)
   # NOTE(review): `executor_decorator` is defined outside this view —
   # presumably it installs the executor for the test body; confirm.
   fn = executor_decorator(fn)
   fn = named_parameters_decorator(fn)
   return fn
Ejemplo n.º 11
0
    def test_tf_computation_with_structured_result(self):
        """A TF computation may return an `OrderedDict` of tensors."""

        @computations.tf_computation
        def build_dict():
            return collections.OrderedDict([
                ('a', tf.constant(10)),
                ('b', tf.constant(20)),
            ])

        factory = executor_stacks.local_executor_factory()
        with executor_test_utils.install_executor(factory):
            outcome = build_dict()

        self.assertIsInstance(outcome, collections.OrderedDict)
        self.assertDictEqual(outcome, {'a': 10, 'b': 20})
Ejemplo n.º 12
0
def create_local_execution_context(num_clients=None,
                                   max_fanout=100,
                                   clients_per_thread=1,
                                   server_tf_device=None,
                                   client_tf_devices=tuple()):
  """Creates an execution context that executes computations locally.

  All arguments are forwarded unchanged to
  `executor_stacks.local_executor_factory`.

  Returns:
    An `execution_context.ExecutionContext` whose compiler is
    `compiler.transform_to_native_form`.
  """
  factory = executor_stacks.local_executor_factory(
      num_clients=num_clients,
      max_fanout=max_fanout,
      clients_per_thread=clients_per_thread,
      server_tf_device=server_tf_device,
      client_tf_devices=client_tf_devices)
  return execution_context.ExecutionContext(
      executor_fn=factory, compiler_fn=compiler.transform_to_native_form)
Ejemplo n.º 13
0
 def test_local_executor_multi_gpus_iter_dataset(self, tf_device):
   """Two parallel clients on GPU devices succeed with iterator-style input."""
   tf_devices = tf.config.list_logical_devices(tf_device)
   # Use the first matching device for the server, if any exist.
   server_tf_device = None if not tf_devices else tf_devices[0]
   gpu_devices = tf.config.list_logical_devices('GPU')
   local_executor = executor_stacks.local_executor_factory(
       server_tf_device=server_tf_device, client_tf_devices=gpu_devices)
   with executor_test_utils.install_executor(local_executor):
     parallel_client_run = _create_tff_parallel_clients_with_iter_dataset()
     client_data = [
         tf.data.Dataset.range(10),
         tf.data.Dataset.range(10).map(lambda x: x + 1)
     ]
     client_results = parallel_client_run(client_data)
     self.assertEqual(client_results, [np.int64(46), np.int64(56)])
Ejemplo n.º 14
0
  def decorator(fn, *named_executors):
    """Wraps `fn` to run once per named executor's execution context."""
    if not named_executors:
      # Default pairs: a plain local stack and a sizing stack.
      named_executors = [
          ('local', executor_stacks.local_executor_factory()),
          ('sizing', executor_stacks.sizing_executor_factory()),
      ]

    @parameterized.named_parameters(*named_executors)
    def wrapped_fn(self, executor):
      """Install a particular execution context before running `fn`."""
      context = execution_context.ExecutionContext(executor)
      with context_stack_impl.context_stack.install(context):
        fn(self)

    return wrapped_fn
Ejemplo n.º 15
0
    def test_changing_cardinalities_across_calls(self):
        """The same computation may be invoked with different client counts."""

        @computations.federated_computation(type_factory.at_clients(tf.int32))
        def identity(values):
            return values

        small = list(range(5))
        large = list(range(10))

        factory = executor_stacks.local_executor_factory()
        with executor_test_utils.install_executor(factory):
            small_result = identity(small)
            large_result = identity(large)

        self.assertEqual(small_result, small)
        self.assertEqual(large_result, large)
Ejemplo n.º 16
0
def set_local_execution_context(num_clients=None,
                                max_fanout=100,
                                num_client_executors=32,
                                server_tf_device=None,
                                client_tf_devices=tuple()):
    """Sets an execution context that executes computations locally.

    All arguments are forwarded unchanged to
    `executor_stacks.local_executor_factory`; the resulting context is
    installed as the process-wide default context.
    """
    factory = executor_stacks.local_executor_factory(
        num_clients=num_clients,
        max_fanout=max_fanout,
        num_client_executors=num_client_executors,
        server_tf_device=server_tf_device,
        client_tf_devices=client_tf_devices)
    context = execution_context.ExecutionContext(
        executor_fn=factory, compiler_fn=compiler.transform_to_native_form)
    context_stack_impl.context_stack.set_default_context(context)
Ejemplo n.º 17
0
    def test_conflicting_cardinalities_within_call(self):
        """Mismatched client counts within a single call raise `ValueError`."""

        @computations.federated_computation([
            computation_types.at_clients(tf.int32),
            computation_types.at_clients(tf.int32),
        ])
        def identity(arg):
            return arg

        five_ints = list(range(5))
        ten_ints = list(range(10))

        factory = executor_stacks.local_executor_factory()
        with executor_test_utils.install_executor(factory):
            with self.assertRaisesRegex(ValueError,
                                        'Conflicting cardinalities'):
                identity([five_ints, ten_ints])
Ejemplo n.º 18
0
def create_local_execution_context():
    """Creates an XLA-based local execution context.

  NOTE: This context is only directly backed by an XLA executor. It does not
  support any intrinsics, lambda expressions, etc.

  Returns:
    An instance of `execution_context.ExecutionContext` backed by XLA executor.
  """
    # TODO(b/175888145): Extend this into a complete local executor stack.

    # Leaf executor is the XLA executor; sequence ops are enabled and local
    # computations are built by the XLA computation factory.
    factory = executor_stacks.local_executor_factory(
        support_sequence_ops=True,
        leaf_executor_fn=executor.XlaExecutor,
        local_computation_factory=compiler.XlaComputationFactory())
    return execution_context.ExecutionContext(executor_fn=factory)
Ejemplo n.º 19
0
def create_test_execution_context(default_num_clients=0, clients_per_thread=1):
    """Creates an execution context that executes computations locally.

    Args:
      default_num_clients: Forwarded to
        `executor_stacks.local_executor_factory`.
      clients_per_thread: Forwarded to
        `executor_stacks.local_executor_factory`.

    Returns:
      A `sync_execution_context.ExecutionContext` whose compiler rewrites
      secure intrinsics into insecure bodies, for testing.
    """
    factory = executor_stacks.local_executor_factory(
        default_num_clients=default_num_clients,
        clients_per_thread=clients_per_thread)

    # Named `_compile` (not `compiler`) to avoid shadowing the `compiler`
    # module name used elsewhere in this project.
    def _compile(comp):
        """Rewrites secure intrinsics in `comp` to insecure TF equivalents."""
        # Compile secure_sum and secure_sum_bitwidth intrinsics to insecure
        # TensorFlow computations for testing purposes.
        replaced_intrinsic_bodies, _ = intrinsic_reductions.replace_secure_intrinsics_with_insecure_bodies(
            comp.to_building_block())
        return computation_wrapper_instances.building_block_to_computation(
            replaced_intrinsic_bodies)

    return sync_execution_context.ExecutionContext(executor_fn=factory,
                                                   compiler_fn=_compile)
Ejemplo n.º 20
0
 def test_local_executor_multi_gpus_dataset_reduce(self, tf_device):
   """Multi-GPU simulation rejects `dataset.reduce`-based client work."""
   tf_devices = tf.config.list_logical_devices(tf_device)
   # Use the first matching device for the server, if any exist.
   server_tf_device = None if not tf_devices else tf_devices[0]
   gpu_devices = tf.config.list_logical_devices('GPU')
   local_executor = executor_stacks.local_executor_factory(
       server_tf_device=server_tf_device, client_tf_devices=gpu_devices)
   with executor_test_utils.install_executor(local_executor):
     parallel_client_run = _create_tff_parallel_clients_with_dataset_reduce()
     client_data = [
         tf.data.Dataset.range(10),
         tf.data.Dataset.range(10).map(lambda x: x + 1)
     ]
     # TODO(b/159180073): merge this one into iter dataset test when the
     # dataset reduce function can be correctly used for GPU device.
     with self.assertRaisesRegex(
         ValueError,
         'Detected dataset reduce op in multi-GPU TFF simulation.*'):
       parallel_client_run(client_data)
Ejemplo n.º 21
0
def test_context(rpc_mode='REQUEST_REPLY'):
    """Yields a remote executor backed by an in-process gRPC executor service.

    NOTE(review): this is a generator function — presumably decorated with
    `@contextlib.contextmanager` at its definition site; confirm.

    Args:
      rpc_mode: RPC mode string forwarded to `remote_executor.RemoteExecutor`.

    Yields:
      A namedtuple with fields `executor` and `tracers` — the
      reference-resolving remote executor, and the list of tracing executors
      created by the service (one per `create_executor` call).
    """
    port = portpicker.pick_unused_port()
    server_pool = logging_pool.pool(max_workers=1)
    server = grpc.server(server_pool)
    server.add_insecure_port('[::]:{}'.format(port))
    target_factory = executor_stacks.local_executor_factory(num_clients=3)
    tracers = []

    def _tracer_fn(cardinalities):
        # Wrap each created executor in a TracingExecutor and record it so
        # the caller can inspect (and close) every tracer on exit.
        tracer = executor_test_utils.TracingExecutor(
            target_factory.create_executor(cardinalities))
        tracers.append(tracer)
        return tracer

    service = executor_service.ExecutorService(
        executor_stacks.ResourceManagingExecutorFactory(_tracer_fn))
    executor_pb2_grpc.add_ExecutorServicer_to_server(service, server)
    server.start()

    channel = grpc.insecure_channel('localhost:{}'.format(port))
    stub = executor_pb2_grpc.ExecutorStub(channel)
    # Announce 3 clients to the service before constructing the remote
    # executor.
    serialized_cards = executor_service_utils.serialize_cardinalities(
        {placement_literals.CLIENTS: 3})
    stub.SetCardinalities(
        executor_pb2.SetCardinalitiesRequest(cardinalities=serialized_cards))

    remote_exec = remote_executor.RemoteExecutor(channel, rpc_mode)
    executor = reference_resolving_executor.ReferenceResolvingExecutor(
        remote_exec)
    try:
        yield collections.namedtuple('_', 'executor tracers')(executor,
                                                              tracers)
    finally:
        # Tear down in reverse order of construction.
        executor.close()
        for tracer in tracers:
            tracer.close()
        try:
            channel.close()
        except AttributeError:
            pass  # Public gRPC channel doesn't support close()
        finally:
            server.stop(None)
Ejemplo n.º 22
0
  def decorator(fn, *named_executors):
    """Wraps test method `fn` to run once per named executor's context."""
    # Guard against decorating a class: parameterized decoration of a class
    # here would silently skip its tests.
    if isinstance(fn, type):
      raise TypeError('Do not directly decorate classes with the executors '
                      'decorator; this will cause the tests to be skipped. '
                      'Decorate the member test functions instead.')

    if not named_executors:
      # Default pairs: a plain local stack and a sizing stack.
      named_executors = [
          ('local', executor_stacks.local_executor_factory()),
          ('sizing', executor_stacks.sizing_executor_factory()),
      ]

    @parameterized.named_parameters(*named_executors)
    def wrapped_fn(self, executor):
      """Install a particular execution context before running `fn`."""
      context = execution_context.ExecutionContext(executor)
      with context_stack_impl.context_stack.install(context):
        fn(self)

    return wrapped_fn
Ejemplo n.º 23
0
def create_local_execution_context(default_num_clients: int = 0,
                                   max_fanout=100,
                                   clients_per_thread=1,
                                   server_tf_device=None,
                                   client_tf_devices=tuple(),
                                   reference_resolving_clients=False):
  """Creates an execution context that executes computations locally.

  All arguments are forwarded to `executor_stacks.local_executor_factory`.

  Returns:
    An `execution_context.ExecutionContext` whose compiler transforms
    computations to native form.
  """
  executor_factory = executor_stacks.local_executor_factory(
      default_num_clients=default_num_clients,
      max_fanout=max_fanout,
      clients_per_thread=clients_per_thread,
      server_tf_device=server_tf_device,
      client_tf_devices=client_tf_devices,
      reference_resolving_clients=reference_resolving_clients)

  def _compiler(comp):
    # Math-to-TF transformation is disabled when clients resolve references.
    return compiler.transform_to_native_form(
        comp, transform_math_to_tf=not reference_resolving_clients)

  return execution_context.ExecutionContext(
      executor_fn=executor_factory, compiler_fn=_compiler)
Ejemplo n.º 24
0
def _create_concurrent_maxthread_tuples():
    """Builds (name, factory, concurrency, mock) tuples for 1..4 threads."""
    specs = [
        ('local_executor_{}_clients_per_thread',
         executor_stacks.local_executor_factory),
        ('sizing_executor_{}_client_thread',
         executor_stacks.sizing_executor_factory),
        ('debug_executor_{}_client_thread',
         executor_stacks.thread_debugging_executor_factory),
    ]
    tuples = []
    for concurrency in range(1, 5):
        # Local, sizing, then debug factory for each concurrency level; each
        # gets its own leaf-executor mock.
        for name_template, factory_fn in specs:
            tf_executor_mock = ExecutorMock()
            ex_factory = factory_fn(
                clients_per_thread=concurrency,
                leaf_executor_fn=tf_executor_mock)
            tuples.append((name_template.format(concurrency), ex_factory,
                           concurrency, tf_executor_mock))
    return tuples
Ejemplo n.º 25
0
class IntrinsicsTest(parameterized.TestCase):

  def assert_type(self, value, type_string):
    """Asserts `value`'s type signature compactly prints as `type_string`."""
    self.assertEqual(value.type_signature.compact_representation(), type_string)

  def test_intrinsic_construction_raises_context_error_outside_decorator(self):
    """Intrinsics cannot be constructed outside a federated context."""

    @computations.tf_computation()
    def return_2():
      return 2

    with self.assertRaises(context_base.ContextError):
      intrinsics.federated_eval(return_2, placements.SERVER)

  def test_federated_broadcast_with_server_all_equal_int(self):
    """Broadcast maps int32@SERVER to all-equal int32@CLIENTS."""

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.SERVER))
    def foo(x):
      val = intrinsics.federated_broadcast(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(int32@SERVER -> int32@CLIENTS)')

  def test_federated_broadcast_with_server_non_all_equal_int(self):
    """Broadcast rejects a non-all-equal SERVER-placed value."""
    with self.assertRaises(TypeError):

      @computations.federated_computation(
          computation_types.FederatedType(
              tf.int32, placements.SERVER, all_equal=False))
      def _(x):
        return intrinsics.federated_broadcast(x)

  def test_federated_broadcast_with_client_int(self):
    """Broadcast rejects a CLIENTS-placed value."""
    with self.assertRaises(TypeError):

      @computations.federated_computation(
          computation_types.FederatedType(tf.int32, placements.CLIENTS, True))
      def _(x):
        return intrinsics.federated_broadcast(x)

  def test_federated_broadcast_with_non_federated_val(self):
    """Broadcast rejects an unplaced (non-federated) value."""
    with self.assertRaises(TypeError):

      @computations.federated_computation(tf.int32)
      def _(x):
        return intrinsics.federated_broadcast(x)

  def test_federated_eval_rand_on_clients(self):
    """`federated_eval` at CLIENTS yields a {float32}@CLIENTS value."""

    @computations.federated_computation
    def rand_on_clients():

      @computations.tf_computation
      def rand():
        return tf.random.normal([])

      val = intrinsics.federated_eval(rand, placements.CLIENTS)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(rand_on_clients, '( -> {float32}@CLIENTS)')

  def test_federated_eval_rand_on_server(self):
    """`federated_eval` at SERVER yields a float32@SERVER value."""

    @computations.federated_computation
    def rand_on_server():

      @computations.tf_computation
      def rand():
        return tf.random.normal([])

      val = intrinsics.federated_eval(rand, placements.SERVER)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(rand_on_server, '( -> float32@SERVER)')

  def test_federated_map_with_client_all_equal_int(self):
    """Mapping over all-equal int32@CLIENTS yields {bool}@CLIENTS."""

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.CLIENTS, True))
    def foo(x):
      val = intrinsics.federated_map(
          computations.tf_computation(lambda x: x > 10), x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(int32@CLIENTS -> {bool}@CLIENTS)')

  def test_federated_map_with_client_non_all_equal_int(self):
    """Mapping over {int32}@CLIENTS yields {bool}@CLIENTS."""

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.CLIENTS))
    def foo(x):
      val = intrinsics.federated_map(
          computations.tf_computation(lambda x: x > 10), x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '({int32}@CLIENTS -> {bool}@CLIENTS)')

  def test_federated_map_with_server_int(self):
    """Mapping over int32@SERVER yields bool@SERVER."""

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.SERVER))
    def foo(x):
      val = intrinsics.federated_map(
          computations.tf_computation(lambda x: x > 10), x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(int32@SERVER -> bool@SERVER)')

  def test_federated_map_injected_zip_with_server_int(self):
    """A list of same-placement values is implicitly zipped before mapping."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.int32, placements.SERVER),
        computation_types.FederatedType(tf.int32, placements.SERVER)
    ])
    def foo(x, y):
      val = intrinsics.federated_map(
          computations.tf_computation(lambda x, y: x > 10,
                                     ), [x, y])
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(<int32@SERVER,int32@SERVER> -> bool@SERVER)')

  def test_federated_map_injected_zip_fails_different_placements(self):
    """Implicit zip fails when the listed values have different placements."""

    def foo(x, y):
      val = intrinsics.federated_map(
          computations.tf_computation(lambda x, y: x > 10,
                                     ), [x, y])
      self.assertIsInstance(val, value_base.Value)
      return val

    with self.assertRaisesRegex(
        TypeError,
        'The value to be mapped must be a FederatedType or implicitly '
        'convertible to a FederatedType.'):

      computations.federated_computation(foo, [
          computation_types.FederatedType(tf.int32, placements.SERVER),
          computation_types.FederatedType(tf.int32, placements.CLIENTS)
      ])

  def test_federated_map_with_non_federated_val(self):
    """`federated_map` rejects an unplaced (non-federated) value."""
    with self.assertRaises(TypeError):

      @computations.federated_computation(tf.int32)
      def _(x):
        return intrinsics.federated_map(
            computations.tf_computation(lambda x: x > 10), x)

  def test_federated_sum_with_client_int(self):
    """`federated_sum` maps {int32}@CLIENTS to int32@SERVER."""

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.CLIENTS))
    def foo(x):
      val = intrinsics.federated_sum(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '({int32}@CLIENTS -> int32@SERVER)')

  def test_federated_sum_with_client_string(self):
    """`federated_sum` rejects a string-typed member value."""
    with self.assertRaises(TypeError):

      @computations.federated_computation(
          computation_types.FederatedType(tf.string, placements.CLIENTS))
      def _(x):
        return intrinsics.federated_sum(x)

  def test_federated_sum_with_server_int(self):
    """`federated_sum` rejects a SERVER-placed value."""
    with self.assertRaises(TypeError):

      @computations.federated_computation(
          computation_types.FederatedType(tf.int32, placements.SERVER))
      def _(x):
        return intrinsics.federated_sum(x)

  def test_federated_zip_with_client_non_all_equal_int_and_bool(self):
    """Zipping mixed all-equal/non-all-equal CLIENTS values succeeds."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.int32, placements.CLIENTS),
        computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
    ])
    def foo(x, y):
      val = intrinsics.federated_zip([x, y])
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(
        foo, '(<{int32}@CLIENTS,bool@CLIENTS> -> {<int32,bool>}@CLIENTS)')

  def test_federated_zip_with_single_unnamed_int_client(self):
    """Zipping a one-element unnamed CLIENTS tuple yields {<int32>}@CLIENTS."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.int32, placements.CLIENTS),
    ])
    def foo(x):
      val = intrinsics.federated_zip(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(<{int32}@CLIENTS> -> {<int32>}@CLIENTS)')

  def test_federated_zip_with_single_unnamed_int_server(self):
    """Zipping a one-element unnamed SERVER tuple yields <int32>@SERVER."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.int32, placements.SERVER),
    ])
    def foo(x):
      val = intrinsics.federated_zip(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(<int32@SERVER> -> <int32>@SERVER)')

  def test_federated_zip_with_single_named_bool_clients(self):
    """Zipping a one-element named CLIENTS tuple preserves the name."""

    @computations.federated_computation([
        ('a', computation_types.FederatedType(tf.bool, placements.CLIENTS)),
    ])
    def foo(x):
      val = intrinsics.federated_zip(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(<a={bool}@CLIENTS> -> {<a=bool>}@CLIENTS)')

  def test_federated_zip_with_single_named_bool_server(self):
    """Zipping a one-element named SERVER tuple preserves the name."""

    @computations.federated_computation([
        ('a', computation_types.FederatedType(tf.bool, placements.SERVER)),
    ])
    def foo(x):
      val = intrinsics.federated_zip(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(<a=bool@SERVER> -> <a=bool>@SERVER)')

  def test_federated_zip_with_names_client_non_all_equal_int_and_bool(self):
    """Zipping a dict of CLIENTS values preserves the dict keys as names."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.int32, placements.CLIENTS),
        computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
    ])
    def foo(x, y):
      a = {'x': x, 'y': y}
      val = intrinsics.federated_zip(a)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(
        foo, '(<{int32}@CLIENTS,bool@CLIENTS> -> {<x=int32,y=bool>}@CLIENTS)')

  def test_federated_zip_with_client_all_equal_int_and_bool(self):
    """Zipping two all-equal CLIENTS values yields {<int32,bool>}@CLIENTS."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.int32, placements.CLIENTS, True),
        computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
    ])
    def foo(x, y):
      val = intrinsics.federated_zip([x, y])
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(
        foo, '(<int32@CLIENTS,bool@CLIENTS> -> {<int32,bool>}@CLIENTS)')

  def test_federated_zip_with_names_client_all_equal_int_and_bool(self):
    """Zipping a dict built from tuple elements preserves the dict keys."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.int32, placements.CLIENTS, True),
        computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
    ])
    def foo(arg):
      a = {'x': arg[0], 'y': arg[1]}
      val = intrinsics.federated_zip(a)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(
        foo, '(<int32@CLIENTS,bool@CLIENTS> -> {<x=int32,y=bool>}@CLIENTS)')

  def test_federated_zip_with_server_int_and_bool(self):
    """Zipping two SERVER values yields <int32,bool>@SERVER."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.int32, placements.SERVER),
        computation_types.FederatedType(tf.bool, placements.SERVER)
    ])
    def foo(x, y):
      val = intrinsics.federated_zip([x, y])
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(<int32@SERVER,bool@SERVER> -> <int32,bool>@SERVER)')

  def test_federated_zip_with_names_server_int_and_bool(self):
    """Zipping a named SERVER tuple preserves the element names."""

    @computations.federated_computation([
        ('a', computation_types.FederatedType(tf.int32, placements.SERVER)),
        ('b', computation_types.FederatedType(tf.bool, placements.SERVER)),
    ])
    def foo(arg):
      val = intrinsics.federated_zip(arg)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(
        foo, '(<a=int32@SERVER,b=bool@SERVER> -> <a=int32,b=bool>@SERVER)')

  def test_federated_zip_error_different_placements(self):
    """`federated_zip` rejects elements with different placements."""
    with self.assertRaises(TypeError):

      @computations.federated_computation([
          ('a', computation_types.FederatedType(tf.int32, placements.SERVER)),
          ('b', computation_types.FederatedType(tf.bool, placements.CLIENTS)),
      ])
      def _(arg):
        return intrinsics.federated_zip(arg)

  def test_federated_collect_with_client_int(self):
    """`federated_collect` maps {int32}@CLIENTS to a sequence int32*@SERVER."""

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.CLIENTS))
    def foo(x):
      val = intrinsics.federated_collect(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '({int32}@CLIENTS -> int32*@SERVER)')

  def test_federated_collect_with_server_int_fails(self):
    """`federated_collect` rejects a SERVER-placed value."""
    with self.assertRaises(TypeError):

      @computations.federated_computation(
          computation_types.FederatedType(tf.int32, placements.SERVER))
      def _(x):
        return intrinsics.federated_collect(x)

  def test_federated_mean_with_client_float32_without_weight(self):
    """Unweighted mean maps {float32}@CLIENTS to float32@SERVER."""

    @computations.federated_computation(
        computation_types.FederatedType(tf.float32, placements.CLIENTS))
    def foo(x):
      val = intrinsics.federated_mean(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '({float32}@CLIENTS -> float32@SERVER)')

  def test_federated_mean_with_all_equal_client_float32_without_weight(self):
    """Unweighted mean also accepts an all-equal CLIENTS float."""
    federated_all_equal_float = computation_types.FederatedType(
        tf.float32, placements.CLIENTS, all_equal=True)

    @computations.federated_computation(federated_all_equal_float)
    def foo(x):
      val = intrinsics.federated_mean(x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(float32@CLIENTS -> float32@SERVER)')

  def test_federated_mean_with_all_equal_client_float32_with_weight(self):
    """Weighted mean accepts the value itself as the weight."""
    federated_all_equal_float = computation_types.FederatedType(
        tf.float32, placements.CLIENTS, all_equal=True)

    @computations.federated_computation(federated_all_equal_float)
    def foo(x):
      val = intrinsics.federated_mean(x, x)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, '(float32@CLIENTS -> float32@SERVER)')

  def test_federated_mean_with_client_tuple_with_int32_weight(self):
    """Weighted mean of a structured float64 value with int32 weights."""

    @computations.federated_computation([
        computation_types.FederatedType([('x', tf.float64), ('y', tf.float64)],
                                        placements.CLIENTS),
        computation_types.FederatedType(tf.int32, placements.CLIENTS)
    ])
    def foo(x, y):
      val = intrinsics.federated_mean(x, y)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(
        foo, '(<{<x=float64,y=float64>}@CLIENTS,{int32}@CLIENTS> '
        '-> <x=float64,y=float64>@SERVER)')

  def test_federated_mean_with_client_int32_fails(self):
    """`federated_mean` rejects an integer-typed member value."""
    with self.assertRaises(TypeError):

      @computations.federated_computation(
          computation_types.FederatedType(tf.int32, placements.CLIENTS))
      def _(x):
        return intrinsics.federated_mean(x)

  def test_federated_mean_with_string_weight_fails(self):
    """federated_mean rejects non-numeric (string) weights."""
    value_type = computation_types.FederatedType(tf.float32,
                                                 placements.CLIENTS)
    weight_type = computation_types.FederatedType(tf.string,
                                                  placements.CLIENTS)
    with self.assertRaises(TypeError):

      @computations.federated_computation([value_type, weight_type])
      def _(x, y):
        return intrinsics.federated_mean(x, y)

  def test_federated_aggregate_with_client_int(self):
    """Aggregating client ints into a (total, count) pair yields their mean."""
    # The aggregation state is a named tuple holding the running sum of the
    # elements seen so far ('total') and how many were seen ('count').
    Accumulator = collections.namedtuple('Accumulator', 'total count')  # pylint: disable=invalid-name

    @computations.tf_computation
    def accumulate(accu, elem):
      """Folds one client element into the running state."""
      return Accumulator(accu.total + elem, accu.count + 1)

    @computations.tf_computation
    def merge(x, y):
      """Combines two partial states by summing their fields."""
      return Accumulator(x.total + y.total, x.count + y.count)

    @computations.tf_computation
    def report(accu):
      """Produces the final result as the ratio total / count."""
      return tf.cast(accu.total, tf.float32) / tf.cast(accu.count, tf.float32)

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.CLIENTS))
    def foo(x):
      aggregate = intrinsics.federated_aggregate(x, Accumulator(0, 0),
                                                 accumulate, merge, report)
      self.assertIsInstance(aggregate, value_base.Value)
      return aggregate

    self.assert_type(foo, '({int32}@CLIENTS -> float32@SERVER)')

  def test_federated_aggregate_with_federated_zero_fails(self):
    """federated_aggregate rejects a `zero` that is already federated."""

    @computations.federated_computation()
    def build_federated_zero():
      # A server-placed zero: deliberately the wrong kind of value, since
      # `zero` must be an unplaced value assignable to the accumulator type.
      val = intrinsics.federated_value(0, placements.SERVER)
      self.assertIsInstance(val, value_base.Value)
      return val

    # The operator to use during the first stage simply adds the element.
    @computations.tf_computation([tf.int32, tf.int32])
    def accumulate(accu, elem):
      return accu + elem

    # The operator to use during the second stage simply adds the partials.
    @computations.tf_computation([tf.int32, tf.int32])
    def merge(x, y):
      return x + y

    # The operator to use during the final stage is the identity.
    @computations.tf_computation(tf.int32)
    def report(accu):
      return accu

    def foo(x):
      return intrinsics.federated_aggregate(x, build_federated_zero(),
                                            accumulate, merge, report)

    # Tracing `foo` must fail: the placed zero is incompatible with the
    # unplaced int32 accumulator expected by `accumulate`.
    with self.assertRaisesRegex(
        TypeError, 'Expected `zero` to be assignable to type int32, '
        'but was of incompatible type int32@SERVER'):
      computations.federated_computation(
          foo, computation_types.FederatedType(tf.int32, placements.CLIENTS))

  def test_federated_aggregate_with_unknown_dimension(self):
    """Aggregation works when the accumulator has a dimension of unknown size."""
    Accumulator = collections.namedtuple('Accumulator', ['samples'])  # pylint: disable=invalid-name
    accumulator_type = computation_types.NamedTupleType(
        Accumulator(
            samples=computation_types.TensorType(dtype=tf.int32, shape=[None])))

    @computations.tf_computation()
    def build_empty_accumulator():
      """Returns an accumulator holding zero samples."""
      return Accumulator(samples=tf.zeros(shape=[0], dtype=tf.int32))

    @computations.tf_computation([accumulator_type, tf.int32])
    def accumulate(accu, elem):
      """Appends one element to the sample tensor, growing its size."""
      return Accumulator(
          samples=tf.concat(
              [accu.samples, tf.expand_dims(elem, axis=0)], axis=0))

    @computations.tf_computation([accumulator_type, accumulator_type])
    def merge(x, y):
      """Concatenates the samples of two partial accumulators."""
      return Accumulator(samples=tf.concat([x.samples, y.samples], axis=0))

    @computations.tf_computation(accumulator_type)
    def report(accu):
      """Returns the accumulated samples unchanged."""
      return accu

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.CLIENTS))
    def foo(x):
      aggregate = intrinsics.federated_aggregate(x, build_empty_accumulator(),
                                                 accumulate, merge, report)
      self.assertIsInstance(aggregate, value_base.Value)
      return aggregate

    self.assert_type(foo, '({int32}@CLIENTS -> <samples=int32[?]>@SERVER)')

  def test_federated_reduce_with_tf_add_raw_constant(self):
    """federated_reduce with tf.add and a raw zero gives int32@SERVER."""

    @computations.federated_computation(
        computation_types.FederatedType(tf.int32, placements.CLIENTS))
    def foo(x):
      add = computations.tf_computation(tf.add)
      reduced = intrinsics.federated_reduce(x, 0, add)
      self.assertIsInstance(reduced, value_base.Value)
      return reduced

    expected_type = '({int32}@CLIENTS -> int32@SERVER)'
    self.assert_type(foo, expected_type)

  def test_num_over_temperature_threshold_example(self):
    """Counts, across clients, readings above a server-broadcast threshold."""

    @computations.federated_computation([
        computation_types.FederatedType(tf.float32, placements.CLIENTS),
        computation_types.FederatedType(tf.float32, placements.SERVER)
    ])
    def foo(temperatures, threshold):
      # Per-client indicator: 1 if the reading exceeds the threshold, else 0.
      over = computations.tf_computation(
          lambda x, y: tf.cast(tf.greater(x, y), tf.int32))
      count = intrinsics.federated_sum(
          intrinsics.federated_map(
              over,
              [temperatures,
               intrinsics.federated_broadcast(threshold)]))
      self.assertIsInstance(count, value_base.Value)
      return count

    self.assert_type(foo,
                     '(<{float32}@CLIENTS,float32@SERVER> -> int32@SERVER)')

  @parameterized.named_parameters(('test_n_2', 2), ('test_n_3', 3),
                                  ('test_n_5', 5))
  def test_n_tuple_federated_zip_tensor_args(self, n):
    """federated_zip over n client int32s yields a client-placed n-tuple."""
    fed_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)
    expected_type = computation_types.FunctionType(
        computation_types.NamedTupleType([fed_type] * n),
        computation_types.FederatedType([tf.int32] * n, placements.CLIENTS))

    @computations.federated_computation([fed_type] * n)
    def foo(x):
      zipped = intrinsics.federated_zip(x)
      self.assertIsInstance(zipped, value_base.Value)
      return zipped

    self.assert_type(foo, expected_type.compact_representation())

  @parameterized.named_parameters(
      ('test_n_2_int', 2,
       computation_types.FederatedType(tf.int32, placements.CLIENTS)),
      ('test_n_3_int', 3,
       computation_types.FederatedType(tf.int32, placements.CLIENTS)),
      ('test_n_5_int', 5,
       computation_types.FederatedType(tf.int32, placements.CLIENTS)),
      ('test_n_2_tuple', 2,
       computation_types.FederatedType([tf.int32, tf.int32],
                                       placements.CLIENTS)),
      ('test_n_3_tuple', 3,
       computation_types.FederatedType([tf.int32, tf.int32],
                                       placements.CLIENTS)),
      ('test_n_5_tuple', 5,
       computation_types.FederatedType([tf.int32, tf.int32],
                                       placements.CLIENTS)))
  def test_named_n_tuple_federated_zip(self, n, fed_type):
    """Tests federated_zip over fully-named and mixed-named n-tuples."""
    # Argument: a tuple of n identical federated values.
    initial_tuple_type = computation_types.NamedTupleType([fed_type] * n)
    # Expected result when every element is named by its stringified index.
    named_fed_type = computation_types.FederatedType(
        [(str(k), fed_type.member) for k in range(n)], placements.CLIENTS)
    # Expected result when only even-indexed elements carry a name.
    mixed_fed_type = computation_types.FederatedType(
        [(str(k), fed_type.member) if k % 2 == 0 else fed_type.member
         for k in range(n)], placements.CLIENTS)
    named_function_type = computation_types.FunctionType(
        initial_tuple_type, named_fed_type)
    mixed_function_type = computation_types.FunctionType(
        initial_tuple_type, mixed_fed_type)

    @computations.federated_computation([fed_type] * n)
    def foo(x):
      # Zipping a dict names every element after its index.
      arg = {str(k): x[k] for k in range(n)}
      val = intrinsics.federated_zip(arg)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(foo, named_function_type.compact_representation())

    def _make_test_tuple(x, k):
      """Make a test tuple with a name if k is even, otherwise unnamed."""
      if k % 2 == 0:
        return str(k), x[k]
      else:
        return None, x[k]

    @computations.federated_computation([fed_type] * n)
    def bar(x):
      # Zipping an AnonymousTuple with names only on even-indexed elements.
      arg = anonymous_tuple.AnonymousTuple(
          _make_test_tuple(x, k) for k in range(n))
      val = intrinsics.federated_zip(arg)
      self.assertIsInstance(val, value_base.Value)
      return val

    self.assert_type(bar, mixed_function_type.compact_representation())

  @parameterized.named_parameters([
      ('test_n_' + str(n) + '_m_' + str(m), n, m)
      for n, m in itertools.product([1, 2, 3], [1, 2, 3])
  ])
  def test_n_tuple_federated_zip_mixed_args(self, n, m):
    """federated_zip over n tuple-valued plus m scalar client values."""
    tuple_fed_type = computation_types.FederatedType([tf.int32, tf.int32],
                                                     placements.CLIENTS)
    single_fed_type = computation_types.FederatedType(tf.int32,
                                                      placements.CLIENTS)
    function_type = computation_types.FunctionType(
        computation_types.NamedTupleType(
            [tuple_fed_type] * n + [single_fed_type] * m),
        computation_types.FederatedType(
            [[tf.int32, tf.int32]] * n + [tf.int32] * m, placements.CLIENTS))

    @computations.federated_computation([
        computation_types.FederatedType(
            computation_types.NamedTupleType([tf.int32, tf.int32]),
            placements.CLIENTS)
    ] * n + [computation_types.FederatedType(tf.int32, placements.CLIENTS)] * m)
    def baz(x):
      zipped = intrinsics.federated_zip(x)
      self.assertIsInstance(zipped, value_base.Value)
      return zipped

    self.assert_type(baz, function_type.compact_representation())

  def test_federated_apply_raises_warning(self):
    """federated_apply still works but emits a DeprecationWarning."""
    with warnings.catch_warnings(record=True) as w:
      warnings.simplefilter('always')

      @computations.federated_computation(
          computation_types.FederatedType(tf.int32, placements.SERVER))
      def foo(x):
        val = intrinsics.federated_apply(
            computations.tf_computation(lambda x: x * x), x)
        self.assertIsInstance(val, value_base.Value)
        return val

      self.assertLen(w, 1)
      # `w[0].category` is the warning *class*; check it with issubclass
      # rather than instantiating it just to use assertIsInstance.
      self.assertTrue(issubclass(w[0].category, DeprecationWarning))
      self.assertIn('tff.federated_apply() is deprecated', str(w[0].message))
      self.assert_type(foo, '(int32@SERVER -> int32@SERVER)')

  def test_federated_value_with_bool_on_clients(self):
    """Placing a bool at CLIENTS produces an all-equal bool@CLIENTS."""

    @computations.federated_computation(tf.bool)
    def foo(x):
      placed = intrinsics.federated_value(x, placements.CLIENTS)
      self.assertIsInstance(placed, value_base.Value)
      return placed

    expected_type = '(bool -> bool@CLIENTS)'
    self.assert_type(foo, expected_type)

  def test_federated_value_raw_np_scalar(self):
    """Raw numpy scalars can be placed at the server and round-trip intact."""

    @computations.federated_computation
    def test_np_values():
      tff_float = intrinsics.federated_value(
          np.float64(0), placements.SERVER)
      self.assertIsInstance(tff_float, value_base.Value)
      self.assert_type(tff_float, 'float64@SERVER')
      tff_int = intrinsics.federated_value(np.int64(0), placements.SERVER)
      self.assertIsInstance(tff_int, value_base.Value)
      self.assert_type(tff_int, 'int64@SERVER')
      return (tff_float, tff_int)

    float_result, int_result = test_np_values()
    self.assertEqual(float_result, 0.0)
    self.assertEqual(int_result, 0)

  def test_federated_value_raw_tf_scalar_variable(self):
    """A raw tf.Variable is rejected in a federated context."""
    variable = tf.Variable(initial_value=0., name='test_var')
    expected_message = ('TensorFlow construct (.*) has been '
                        'encountered in a federated context.')
    with self.assertRaisesRegex(TypeError, expected_message):
      _ = intrinsics.federated_value(variable, placements.SERVER)

  def test_federated_value_with_bool_on_server(self):
    """Placing a bool at SERVER produces a bool@SERVER."""

    @computations.federated_computation(tf.bool)
    def foo(x):
      placed = intrinsics.federated_value(x, placements.SERVER)
      self.assertIsInstance(placed, value_base.Value)
      return placed

    expected_type = '(bool -> bool@SERVER)'
    self.assert_type(foo, expected_type)

  def test_sequence_sum(self):
    """sequence_sum works on unplaced, server, and client sequences."""
    sequence_type = computation_types.SequenceType(tf.int32)

    @computations.federated_computation(sequence_type)
    def foo1(x):
      total = intrinsics.sequence_sum(x)
      self.assertIsInstance(total, value_base.Value)
      return total

    self.assert_type(foo1, '(int32* -> int32)')

    @computations.federated_computation(
        computation_types.FederatedType(sequence_type, placements.SERVER))
    def foo2(x):
      total = intrinsics.sequence_sum(x)
      self.assertIsInstance(total, value_base.Value)
      return total

    self.assert_type(foo2, '(int32*@SERVER -> int32@SERVER)')

    @computations.federated_computation(
        computation_types.FederatedType(sequence_type, placements.CLIENTS))
    def foo3(x):
      total = intrinsics.sequence_sum(x)
      self.assertIsInstance(total, value_base.Value)
      return total

    self.assert_type(foo3, '({int32*}@CLIENTS -> {int32}@CLIENTS)')

  def test_sequence_map(self):
    """sequence_map applies a scalar fn to sequences at all placements."""

    @computations.tf_computation(tf.int32)
    def over_threshold(x):
      """True iff the element exceeds 10."""
      return x > 10

    sequence_type = computation_types.SequenceType(tf.int32)

    @computations.federated_computation(sequence_type)
    def foo1(x):
      mapped = intrinsics.sequence_map(over_threshold, x)
      self.assertIsInstance(mapped, value_base.Value)
      return mapped

    self.assert_type(foo1, '(int32* -> bool*)')

    @computations.federated_computation(
        computation_types.FederatedType(sequence_type, placements.SERVER))
    def foo2(x):
      mapped = intrinsics.sequence_map(over_threshold, x)
      self.assertIsInstance(mapped, value_base.Value)
      return mapped

    self.assert_type(foo2, '(int32*@SERVER -> bool*@SERVER)')

    @computations.federated_computation(
        computation_types.FederatedType(sequence_type, placements.CLIENTS))
    def foo3(x):
      mapped = intrinsics.sequence_map(over_threshold, x)
      self.assertIsInstance(mapped, value_base.Value)
      return mapped

    self.assert_type(foo3, '({int32*}@CLIENTS -> {bool*}@CLIENTS)')

  def test_sequence_reduce(self):
    """sequence_reduce folds int32 sequences at all three placements."""
    add_numbers = computations.tf_computation(tf.add, [tf.int32, tf.int32])
    sequence_type = computation_types.SequenceType(tf.int32)

    @computations.federated_computation(sequence_type)
    def foo1(x):
      reduced = intrinsics.sequence_reduce(x, 0, add_numbers)
      self.assertIsInstance(reduced, value_base.Value)
      return reduced

    self.assert_type(foo1, '(int32* -> int32)')

    @computations.federated_computation(
        computation_types.FederatedType(sequence_type, placements.SERVER))
    def foo2(x):
      reduced = intrinsics.sequence_reduce(x, 0, add_numbers)
      self.assertIsInstance(reduced, value_base.Value)
      return reduced

    self.assert_type(foo2, '(int32*@SERVER -> int32@SERVER)')

    @computations.federated_computation(
        computation_types.FederatedType(sequence_type, placements.CLIENTS))
    def foo3(x):
      reduced = intrinsics.sequence_reduce(x, 0, add_numbers)
      self.assertIsInstance(reduced, value_base.Value)
      return reduced

    self.assert_type(foo3, '({int32*}@CLIENTS -> {int32}@CLIENTS)')

  @executor_test_utils.executors(
      ('local', executor_stacks.local_executor_factory()),)
  def test_federated_zip_with_twenty_elements_local_executor(self):
    """Zipping many federated values completes on the local executor."""
    num_elements = 20
    num_clients = 2

    @computations.federated_computation(
        [computation_types.FederatedType(tf.int32, placements.CLIENTS)] *
        num_elements)
    def foo(x):
      zipped = intrinsics.federated_zip(x)
      self.assertIsInstance(zipped, value_base.Value)
      return zipped

    data = [list(range(num_clients)) for _ in range(num_elements)]

    # Regression check: the local executor used to scale factorially with the
    # number of zipped elements, so this call would never have returned.
    foo(data)
Ejemplo n.º 26
0
def initialize_default_execution_context():
  """Installs a local-executor-backed execution context as the default."""
  context_stack_impl.context_stack.set_default_context(
      execution_context.ExecutionContext(
          executor_stacks.local_executor_factory()))
Ejemplo n.º 27
0
                'The return type of next_fn must be assignable to the first parameter'
        ):

            @computations.federated_computation(tf.int32)
            def add_bad_result(_):
                return 0.0

            iterative_process.IterativeProcess(initialize_fn=initialize,
                                               next_fn=add_bad_result)

        with self.assertRaisesRegex(
                TypeError,
                'The return type of next_fn must be assignable to the first parameter'
        ):

            @computations.federated_computation(tf.int32)
            def add_bad_multi_result(_):
                return 0.0, 0

            iterative_process.IterativeProcess(initialize_fn=initialize,
                                               next_fn=add_bad_multi_result)


if __name__ == '__main__':
    # Note: num_clients must be explicit here to correctly test the broadcast
    # behavior. Otherwise TFF will infer there are zero clients, which is an
    # error.
    executor = executor_stacks.local_executor_factory(num_clients=3)
    default_executor.set_default_executor(executor)
    # Run every test case under the executor installed above.
    test.main()
Ejemplo n.º 28
0
def _do_not_use_set_local_execution_context():
    """Installs a local execution context (native-form compiler) as default."""
    local_factory = executor_stacks.local_executor_factory()
    set_default_context.set_default_context(
        execution_context.ExecutionContext(
            executor_fn=local_factory,
            compiler_fn=_do_not_use_transform_to_native_form))
Ejemplo n.º 29
0
class ExecutorStacksTest(parameterized.TestCase):
    """Tests construction and behavior of the built-in executor stacks."""

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
        ('debug_executor', executor_stacks.thread_debugging_executor_factory),
    )
    def test_construction_with_no_args(self, executor_factory_fn):
        """Each factory is constructible with defaults and returns the expected type."""
        executor_factory_impl = executor_factory_fn()
        self.assertIsInstance(executor_factory_impl,
                              executor_stacks.ResourceManagingExecutorFactory)

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    )
    def test_construction_raises_with_max_fanout_one(self,
                                                     executor_factory_fn):
        """A max_fanout of 1 is rejected with ValueError."""
        with self.assertRaises(ValueError):
            executor_factory_fn(max_fanout=1)

    @parameterized.named_parameters(
        ('local_executor_none_clients',
         executor_stacks.local_executor_factory()),
        ('sizing_executor_none_clients',
         executor_stacks.sizing_executor_factory()),
        ('local_executor_three_clients',
         executor_stacks.local_executor_factory(num_clients=3)),
        ('sizing_executor_three_clients',
         executor_stacks.sizing_executor_factory(num_clients=3)),
    )
    @test_utils.skip_test_for_multi_gpu
    def test_execution_of_temperature_sensor_example(self, executor):
        """The temperature-sensor example produces its known expected mean."""
        comp = _temperature_sensor_example_next_fn()
        to_float = lambda x: tf.cast(x, tf.float32)
        temperatures = [
            tf.data.Dataset.range(10).map(to_float),
            tf.data.Dataset.range(20).map(to_float),
            tf.data.Dataset.range(30).map(to_float),
        ]
        threshold = 15.0

        with executor_test_utils.install_executor(executor):
            result = comp(temperatures, threshold)

        self.assertAlmostEqual(result, 8.333, places=3)

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    )
    def test_execution_with_inferred_clients_larger_than_fanout(
            self, executor_factory_fn):
        """An inferred client count above max_fanout still sums correctly."""
        @computations.federated_computation(
            computation_types.at_clients(tf.int32))
        def foo(x):
            return intrinsics.federated_sum(x)

        executor = executor_factory_fn(max_fanout=3)
        with executor_test_utils.install_executor(executor):
            result = foo([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

        self.assertEqual(result, 55)

    @parameterized.named_parameters(
        ('local_executor_none_clients',
         executor_stacks.local_executor_factory()),
        ('sizing_executor_none_clients',
         executor_stacks.sizing_executor_factory()),
        ('debug_executor_none_clients',
         executor_stacks.thread_debugging_executor_factory()),
        ('local_executor_one_client',
         executor_stacks.local_executor_factory(num_clients=1)),
        ('sizing_executor_one_client',
         executor_stacks.sizing_executor_factory(num_clients=1)),
        ('debug_executor_one_client',
         executor_stacks.thread_debugging_executor_factory(num_clients=1)),
    )
    def test_execution_of_tensorflow(self, executor):
        """A plain TensorFlow computation runs under every stack variant."""
        @computations.tf_computation
        def comp():
            return tf.math.add(5, 5)

        with executor_test_utils.install_executor(executor):
            result = comp()

        self.assertEqual(result, 10)

    @parameterized.named_parameters(*_create_concurrent_maxthread_tuples())
    def test_limiting_concurrency_constructs_one_eager_executor(
            self, ex_factory, clients_per_thread, tf_executor_mock):
        """One eager TF executor is built per concurrency slot, plus two more."""
        num_clients = 10
        ex_factory.create_executor({placements.CLIENTS: num_clients})
        concurrency_level = math.ceil(num_clients / clients_per_thread)
        args_list = tf_executor_mock.call_args_list
        # One for server executor, one for unplaced executor, concurrency_level for
        # clients.
        self.assertLen(args_list, concurrency_level + 2)

    @mock.patch(
        'tensorflow_federated.python.core.impl.executors.reference_resolving_executor.ReferenceResolvingExecutor',
        return_value=ExecutorMock())
    def test_thread_debugging_executor_constructs_exactly_one_reference_resolving_executor(
            self, executor_mock):
        """The debugging stack creates a single ReferenceResolvingExecutor."""
        executor_stacks.thread_debugging_executor_factory().create_executor(
            {placements.CLIENTS: 10})
        executor_mock.assert_called_once()

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
        ('debug_executor', executor_stacks.thread_debugging_executor_factory),
    )
    def test_create_executor_raises_with_wrong_cardinalities(
            self, executor_factory_fn):
        """Cardinalities conflicting with a fixed num_clients are rejected."""
        executor_factory_impl = executor_factory_fn(num_clients=5)
        # CLIENTS: 1 conflicts with the num_clients=5 fixed above.
        cardinalities = {
            placements.SERVER: 1,
            None: 1,
            placements.CLIENTS: 1,
        }
        with self.assertRaises(ValueError, ):
            executor_factory_impl.create_executor(cardinalities)
Ejemplo n.º 30
0
class ExecutionContextIntegrationTest(parameterized.TestCase):
    def test_simple_no_arg_tf_computation_with_int_result(self):
        """A no-arg tf_computation returns its constant under the executor."""
        @computations.tf_computation
        def comp():
            return tf.constant(10)

        with executor_test_utils.install_executor(
                executor_stacks.local_executor_factory()):
            outcome = comp()

        self.assertEqual(outcome, 10)

    def test_one_arg_tf_computation_with_int_param_and_result(self):
        """A one-arg tf_computation is invocable with a raw Python int."""
        @computations.tf_computation(tf.int32)
        def comp(x):
            return tf.add(x, 10)

        with executor_test_utils.install_executor(
                executor_stacks.local_executor_factory()):
            outcome = comp(3)

        self.assertEqual(outcome, 13)

    def test_three_arg_tf_computation_with_int_params_and_result(self):
        """A three-arg tf_computation composes add and multiply correctly."""
        @computations.tf_computation(tf.int32, tf.int32, tf.int32)
        def comp(x, y, z):
            return tf.multiply(tf.add(x, y), z)

        with executor_test_utils.install_executor(
                executor_stacks.local_executor_factory()):
            outcome = comp(3, 4, 5)

        self.assertEqual(outcome, 35)

    def test_tf_computation_with_dataset_params_and_int_result(self):
        """A tf_computation can reduce a tf.data.Dataset argument to an int."""
        @computations.tf_computation(computation_types.SequenceType(tf.int32))
        def comp(ds):
            return ds.reduce(np.int32(0), lambda x, y: x + y)

        with executor_test_utils.install_executor(
                executor_stacks.local_executor_factory()):
            dataset = tf.data.Dataset.range(10).map(
                lambda x: tf.cast(x, tf.int32))
            total = comp(dataset)

        # Sum of 0..9.
        self.assertEqual(total, 45)

    def test_tf_computation_with_structured_result(self):
        """A tf_computation returning an OrderedDict keeps its structure."""
        @computations.tf_computation
        def comp():
            return collections.OrderedDict(a=tf.constant(10),
                                           b=tf.constant(20))

        with executor_test_utils.install_executor(
                executor_stacks.local_executor_factory()):
            outcome = comp()

        self.assertIsInstance(outcome, collections.OrderedDict)
        self.assertDictEqual(outcome, {'a': 10, 'b': 20})

    @parameterized.named_parameters(
        ('local_executor_none_clients',
         executor_stacks.local_executor_factory()),
        ('local_executor_three_clients',
         executor_stacks.local_executor_factory(num_clients=3)),
    )
    def test_with_temperature_sensor_example(self, executor):
        """End-to-end weighted mean of per-client over-threshold counts."""
        @computations.tf_computation(computation_types.SequenceType(
            tf.float32), tf.float32)
        def count_over(ds, t):
            # Number of readings in `ds` strictly greater than threshold `t`.
            return ds.reduce(
                np.float32(0),
                lambda n, x: n + tf.cast(tf.greater(x, t), tf.float32))

        @computations.tf_computation(computation_types.SequenceType(tf.float32)
                                     )
        def count_total(ds):
            # Total number of readings in `ds`.
            return ds.reduce(np.float32(0.0), lambda n, _: n + 1.0)

        @computations.federated_computation(
            computation_types.at_clients(
                computation_types.SequenceType(tf.float32)),
            computation_types.at_server(tf.float32))
        def comp(temperatures, threshold):
            # Mean of each client's over-threshold count, weighted by that
            # client's total number of readings.
            return intrinsics.federated_mean(
                intrinsics.federated_map(
                    count_over,
                    intrinsics.federated_zip([
                        temperatures,
                        intrinsics.federated_broadcast(threshold)
                    ])), intrinsics.federated_map(count_total, temperatures))

        with executor_test_utils.install_executor(executor):
            to_float = lambda x: tf.cast(x, tf.float32)
            temperatures = [
                tf.data.Dataset.range(10).map(to_float),
                tf.data.Dataset.range(20).map(to_float),
                tf.data.Dataset.range(30).map(to_float),
            ]
            threshold = 15.0
            result = comp(temperatures, threshold)
            # Counts above 15.0 are 0, 4, and 14 for the three clients, so the
            # weighted mean is (0*10 + 4*20 + 14*30) / 60 = 500/60 ≈ 8.333.
            self.assertAlmostEqual(result, 8.333, places=3)

    def test_changing_cardinalities_across_calls(self):
        """The same computation accepts different client counts per call."""
        @computations.federated_computation(
            computation_types.at_clients(tf.int32))
        def comp(x):
            return x

        first_input = list(range(5))
        second_input = list(range(10))

        with executor_test_utils.install_executor(
                executor_stacks.local_executor_factory()):
            first_result = comp(first_input)
            second_result = comp(second_input)

        self.assertEqual(first_result, first_input)
        self.assertEqual(second_result, second_input)

    def test_conflicting_cardinalities_within_call(self):
        """Two client-placed args of different sizes raise within one call."""
        @computations.federated_computation([
            computation_types.at_clients(tf.int32),
            computation_types.at_clients(tf.int32),
        ])
        def comp(x):
            return x

        with executor_test_utils.install_executor(
                executor_stacks.local_executor_factory()):
            with self.assertRaisesRegex(ValueError,
                                        'Conflicting cardinalities'):
                # Five clients in one argument, ten in the other.
                comp([list(range(5)), list(range(10))])

    def test_tuple_argument_can_accept_unnamed_elements(self):
        """A Struct of unnamed elements binds to named tf_computation params."""
        @computations.tf_computation(tf.int32, tf.int32)
        def foo(x, y):
            return x + y

        with executor_test_utils.install_executor(
                executor_stacks.local_executor_factory()):
            # pylint:disable=no-value-for-parameter
            total = foo(structure.Struct([(None, 2), (None, 3)]))
            # pylint:enable=no-value-for-parameter

        self.assertEqual(total, 5)