예제 #1
0
  def test_get_size_info(self, num_clients):
    """Checks that the sizing factory reports the expected transfer sizes."""

    @computations.federated_computation(
        type_factory.at_clients(computation_types.SequenceType(tf.float32)),
        type_factory.at_server(tf.float32))
    def comp(temperatures, threshold):
      # Broadcast the threshold, zip it with the client data, map
      # `count_over`/`count_total`, and take the weighted federated mean.
      broadcast = intrinsics.federated_broadcast(threshold)
      zipped = intrinsics.federated_zip([temperatures, broadcast])
      over_threshold = intrinsics.federated_map(count_over, zipped)
      totals = intrinsics.federated_map(count_total, temperatures)
      return intrinsics.federated_mean(over_threshold, totals)

    factory = executor_stacks.sizing_executor_factory(num_clients=num_clients)
    default_executor.set_default_executor(factory)

    datasets = [
        tf.data.Dataset.range(10).map(lambda x: tf.cast(x, tf.float32))
    ] * num_clients
    comp(datasets, 15.0)

    # Each client receives a tf.float32 and uploads two tf.float32 values.
    broadcast_bits = num_clients * 32
    aggregate_bits = broadcast_bits * 2
    cardinality_key = (('CLIENTS', num_clients),)
    expected = (
        {cardinality_key: [[1, tf.float32]] * num_clients},
        {cardinality_key: [[1, tf.float32]] * (num_clients * 2)},
        [broadcast_bits],
        [aggregate_bits],
    )

    self.assertEqual(expected, factory.get_size_info())
예제 #2
0
def create_sizing_execution_context(default_num_clients: int = 0,
                                    max_fanout: int = 100,
                                    clients_per_thread: int = 1):
  """Creates an execution context that executes computations locally.

  Args:
    default_num_clients: Default number of clients for the sizing factory.
    max_fanout: Maximum fanout passed to the sizing executor factory.
    clients_per_thread: Clients-per-thread setting for the sizing factory.

  Returns:
    An `execution_context.ExecutionContext` backed by a sizing executor
    factory and compiled via `compiler.transform_to_native_form`.
  """
  sizing_factory = executor_stacks.sizing_executor_factory(
      default_num_clients=default_num_clients,
      max_fanout=max_fanout,
      clients_per_thread=clients_per_thread)
  return execution_context.ExecutionContext(
      executor_fn=sizing_factory,
      compiler_fn=compiler.transform_to_native_form)
def _create_concurrent_maxthread_tuples():
  """Builds (name, executor factory, concurrency) parameter tuples."""
  tuples = []
  for concurrency in range(1, 5):
    # Emit a local- and a sizing-executor entry for each concurrency level,
    # in that order, matching the original enumeration.
    for prefix, factory_fn in (
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    ):
      name = '{}_{}_client_thread'.format(prefix, concurrency)
      ex_factory = factory_fn(num_client_executors=concurrency)
      tuples.append((name, ex_factory, concurrency))
  return tuples
class ExecutorsTest(parameterized.TestCase):
  """Exercises each calling convention of `executor_test_utils.executors`."""

  @executor_test_utils.executors
  def test_without_arguments(self):

    @computations.tf_computation(tf.int32)
    def increment(x):
      return x + 1

    self.assertEqual(increment(5), 6)

  @executor_test_utils.executors()
  def test_with_no_arguments(self):

    @computations.tf_computation(tf.int32)
    def increment(x):
      return x + 1

    self.assertEqual(increment(5), 6)

  @executor_test_utils.executors(
      ('local', executor_stacks.local_executor_factory()),)
  def test_with_one_argument(self):

    @computations.tf_computation(tf.int32)
    def increment(x):
      return x + 1

    self.assertEqual(increment(5), 6)

  @executor_test_utils.executors(
      ('local', executor_stacks.local_executor_factory()),
      ('sizing', executor_stacks.sizing_executor_factory()),
  )
  def test_with_two_argument(self):

    @computations.tf_computation(tf.int32)
    def increment(x):
      return x + 1

    self.assertEqual(increment(5), 6)
예제 #5
0
  def decorator(fn, *named_executors):
    """Wraps `fn` to run once per named executor with a context installed.

    Args:
      fn: The test method to wrap.
      *named_executors: Optional (name, executor) tuples; when omitted,
        defaults to a local and a sizing executor stack.

    Returns:
      The parameterized wrapper around `fn`.

    Raises:
      TypeError: If `fn` is a class rather than a test method.
    """
    # Guard against decorating a TestCase class directly, which would cause
    # its tests to be silently skipped; this mirrors the check present in
    # the other variant of this decorator in the codebase.
    if isinstance(fn, type):
      raise TypeError('Do not directly decorate classes with the executors '
                      'decorator; this will cause the tests to be skipped. '
                      'Decorate the member test functions instead.')

    if not named_executors:
      named_executors = [
          ('local', executor_stacks.local_executor_factory()),
          ('sizing', executor_stacks.sizing_executor_factory()),
      ]

    @parameterized.named_parameters(*named_executors)
    def wrapped_fn(self, executor):
      """Install a particular execution context before running `fn`."""
      context = execution_context.ExecutionContext(executor)
      with context_stack_impl.context_stack.install(context):
        fn(self)

    return wrapped_fn
예제 #6
0
    def test_get_size_info(self, num_clients):
        """Runs a federated computation and verifies the recorded size info."""
        @computations.federated_computation(
            type_factory.at_clients(computation_types.SequenceType(
                tf.float32)), type_factory.at_server(tf.float32))
        def comp(temperatures, threshold):
            # Broadcast the server threshold to clients, zip it with each
            # client's dataset, map `count_over`, and take the federated
            # mean weighted by `count_total`.
            client_data = [
                temperatures,
                intrinsics.federated_broadcast(threshold)
            ]
            result_map = intrinsics.federated_map(
                count_over, intrinsics.federated_zip(client_data))
            count_map = intrinsics.federated_map(count_total, temperatures)
            return intrinsics.federated_mean(result_map, count_map)

        # The sizing factory records transfer sizes; install it as the
        # active execution context for the duration of the computation.
        sizing_factory = executor_stacks.sizing_executor_factory(
            num_clients=num_clients)
        sizing_context = execution_context.ExecutionContext(sizing_factory)
        with get_context_stack.get_context_stack().install(sizing_context):
            to_float = lambda x: tf.cast(x, tf.float32)
            temperatures = [tf.data.Dataset.range(10).map(to_float)
                            ] * num_clients
            threshold = 15.0
            comp(temperatures, threshold)

            # Each client receives a tf.float32 and uploads two tf.float32 values.
            expected_broadcast_bits = [num_clients * 32]
            expected_aggregate_bits = [num_clients * 32 * 2]
            expected_broadcast_history = {
                (('CLIENTS', num_clients), ): [[1, tf.float32]] * num_clients
            }
            expected_aggregate_history = {
                (('CLIENTS', num_clients), ):
                [[1, tf.float32]] * num_clients * 2
            }

            size_info = sizing_factory.get_size_info()

            self.assertEqual(expected_broadcast_history,
                             size_info.broadcast_history)
            self.assertEqual(expected_aggregate_history,
                             size_info.aggregate_history)
            self.assertEqual(expected_broadcast_bits, size_info.broadcast_bits)
            self.assertEqual(expected_aggregate_bits, size_info.aggregate_bits)
예제 #7
0
  def decorator(fn, *named_executors):
    """Parameterizes `fn` over executors, installing each before running it.

    Raises:
      TypeError: If `fn` is a class rather than a test method.
    """
    # Decorating a TestCase class directly would skip its tests entirely.
    if isinstance(fn, type):
      raise TypeError('Do not directly decorate classes with the executors '
                      'decorator; this will cause the tests to be skipped. '
                      'Decorate the member test functions instead.')

    executors_to_use = list(named_executors) or [
        ('local', executor_stacks.local_executor_factory()),
        ('sizing', executor_stacks.sizing_executor_factory()),
    ]

    @parameterized.named_parameters(*executors_to_use)
    def wrapped_fn(self, executor):
      """Runs `fn` inside an execution context built from `executor`."""
      with context_stack_impl.context_stack.install(
          execution_context.ExecutionContext(executor)):
        fn(self)

    return wrapped_fn
예제 #8
0
def _create_concurrent_maxthread_tuples():
    """Builds (name, factory, clients_per_thread, mock) parameter tuples."""
    # Name templates are preserved verbatim from the original enumeration
    # (note the intentional 'clients_per_thread' vs 'client_thread' spelling).
    specs = (
        ('local_executor_{}_clients_per_thread',
         executor_stacks.local_executor_factory),
        ('sizing_executor_{}_client_thread',
         executor_stacks.sizing_executor_factory),
        ('debug_executor_{}_client_thread',
         executor_stacks.thread_debugging_executor_factory),
    )
    tuples = []
    for concurrency in range(1, 5):
        for name_template, factory_fn in specs:
            executor_mock = ExecutorMock()
            ex_factory = factory_fn(
                clients_per_thread=concurrency,
                leaf_executor_fn=executor_mock)
            tuples.append((name_template.format(concurrency), ex_factory,
                           concurrency, executor_mock))
    return tuples
예제 #9
0
class ExecutorStacksTest(parameterized.TestCase):
    """Tests construction and execution behavior of executor stack factories."""

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
        ('debug_executor', executor_stacks.thread_debugging_executor_factory),
    )
    def test_construction_with_no_args(self, executor_factory_fn):
        """Zero-arg construction yields a `ResourceManagingExecutorFactory`."""
        executor_factory_impl = executor_factory_fn()
        self.assertIsInstance(executor_factory_impl,
                              executor_stacks.ResourceManagingExecutorFactory)

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    )
    def test_construction_raises_with_max_fanout_one(self,
                                                     executor_factory_fn):
        """`max_fanout=1` is rejected with `ValueError` at construction."""
        with self.assertRaises(ValueError):
            executor_factory_fn(max_fanout=1)

    @parameterized.named_parameters(
        ('local_executor_none_clients',
         executor_stacks.local_executor_factory()),
        ('sizing_executor_none_clients',
         executor_stacks.sizing_executor_factory()),
        ('local_executor_three_clients',
         executor_stacks.local_executor_factory(num_clients=3)),
        ('sizing_executor_three_clients',
         executor_stacks.sizing_executor_factory(num_clients=3)),
    )
    @test_utils.skip_test_for_multi_gpu
    def test_execution_of_temperature_sensor_example(self, executor):
        """End-to-end run of the temperature-sensor example computation."""
        comp = _temperature_sensor_example_next_fn()
        to_float = lambda x: tf.cast(x, tf.float32)
        temperatures = [
            tf.data.Dataset.range(10).map(to_float),
            tf.data.Dataset.range(20).map(to_float),
            tf.data.Dataset.range(30).map(to_float),
        ]
        threshold = 15.0

        with executor_test_utils.install_executor(executor):
            result = comp(temperatures, threshold)

        self.assertAlmostEqual(result, 8.333, places=3)

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    )
    def test_execution_with_inferred_clients_larger_than_fanout(
            self, executor_factory_fn):
        """Summing across 10 clients succeeds with `max_fanout=3`."""
        @computations.federated_computation(
            computation_types.at_clients(tf.int32))
        def foo(x):
            return intrinsics.federated_sum(x)

        executor = executor_factory_fn(max_fanout=3)
        with executor_test_utils.install_executor(executor):
            result = foo([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

        self.assertEqual(result, 55)

    @parameterized.named_parameters(
        ('local_executor_none_clients',
         executor_stacks.local_executor_factory()),
        ('sizing_executor_none_clients',
         executor_stacks.sizing_executor_factory()),
        ('debug_executor_none_clients',
         executor_stacks.thread_debugging_executor_factory()),
        ('local_executor_one_client',
         executor_stacks.local_executor_factory(num_clients=1)),
        ('sizing_executor_one_client',
         executor_stacks.sizing_executor_factory(num_clients=1)),
        ('debug_executor_one_client',
         executor_stacks.thread_debugging_executor_factory(num_clients=1)),
    )
    def test_execution_of_tensorflow(self, executor):
        """A plain TensorFlow computation runs under each stack variant."""
        @computations.tf_computation
        def comp():
            return tf.math.add(5, 5)

        with executor_test_utils.install_executor(executor):
            result = comp()

        self.assertEqual(result, 10)

    @parameterized.named_parameters(*_create_concurrent_maxthread_tuples())
    def test_limiting_concurrency_constructs_one_eager_executor(
            self, ex_factory, clients_per_thread, tf_executor_mock):
        """Leaf-executor construction count matches ceil(clients/threads)+2."""
        num_clients = 10
        ex_factory.create_executor({placements.CLIENTS: num_clients})
        concurrency_level = math.ceil(num_clients / clients_per_thread)
        args_list = tf_executor_mock.call_args_list
        # One for server executor, one for unplaced executor, concurrency_level for
        # clients.
        self.assertLen(args_list, concurrency_level + 2)

    @mock.patch(
        'tensorflow_federated.python.core.impl.executors.reference_resolving_executor.ReferenceResolvingExecutor',
        return_value=ExecutorMock())
    def test_thread_debugging_executor_constructs_exactly_one_reference_resolving_executor(
            self, executor_mock):
        """The debug stack builds a single `ReferenceResolvingExecutor`."""
        executor_stacks.thread_debugging_executor_factory().create_executor(
            {placements.CLIENTS: 10})
        executor_mock.assert_called_once()

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
        ('debug_executor', executor_stacks.thread_debugging_executor_factory),
    )
    def test_create_executor_raises_with_wrong_cardinalities(
            self, executor_factory_fn):
        """`create_executor` raises `ValueError` for these cardinalities."""
        executor_factory_impl = executor_factory_fn(num_clients=5)
        # NOTE(review): presumably CLIENTS: 1 conflicts with num_clients=5
        # fixed at construction — confirm against the factory implementation.
        cardinalities = {
            placements.SERVER: 1,
            None: 1,
            placements.CLIENTS: 1,
        }
        with self.assertRaises(ValueError, ):
            executor_factory_impl.create_executor(cardinalities)
예제 #10
0
class ExecutorStacksTest(parameterized.TestCase):
    """Tests construction and execution behavior of executor stack factories."""

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    )
    def test_construction_with_no_args(self, executor_factory_fn):
        """Zero-arg construction yields an `ExecutorFactoryImpl`."""
        executor_factory_impl = executor_factory_fn()
        self.assertIsInstance(executor_factory_impl,
                              executor_factory.ExecutorFactoryImpl)

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    )
    def test_construction_raises_with_max_fanout_one(self,
                                                     executor_factory_fn):
        """`max_fanout=1` is rejected with `ValueError` at construction."""
        with self.assertRaises(ValueError):
            executor_factory_fn(max_fanout=1)

    @parameterized.named_parameters(
        ('local_executor_none_clients',
         executor_stacks.local_executor_factory()),
        ('sizing_executor_none_clients',
         executor_stacks.sizing_executor_factory()),
        ('local_executor_three_clients',
         executor_stacks.local_executor_factory(num_clients=3)),
        ('sizing_executor_three_clients',
         executor_stacks.sizing_executor_factory(num_clients=3)),
    )
    def test_execution_of_temperature_sensor_example(self, executor):
        """End-to-end run of the temperature-sensor example computation."""
        comp = _temperature_sensor_example_next_fn()
        to_float = lambda x: tf.cast(x, tf.float32)
        temperatures = [
            tf.data.Dataset.range(10).map(to_float),
            tf.data.Dataset.range(20).map(to_float),
            tf.data.Dataset.range(30).map(to_float),
        ]
        threshold = 15.0

        with executor_test_utils.install_executor(executor):
            result = comp(temperatures, threshold)

        self.assertAlmostEqual(result, 8.333, places=3)

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    )
    def test_execution_with_inferred_clients_larger_than_fanout(
            self, executor_factory_fn):
        """Summing across 10 clients succeeds with `max_fanout=3`."""
        @computations.federated_computation(type_factory.at_clients(tf.int32))
        def foo(x):
            return intrinsics.federated_sum(x)

        executor = executor_factory_fn(max_fanout=3)
        with executor_test_utils.install_executor(executor):
            result = foo([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

        self.assertEqual(result, 55)

    @parameterized.named_parameters(
        ('local_executor_none_clients',
         executor_stacks.local_executor_factory()),
        ('sizing_executor_none_clients',
         executor_stacks.sizing_executor_factory()),
        ('local_executor_one_client',
         executor_stacks.local_executor_factory(num_clients=1)),
        ('sizing_executor_one_client',
         executor_stacks.sizing_executor_factory(num_clients=1)),
    )
    def test_execution_of_tensorflow(self, executor):
        """A plain TensorFlow computation runs under each stack variant."""
        @computations.tf_computation
        def comp():
            return tf.math.add(5, 5)

        with executor_test_utils.install_executor(executor):
            result = comp()

        self.assertEqual(result, 10)

    @parameterized.named_parameters(
        ('local_executor', executor_stacks.local_executor_factory),
        ('sizing_executor', executor_stacks.sizing_executor_factory),
    )
    def test_create_executor_raises_with_wrong_cardinalities(
            self, executor_factory_fn):
        """`create_executor` raises `ValueError` for these cardinalities."""
        executor_factory_impl = executor_factory_fn(num_clients=5)
        # NOTE(review): presumably CLIENTS: 1 conflicts with num_clients=5
        # fixed at construction — confirm against the factory implementation.
        cardinalities = {
            placement_literals.SERVER: 1,
            None: 1,
            placement_literals.CLIENTS: 1,
        }
        with self.assertRaises(ValueError, ):
            executor_factory_impl.create_executor(cardinalities)