Example #1
0
    def test_server_eager_mode(self, optimizer_fn, updated_val,
                               num_optimizer_vars):
        """Checks server init and a single model-delta update in eager mode."""
        model_fn = lambda: model_examples.TrainableLinearRegression(
            feature_dim=2)

        server_state = optimizer_utils.server_init(
            model_fn, optimizer_fn, (), ())
        trainable = server_state.model.trainable
        # Freshly initialized model: all weights zero.
        self.assertAllClose(trainable['a'].numpy(), np.array([[0.0], [0.0]]))
        self.assertEqual(trainable['b'].numpy(), 0.0)
        self.assertEqual(server_state.model.non_trainable['c'].numpy(), 0.0)
        self.assertLen(server_state.optimizer_state, num_optimizer_vars)

        delta = tensor_utils.to_odict({
            'a': tf.constant([[1.0], [0.0]]),
            'b': tf.constant(1.0)
        })
        server_state = optimizer_utils.server_update_model(
            server_state, delta, model_fn, optimizer_fn)

        trainable = server_state.model.trainable
        # For SGD: learning_rate=0.1, update=[1.0, 0.0], initial
        # model=[0.0, 0.0], so updated_val=0.1.
        self.assertAllClose(trainable['a'].numpy(), [[updated_val], [0.0]])
        self.assertAllClose(trainable['b'].numpy(), updated_val)
        self.assertEqual(server_state.model.non_trainable['c'].numpy(), 0.0)
Example #2
0
    def test_server_eager_mode(self, optimizer_fn, updated_val,
                               num_optimizer_vars):
        """Verifies server state before and after one update (eager mode)."""
        model_fn = lambda: model_examples.TrainableLinearRegression(
            feature_dim=2)

        server_state = optimizer_utils.server_init(
            model_fn, optimizer_fn, (), ())
        evaluated = self.evaluate(server_state.model)
        # Freshly initialized model: all weights zero.
        self.assertLen(evaluated.trainable, 2)
        self.assertAllClose(evaluated.trainable['a'], [[0.0], [0.0]])
        self.assertEqual(evaluated.trainable['b'], 0.0)
        self.assertEqual(evaluated.non_trainable, {'c': 0.0})
        self.assertLen(server_state.optimizer_state, num_optimizer_vars)

        delta = collections.OrderedDict([
            ('a', tf.constant([[1.0], [0.0]])),
            ('b', tf.constant(1.0)),
        ])
        server_state = optimizer_utils.server_update_model(
            server_state, delta, model_fn, optimizer_fn)

        evaluated = self.evaluate(server_state.model)
        # For SGD: learning_rate=0.1, update=[1.0, 0.0], initial
        # model=[0.0, 0.0], so updated_val=0.1.
        self.assertLen(evaluated.trainable, 2)
        self.assertAllClose(evaluated.trainable['a'], [[updated_val], [0.0]])
        self.assertAllClose(evaluated.trainable['b'], updated_val)
        self.assertEqual(evaluated.non_trainable, {'c': 0.0})
Example #3
0
    def test_trainable_linear_regression(self):
        """Trains the model in graph mode and checks the loss decreases."""
        dim = 1
        model = model_examples.TrainableLinearRegression(feature_dim=dim)
        init_op = tf.compat.v1.initializers.variables(
            model.trainable_variables + model.non_trainable_variables +
            model.local_variables)
        x_placeholder = tf.compat.v1.placeholder(
            tf.float32, shape=(None, dim))
        y_placeholder = tf.compat.v1.placeholder(
            tf.float32, shape=(None, 1))
        batch = model.make_batch(x=x_placeholder, y=y_placeholder)

        train_op = model.train_on_batch(batch)
        metrics = model.report_local_outputs()
        feed = {batch.x: [[0.0], [5.0]], batch.y: [[0.0], [5.0]]}
        num_iters = 10
        with self.session() as sess:
            sess.run(init_op)
            previous_loss = float('inf')
            for _ in range(num_iters):
                result = sess.run(train_op, feed_dict=feed)
                # Every training step should reduce the loss.
                self.assertLess(result.loss, previous_loss)
                previous_loss = result.loss

            metric_values = sess.run(metrics)
            self.assertEqual(metric_values['num_batches'], num_iters)
            self.assertEqual(metric_values['num_examples'], 2 * num_iters)
            self.assertLess(metric_values['loss'], 1.0)
Example #4
0
    def test_orchestration_type_signature(self):
        """Checks the TFF type signatures of `initialize` and `next`."""
        iterative_process = optimizer_utils.build_model_delta_optimizer_process(
            model_fn=model_examples.TrainableLinearRegression,
            model_to_client_delta_fn=DummyClientDeltaFn,
            server_optimizer_fn=lambda: gradient_descent.SGD(learning_rate=1.0
                                                             ))

        # Expected weight structure of TrainableLinearRegression with
        # feature_dim=2: trainable 'a' (2x1) and 'b' (scalar),
        # non-trainable 'c' (scalar).
        expected_model_weights_type = model_utils.ModelWeights(
            collections.OrderedDict([('a', tff.TensorType(tf.float32, [2, 1])),
                                     ('b', tf.float32)]),
            collections.OrderedDict([('c', tf.float32)]))

        # ServerState consists of a model and optimizer_state. The optimizer_state
        # is provided by TensorFlow, TFF doesn't care what the actual value is.
        expected_federated_server_state_type = tff.FederatedType(
            optimizer_utils.ServerState(expected_model_weights_type,
                                        test.AnyType(), test.AnyType(),
                                        test.AnyType()),
            placement=tff.SERVER,
            all_equal=True)

        # Clients each hold a sequence (dataset) of model input batches.
        expected_federated_dataset_type = tff.FederatedType(tff.SequenceType(
            model_examples.TrainableLinearRegression().input_spec),
                                                            tff.CLIENTS,
                                                            all_equal=False)

        expected_model_output_types = tff.FederatedType(
            collections.OrderedDict([
                ('loss', tff.TensorType(tf.float32, [])),
                ('num_examples', tff.TensorType(tf.int32, [])),
            ]),
            tff.SERVER,
            all_equal=True)

        # `initialize` is expected to be a function of no arguments to a ServerState.
        self.assertEqual(
            tff.FunctionType(parameter=None,
                             result=expected_federated_server_state_type),
            iterative_process.initialize.type_signature)

        # `next` is expected to be a function of (ServerState, Datasets) to
        # ServerState.
        self.assertEqual(
            tff.FunctionType(parameter=[
                expected_federated_server_state_type,
                expected_federated_dataset_type
            ],
                             result=(expected_federated_server_state_type,
                                     expected_model_output_types)),
            iterative_process.next.type_signature)
Example #5
0
    def test_trainable_linear_regression(self):
        """Runs several eager training steps and checks loss and metrics."""
        dim = 1
        model = model_examples.TrainableLinearRegression(feature_dim=dim)
        batch = model.make_batch(
            x=tf.constant([[0.0], [5.0]]),
            y=tf.constant([[0.0], [5.0]]))

        num_iters = 10
        previous_loss = float('inf')
        for _ in range(num_iters):
            output = model.train_on_batch(batch)
            # Every training step should reduce the loss.
            self.assertLess(output.loss, previous_loss)
            previous_loss = output.loss

        metrics = model.report_local_outputs()
        self.assertEqual(metrics['num_batches'], num_iters)
        self.assertEqual(metrics['num_examples'], 2 * num_iters)
        self.assertLess(metrics['loss'], 1.0)
Example #6
0
    def test_server_graph_mode(self):
        """Checks server init and one model-delta update in graph mode."""
        optimizer_fn = lambda: gradient_descent.SGD(learning_rate=0.1)
        model_fn = lambda: model_examples.TrainableLinearRegression(feature_dim
                                                                    =2)

        # Explicitly entering a graph as a default enables graph-mode.
        with tf.Graph().as_default() as g:
            server_state_op = optimizer_utils.server_init(
                model_fn, optimizer_fn, (), ())
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            # Finalize to catch any op accidentally added after this point.
            g.finalize()
            with self.session() as sess:
                sess.run(init_op)
                server_state = sess.run(server_state_op)
        # Freshly initialized model: all weights zero.
        train_vars = server_state.model.trainable
        self.assertAllClose(train_vars['a'], [[0.0], [0.0]])
        self.assertEqual(train_vars['b'], 0.0)
        self.assertEqual(server_state.model.non_trainable['c'], 0.0)
        self.assertEqual(server_state.optimizer_state, [0.0])

        with tf.Graph().as_default() as g:
            # N.B. Must use a fresh graph so variable names are the same.
            weights_delta = tensor_utils.to_odict({
                'a':
                tf.constant([[1.0], [0.0]]),
                'b':
                tf.constant(2.0)
            })
            update_op = optimizer_utils.server_update_model(
                server_state, weights_delta, model_fn, optimizer_fn)
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            g.finalize()
            with self.session() as sess:
                sess.run(init_op)
                server_state = sess.run(update_op)
        train_vars = server_state.model.trainable
        # learning_rate=0.1, update is [1.0, 0.0], initial model is [0.0, 0.0].
        self.assertAllClose(train_vars['a'], [[0.1], [0.0]])
        self.assertAllClose(train_vars['b'], 0.2)
        self.assertEqual(server_state.model.non_trainable['c'], 0.0)
  def test_server_eager_mode(self, optimizer_fn, updated_val,
                             num_optimizer_vars):
    """Checks server init and a single delta update in eager mode."""
    model_fn = lambda: model_examples.TrainableLinearRegression(feature_dim=2)

    server_state = optimizer_utils.server_init(model_fn, optimizer_fn, (), ())
    evaluated = self.evaluate(server_state.model)
    # Freshly initialized model: all weights zero.
    self.assertLen(evaluated.trainable, 2)
    self.assertAllClose(evaluated.trainable, [np.zeros((2, 1)), 0.0])
    self.assertAllClose(evaluated.non_trainable, [0.0])
    self.assertLen(server_state.optimizer_state, num_optimizer_vars)

    delta = [tf.constant([[1.0], [0.0]]), tf.constant(1.0)]
    server_state = optimizer_utils.server_update_model(
        server_state, delta, model_fn, optimizer_fn)

    evaluated = self.evaluate(server_state.model)
    # For SGD: learning_rate=0.1, update=[1.0, 0.0], initial model=[0.0, 0.0],
    # so updated_val=0.1.
    self.assertLen(evaluated.trainable, 2)
    self.assertAllClose(evaluated.trainable,
                        [[[updated_val], [0.0]], updated_val])
    self.assertAllClose(evaluated.non_trainable, [0.0])
Example #8
0
 def model(self):
   """Returns a fresh TrainableLinearRegression model with feature_dim=2."""
   return model_examples.TrainableLinearRegression(feature_dim=2)