Example #1
    def test_raise_error(self):
        def _schedule(model_version):
            return 1 if model_version < 2 else 2

        learning_rate_scheduler = LearningRateScheduler(_schedule)
        model = tf.keras.Model()
        learning_rate_scheduler.set_model(model)
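        # No optimizer has been attached to the model yet, so the
        # scheduler has no learning rate variable to update.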
        with self.assertRaises(ValueError):
            learning_rate_scheduler.on_train_batch_begin(batch=1)

        model.optimizer = tf.optimizers.SGD(0.1)
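        # Even with an optimizer attached, _schedule returns ints
        # (1 or 2), which the scheduler presumably rejects as a
        # non-float learning rate.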
        with self.assertRaises(ValueError):
            learning_rate_scheduler.on_train_batch_begin(batch=1)
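
This snippet (like Example #2 below) does not show the LearningRateScheduler under test, and it assumes `import numpy as np`, `import tensorflow as tf`, and a `tf.test.TestCase`-style class in scope. As a rough sketch only (an assumption, not the project's actual implementation), a scheduler consistent with both tests could look like this:

import numpy as np
import tensorflow as tf


class LearningRateScheduler:
    """Sets the optimizer's learning rate from schedule(batch) at the
    start of each training batch."""

    def __init__(self, schedule):
        self.schedule = schedule
        self.model = None

    def set_model(self, model):
        self.model = model

    def on_train_batch_begin(self, batch, logs=None):
        optimizer = getattr(self.model, "optimizer", None)
        # First ValueError above: an uncompiled tf.keras.Model has no
        # optimizer, hence no lr variable to update.
        if not hasattr(optimizer, "lr"):
            raise ValueError('Optimizer must have an "lr" attribute.')
        lr = self.schedule(batch)
        # Second ValueError above: the schedule returned an int, and a
        # non-float learning rate is rejected.
        if not isinstance(lr, (tf.Tensor, float, np.float32, np.float64)):
            raise ValueError(
                'The output of the "schedule" function should be float.'
            )
        tf.keras.backend.set_value(optimizer.lr, lr)

Under this reading, the first assertRaises fires because no optimizer is attached, and the second because the schedule's output is not a float.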
Example #2
    def test_learning_rate_scheduler(self):
        learning_rate_scheduler = LearningRateScheduler(self._schedule)
        model = tf.keras.Model()
        model.optimizer = tf.optimizers.SGD(0.1)
        learning_rate_scheduler.set_model(model)

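        # Each call should replace the optimizer's lr with the
        # schedule's value for that batch.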
        learning_rate_scheduler.on_train_batch_begin(batch=1)
        self.assertEqual(model.optimizer.lr.numpy(), np.float32(0.2))
        learning_rate_scheduler.on_train_batch_begin(batch=2)
        self.assertEqual(model.optimizer.lr.numpy(), np.float32(0.1))

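        # Build an identical set of variables and gradients for each
        # model version, so only the scheduled lr differs between runs.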
        model_versions = [0, 1, 2]
        variables = []
        grads = []
        original_values = [1.2, 0.8]
        grad_values = [0.2, 0.1]

        for _ in model_versions:
            variables.append([tf.Variable(v) for v in original_values])
            grads.append([tf.convert_to_tensor(g) for g in grad_values])

        results = []
        for i in range(len(model_versions)):
            result = self.apply_gradients_with_scheduler(
                learning_rate_scheduler,
                model.optimizer,
                model_versions[i],
                variables[i],
                grads[i],
            )
            results.append(result)

        places = 5
        for i in range(len(model_versions)):
            i_diff = [
                original_values[j] - results[i][j]
                for j in range(len(original_values))
            ]
            for j in range(len(original_values)):
                # For SGD without momentum the update is lr * grad, so
                # the observed change should equal
                # grad * schedule(model_version)
                self.assertAlmostEqual(
                    i_diff[j],
                    grad_values[j] * self._schedule(model_versions[i]),
                    places,
                )
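
Both `self._schedule` and `self.apply_gradients_with_scheduler` are defined elsewhere in the test class and are not shown. Judging only from the assertions (the lr becomes 0.2 for batch 1 and 0.1 for batch 2, and each variable's change equals grad * schedule(model_version)), hypothetical stand-ins could be:

    def _schedule(self, model_version):
        # Hypothetical: the lr assertions above pin down
        # schedule(1) == 0.2 and schedule(2) == 0.1.
        return 0.2 if model_version < 2 else 0.1

    def apply_gradients_with_scheduler(
        self, scheduler, optimizer, model_version, variables, grads
    ):
        # Hypothetical: set the lr for this model version, take one
        # vanilla SGD step, and return the updated variable values.
        scheduler.on_train_batch_begin(batch=model_version)
        optimizer.apply_gradients(zip(grads, variables))
        return [v.numpy() for v in variables]

With vanilla SGD the update is new_value = value - lr * grad, so for model_version 1 the first variable would move from 1.2 to 1.2 - 0.2 * 0.2 = 1.16, a change of 0.04 = grad * lr, which is exactly what the assertAlmostEqual checks.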