Example no. 1
def callbacks():
    def _schedule(model_version):
        return 0.5 if model_version < 100 else 0.2

    learning_rate_scheduler = LearningRateScheduler(_schedule)
    max_steps_stopping = MaxStepsStopping(max_steps=200)
    return [max_steps_stopping, learning_rate_scheduler]
Example no. 2
def callbacks():
    def _schedule(model_version):
        if model_version < 5000:
            return 0.0003
        elif model_version < 12000:
            return 0.0002
        else:
            return 0.0001

    return [LearningRateScheduler(_schedule)]
Example no. 3
def callbacks():
    def _schedule(model_version):
        if model_version < 5000:
            return 0.1
        elif model_version < 15000:
            return 0.01
        else:
            return 0.001

    return [LearningRateScheduler(_schedule)]
Example no. 4
    def test_raise_error(self):
        def _schedule(model_version):
            return 1 if model_version < 2 else 2

        learning_rate_scheduler = LearningRateScheduler(_schedule)
        model = tf.keras.Model()
        learning_rate_scheduler.set_model(model)
        with self.assertRaises(ValueError):
            # No optimizer has been attached to the model yet.
            learning_rate_scheduler.on_train_batch_begin(batch=1)

        model.optimizer = tf.optimizers.SGD(0.1)
        with self.assertRaises(ValueError):
            # The schedule returns an int, but a float learning rate is required.
            learning_rate_scheduler.on_train_batch_begin(batch=1)
Example no. 5
    def test_learning_rate_scheduler(self):
        learning_rate_scheduler = LearningRateScheduler(self._schedule)
        model = tf.keras.Model()
        model.optimizer = tf.optimizers.SGD(0.1)
        learning_rate_scheduler.set_model(model)

        learning_rate_scheduler.on_train_batch_begin(batch=1)
        self.assertEqual(model.optimizer.lr.numpy(), np.float32(0.2))
        learning_rate_scheduler.on_train_batch_begin(batch=2)
        self.assertEqual(model.optimizer.lr.numpy(), np.float32(0.1))

        model_versions = [0, 1, 2]
        variables = []
        grads = []
        original_values = [1.2, 0.8]
        grad_values = [0.2, 0.1]

        for i in range(len(model_versions)):
            variables.append([tf.Variable(v) for v in original_values])
            grads.append([tf.convert_to_tensor(g) for g in grad_values])

        results = []
        for i in range(len(model_versions)):
            result = self.apply_gradients_with_scheduler(
                learning_rate_scheduler,
                model.optimizer,
                model_versions[i],
                variables[i],
                grads[i],
            )
            results.append(result)

        place = 5
        for i in range(len(model_versions)):
            i_diff = [
                original_values[j] - results[i][j]
                for j in range(len(original_values))
            ]
            for j in range(len(original_values)):
                # variable value change ratio equals the learning rate ratio
                # for SGD without momentum
                self.assertAlmostEqual(
                    i_diff[j],
                    grad_values[j] * self._schedule(model_versions[i]),
                    place,
                )
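The helper apply_gradients_with_scheduler used above is not part of this example. As a rough sketch only, assuming the helper merely has to apply the learning rate the schedule picks for the given model version and then run a single SGD step (the real helper presumably routes through the scheduler itself), it could look like this:

    def apply_gradients_with_scheduler(
        self, scheduler, optimizer, model_version, variables, grads
    ):
        # Hypothetical sketch: set the learning rate that the schedule yields
        # for this model version, then take one SGD step on the variables.
        tf.keras.backend.set_value(optimizer.lr, self._schedule(model_version))
        optimizer.apply_gradients(zip(grads, variables))
        # Return the updated values so the test can compare them with the
        # original ones.
        return [v.numpy() for v in variables]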
Example no. 6
def callbacks():
    def _schedule(model_version):
        return 0.5 if model_version < 100 else 0.2

    learning_rate_scheduler = LearningRateScheduler(_schedule)
    return [learning_rate_scheduler]
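None of the examples show the scheduler class itself. As a minimal sketch of the behaviour the tests above expect, and nothing more than that, a model-version-based scheduler could assign the scheduled rate to model.optimizer.lr at the start of every batch and raise ValueError when no optimizer is attached or when the schedule does not return a float. The class name, the set_model_version hook, and the exact checks below are assumptions for illustration, not the library's actual implementation.

import tensorflow as tf


class SimpleLearningRateScheduler(tf.keras.callbacks.Callback):
    """Illustrative only: applies a model-version-based schedule to the optimizer."""

    def __init__(self, schedule):
        super().__init__()
        self._schedule = schedule
        self._model_version = 0  # assumed to be advanced by the training loop

    def set_model_version(self, model_version):
        self._model_version = model_version

    def on_train_batch_begin(self, batch, logs=None):
        # Mirror the failure modes exercised in Example no. 4.
        if getattr(self.model, "optimizer", None) is None:
            raise ValueError("The callback requires a model with an optimizer.")
        lr = self._schedule(self._model_version)
        if not isinstance(lr, float):
            raise ValueError("The schedule must return a float learning rate.")
        tf.keras.backend.set_value(self.model.optimizer.lr, lr)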