Example #1
    def testZeroEpochScheduleWithWarmup(self):
        # A schedule whose decay phase has zero length (max_learning_rate_epoch
        # == decay_end_epoch) but that still requests a warmup ramp is invalid
        # and should raise a ValueError.
        global_step = tf.placeholder_with_default(0, [], 'global_step')
        with self.assertRaises(ValueError):
            utils.build_learning_rate_schedule(
                learning_rate=1.,
                decay_type=enums.DecayType.EXPONENTIAL,
                warmup_start_epoch=12,
                max_learning_rate_epoch=24,
                decay_end_epoch=24,
                global_step=global_step,
                steps_per_epoch=100)
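Taken together, the call sites in these examples imply roughly the following signature for utils.build_learning_rate_schedule. This is a sketch reconstructed from the tests, not the library's actual definition; treating decay_rate, boundary_epochs, and epochs_per_decay as optional keywords is an assumption based on which calls pass them.

def build_learning_rate_schedule(learning_rate,
                                 decay_type,
                                 warmup_start_epoch,
                                 max_learning_rate_epoch,
                                 decay_end_epoch,
                                 global_step,
                                 steps_per_epoch,
                                 decay_rate=None,
                                 boundary_epochs=None,
                                 epochs_per_decay=None):
    """Assumed interface: warmup, peak, then decay, driven by global_step."""
    ...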
Example #2
    def testPiecewiseLinearDecayWithoutWarmup(self):
        # With warmup_start_epoch == max_learning_rate_epoch == 0 there is no
        # warmup: the schedule starts at the full learning rate and steps down
        # at each default boundary epoch.
        global_step = tf.placeholder_with_default(0, [], 'global_step')
        steps_per_epoch = 100
        learning_rate = utils.build_learning_rate_schedule(
            learning_rate=1.,
            decay_type=enums.DecayType.PIECEWISE_LINEAR,
            warmup_start_epoch=0,
            max_learning_rate_epoch=0,
            decay_end_epoch=1000,
            global_step=global_step,
            steps_per_epoch=steps_per_epoch)

        with self.cached_session() as sess:
            initial_learning_rate = sess.run(learning_rate,
                                             feed_dict={global_step: 1})
            self.assertAlmostEqual(1., initial_learning_rate)

            for i, epoch in enumerate(self.DEFAULT_BOUNDARY_EPOCHS):
                before_decay_learning_rate = sess.run(
                    learning_rate,
                    feed_dict={global_step: epoch * steps_per_epoch})
                after_decay_learning_rate = sess.run(
                    learning_rate,
                    feed_dict={global_step: epoch * steps_per_epoch + 1})

                self.assertAlmostEqual(before_decay_learning_rate,
                                       self.DEFAULT_DECAY_RATES[i])
                self.assertAlmostEqual(after_decay_learning_rate,
                                       self.DEFAULT_DECAY_RATES[i + 1])

            final_learning_rate = sess.run(
                learning_rate, feed_dict={global_step: 1000 * steps_per_epoch})
            self.assertAlmostEqual(1e-4, final_learning_rate, places=7)
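DEFAULT_BOUNDARY_EPOCHS and DEFAULT_DECAY_RATES are class constants not shown in these excerpts. The assertions pin down two of their properties: DEFAULT_DECAY_RATES[0] must be 1.0 (the rate just before the first boundary equals the initial rate), and the last rate must be 1e-4 (the value observed at epoch 1000).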
Example #3
    def testCosineDecayWithWarmup(self):
        # Linear warmup over epochs [0, 5), then cosine decay back to zero
        # over epochs [5, 10), at 100 steps per epoch.
        global_step = tf.placeholder_with_default(0, [], 'global_step')
        learning_rate = utils.build_learning_rate_schedule(
            learning_rate=1.,
            decay_type=enums.DecayType.COSINE,
            warmup_start_epoch=0,
            max_learning_rate_epoch=5,
            decay_end_epoch=10,
            global_step=global_step,
            steps_per_epoch=100)

        with self.cached_session() as sess:
            initial_learning_rate = sess.run(learning_rate,
                                             feed_dict={global_step: 0})
            self.assertAlmostEqual(0., initial_learning_rate)

            intermediate_warmup_learning_rate = sess.run(
                learning_rate, feed_dict={global_step: 250})
            self.assertAlmostEqual(0.5, intermediate_warmup_learning_rate)

            max_learning_rate = sess.run(learning_rate,
                                         feed_dict={global_step: 500})
            self.assertAlmostEqual(1., max_learning_rate)

            final_learning_rate = sess.run(learning_rate,
                                           feed_dict={global_step: 1000})
            self.assertAlmostEqual(0., final_learning_rate)
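The values asserted above are consistent with a linear warmup to the peak followed by a half-cosine decay to zero. A minimal NumPy sketch of that shape, assuming this is how the builder computes it (the helper name and boundary handling are illustrative, not the library code):

import numpy as np

def warmup_cosine_lr(step, base_lr=1.0, warmup_start=0, peak_step=500,
                     decay_end=1000):
    # Hypothetical reference implementation, not the library's code.
    if step < peak_step:
        # Linear ramp from warmup_start to peak_step.
        return base_lr * (step - warmup_start) / (peak_step - warmup_start)
    # Half cosine from base_lr down to zero between peak_step and decay_end.
    progress = (step - peak_step) / (decay_end - peak_step)
    return base_lr * 0.5 * (1.0 + np.cos(np.pi * progress))

assert warmup_cosine_lr(0) == 0.0
assert warmup_cosine_lr(250) == 0.5
assert warmup_cosine_lr(500) == 1.0
assert abs(warmup_cosine_lr(1000)) < 1e-9

This reproduces the four checkpoints the test asserts: 0 at step 0, 0.5 midway through warmup, 1.0 at the peak, and 0 at the end of decay.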
Example #4
    def testExponentialDecayWithDecayRate(self):
        # Same warmup/decay window as the cosine test, but with an explicit
        # exponential decay_rate of 0.99.
        global_step = tf.placeholder_with_default(0, [], 'global_step')
        learning_rate = utils.build_learning_rate_schedule(
            learning_rate=1.,
            decay_type=enums.DecayType.EXPONENTIAL,
            warmup_start_epoch=0,
            max_learning_rate_epoch=5,
            decay_end_epoch=10,
            global_step=global_step,
            steps_per_epoch=100,
            decay_rate=0.99)

        with self.cached_session() as sess:
            initial_learning_rate = sess.run(learning_rate,
                                             feed_dict={global_step: 0})
            self.assertAlmostEqual(0., initial_learning_rate)

            max_learning_rate = sess.run(learning_rate,
                                         feed_dict={global_step: 500})
            self.assertAlmostEqual(1., max_learning_rate)

            final_learning_rate = sess.run(learning_rate,
                                           feed_dict={global_step: 1000})
            self.assertAlmostEqual(0.99**np.floor(500 / (2.4 * 100)),
                                   final_learning_rate,
                                   places=5)
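To unpack the final assertion: global_step 1000 is 500 steps past the peak (epoch 5 × 100 steps), and the literal 2.4 reflects the builder's apparent default of 2.4 epochs (240 steps) per decay, so the staircase exponent is floor(500 / 240) = 2 and the expected value is 0.99**2 = 0.9801.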
Example #5
    def testExponentialDecayWithWarmup(self):
        # As above, but relying on the class-default decay factor and
        # epochs-per-decay instead of passing decay_rate.
        global_step = tf.placeholder_with_default(0, [], 'global_step')
        learning_rate = utils.build_learning_rate_schedule(
            learning_rate=1.,
            decay_type=enums.DecayType.EXPONENTIAL,
            warmup_start_epoch=0,
            max_learning_rate_epoch=5,
            decay_end_epoch=10,
            global_step=global_step,
            steps_per_epoch=100)

        with self.cached_session() as sess:
            initial_learning_rate = sess.run(learning_rate,
                                             feed_dict={global_step: 0})
            self.assertAlmostEqual(0., initial_learning_rate)

            max_learning_rate = sess.run(learning_rate,
                                         feed_dict={global_step: 500})
            self.assertAlmostEqual(1., max_learning_rate)

            intermediate_warmup_learning_rate = sess.run(
                learning_rate, feed_dict={global_step: 250})
            self.assertAlmostEqual(0.5, intermediate_warmup_learning_rate)

            final_learning_rate = sess.run(learning_rate,
                                           feed_dict={global_step: 1000})
            self.assertAlmostEqual(self.DEFAULT_DECAY_FACTOR**np.floor(
                500 / (self.DEFAULT_EPOCHS_PER_DECAY * 100)),
                                   final_learning_rate,
                                   places=5)
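The same arithmetic applies with the class defaults: 500 steps past the peak, divided by DEFAULT_EPOCHS_PER_DECAY * 100 steps per decay and floored, gives the staircase exponent. If DEFAULT_EPOCHS_PER_DECAY is the 2.4 seen as a literal in Example #4, that is again floor(500 / 240) = 2, so the expectation is DEFAULT_DECAY_FACTOR**2.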
Example #6
            def stage_learning_rate(stage_training_params, start_epoch,
                                    end_epoch):
                # Collect the decay-type-specific keyword arguments for the
                # schedule builder.
                schedule_kwargs = {}
                if (stage_training_params.learning_rate_decay
                        in (enums.DecayType.PIECEWISE_LINEAR,
                            enums.DecayType.EXPONENTIAL)):
                    schedule_kwargs[
                        'decay_rate'] = stage_training_params.decay_rate
                    if (stage_training_params.learning_rate_decay ==
                            enums.DecayType.PIECEWISE_LINEAR):
                        schedule_kwargs['boundary_epochs'] = (
                            stage_training_params.decay_boundary_epochs)
                    elif (stage_training_params.learning_rate_decay ==
                          enums.DecayType.EXPONENTIAL):
                        schedule_kwargs['epochs_per_decay'] = (
                            stage_training_params.epochs_per_decay)

                # Scale the stage's base learning rate linearly with batch
                # size; batch_size, BASE_BATCH_SIZE, global_step, and
                # steps_per_epoch are captured from the enclosing scope.
                return utils.build_learning_rate_schedule(
                    learning_rate=(stage_training_params.base_learning_rate *
                                   (batch_size / BASE_BATCH_SIZE)),
                    decay_type=stage_training_params.learning_rate_decay,
                    warmup_start_epoch=start_epoch,
                    max_learning_rate_epoch=(
                        start_epoch +
                        stage_training_params.learning_rate_warmup_epochs),
                    decay_end_epoch=end_epoch,
                    global_step=global_step,
                    steps_per_epoch=steps_per_epoch,
                    **schedule_kwargs)
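A helper like this would presumably be invoked once per training stage with contiguous epoch ranges, each stage warming up from where the previous one ended. A hypothetical invocation (the stage parameter objects and epoch boundaries are illustrative, not from the source):

# Hypothetical: stage 1 covers epochs [0, 30), stage 2 covers [30, 90).
stage1_lr = stage_learning_rate(stage1_params, start_epoch=0, end_epoch=30)
stage2_lr = stage_learning_rate(stage2_params, start_epoch=30, end_epoch=90)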
Example #7
    def testZeroEpochSchedule(self):
        # When warmup_start_epoch, max_learning_rate_epoch, and
        # decay_end_epoch all coincide, the schedule degenerates to the
        # constant base learning rate.
        global_step = tf.placeholder_with_default(0, [], 'global_step')
        learning_rate = utils.build_learning_rate_schedule(
            learning_rate=1.,
            decay_type=enums.DecayType.EXPONENTIAL,
            warmup_start_epoch=24,
            max_learning_rate_epoch=24,
            decay_end_epoch=24,
            global_step=global_step,
            steps_per_epoch=100)
        self.assertEqual(1., learning_rate)
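Note the use of assertEqual against a plain float here, outside any session: it suggests that for this degenerate schedule the builder returns the constant base learning rate directly as a Python number rather than a tensor.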
Example #8
    def testPiecewiseLinearDecayWithBoundaries(self):
        # Explicit boundary epochs are interpreted relative to
        # max_learning_rate_epoch, so the drops land at epochs 6 through 9.
        global_step = tf.placeholder_with_default(0, [], 'global_step')
        boundary_epochs = [1, 2, 3, 4]
        decay_rate = 1e-1
        max_learning_rate_epoch = 5
        steps_per_epoch = 100
        learning_rate = utils.build_learning_rate_schedule(
            learning_rate=1.,
            decay_type=enums.DecayType.PIECEWISE_LINEAR,
            warmup_start_epoch=0,
            max_learning_rate_epoch=max_learning_rate_epoch,
            decay_end_epoch=1000,
            global_step=global_step,
            steps_per_epoch=steps_per_epoch,
            boundary_epochs=boundary_epochs,
            decay_rate=decay_rate)

        with self.cached_session() as sess:
            initial_learning_rate = sess.run(learning_rate,
                                             feed_dict={global_step: 0})
            self.assertAlmostEqual(0., initial_learning_rate)

            max_learning_rate = sess.run(learning_rate,
                                         feed_dict={global_step: 500})
            self.assertAlmostEqual(1., max_learning_rate)

            for i, epoch in enumerate(boundary_epochs):
                before_decay_learning_rate = sess.run(
                    learning_rate,
                    feed_dict={
                        global_step:
                        (epoch + max_learning_rate_epoch) * steps_per_epoch
                    })
                after_decay_learning_rate = sess.run(
                    learning_rate,
                    feed_dict={
                        global_step:
                        ((epoch + max_learning_rate_epoch) * steps_per_epoch) +
                        1
                    })

                self.assertAlmostEqual(before_decay_learning_rate,
                                       decay_rate**i)
                self.assertAlmostEqual(after_decay_learning_rate,
                                       decay_rate**(i + 1))

            final_learning_rate = sess.run(learning_rate,
                                           feed_dict={global_step: 1000})
            self.assertAlmostEqual(1e-4, final_learning_rate, places=5)
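The closing assertion follows from the boundary arithmetic: after the fourth drop the rate is decay_rate**4 = (1e-1)**4 = 1e-4, which is what global_step 1000 (epoch 10, past the last shifted boundary at epoch 9) observes.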
Example #9
    def testExponentialDecayWithoutWarmup(self, decay_index):
        # decay_index is presumably supplied by a parameterized-test decorator
        # not shown in this excerpt; it selects which staircase drop to probe.
        global_step = tf.placeholder_with_default(0, [], 'global_step')
        learning_rate = utils.build_learning_rate_schedule(
            learning_rate=1.,
            decay_type=enums.DecayType.EXPONENTIAL,
            warmup_start_epoch=0,
            max_learning_rate_epoch=0,
            decay_end_epoch=24,
            global_step=global_step,
            steps_per_epoch=100)

        with self.cached_session() as sess:
            initial_learning_rate = sess.run(learning_rate,
                                             feed_dict={global_step: 0})
            self.assertAlmostEqual(1., initial_learning_rate)

            staircase_drop_step = (100 * decay_index *
                                   self.DEFAULT_EPOCHS_PER_DECAY)
            before_staircase_drop_learning_rate = sess.run(
                learning_rate, feed_dict={global_step: staircase_drop_step})
            after_staircase_drop_learning_rate = sess.run(
                learning_rate,
                feed_dict={global_step: staircase_drop_step + 1})

            self.assertAlmostEqual(self.DEFAULT_DECAY_FACTOR**(
                np.floor(staircase_drop_step /
                         (self.DEFAULT_EPOCHS_PER_DECAY * 100)) - 1),
                                   before_staircase_drop_learning_rate,
                                   places=5)
            self.assertAlmostEqual(self.DEFAULT_DECAY_FACTOR**(np.floor(
                staircase_drop_step / (self.DEFAULT_EPOCHS_PER_DECAY * 100))),
                                   after_staircase_drop_learning_rate,
                                   places=5)

            final_learning_rate = sess.run(learning_rate,
                                           feed_dict={global_step: 2400})
            self.assertAlmostEqual(self.DEFAULT_DECAY_FACTOR**(
                np.floor(2400 / (self.DEFAULT_EPOCHS_PER_DECAY * 100)) - 1.),
                                   final_learning_rate,
                                   places=5)
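Because staircase_drop_step is decay_index times the decay period in steps (DEFAULT_EPOCHS_PER_DECAY * 100), it lands exactly on a staircase boundary. The two middle assertions encode the drop semantics: at the boundary step itself the old exponent (decay_index - 1) still applies, and the new exponent (decay_index) takes effect one step later.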