Example no. 1
 def optimizer(self):
     lr = tf.get_variable('learning_rate', initializer=5e-4, trainable=False)
     opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     return optimizer.apply_grad_processors(
         opt, [
             gradproc.ScaleGradient(('STN.*', 0.1)),
             gradproc.SummaryGradient()])
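All of the snippets in this listing are `optimizer()` methods built on tensorpack's gradient-processor utilities and the TensorFlow 1.x API. As a point of reference, here is a minimal sketch of the surrounding context, reusing the body of example no. 1; the class name is a placeholder and inputs()/build_graph() are omitted, so this is an illustration rather than code taken from any of the examples.
 import tensorflow as tf
 from tensorpack import ModelDesc
 from tensorpack.tfutils import gradproc, optimizer

 class ExampleModel(ModelDesc):
     # A real ModelDesc also defines inputs() and build_graph(); omitted here.

     def optimizer(self):
         # Non-trainable variable, so callbacks can adjust the learning rate during training.
         lr = tf.get_variable('learning_rate', initializer=5e-4, trainable=False)
         opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
         # Wrap the optimizer so each GradientProcessor runs on the
         # (gradient, variable) pairs before they are applied.
         return optimizer.apply_grad_processors(
             opt, [gradproc.ScaleGradient(('STN.*', 0.1)),  # scale grads of variables matching 'STN.*' by 0.1
                   gradproc.SummaryGradient()])             # add summaries for all gradients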
Example no. 2
 def optimizer(self):
     lr = tf.get_variable('learning_rate',
                          initializer=self.learning_rate,
                          trainable=False)
     opt = tf.train.RMSPropOptimizer(lr, epsilon=1e-5)
     return optimizer.apply_grad_processors(opt,
                                            [gradproc.SummaryGradient()])
Example no. 3
 def optimizer(self):
     opt = tf.train.AdamOptimizer(self.cfg.learning_rate)
     return optimizer.apply_grad_processors(opt, [
         gradproc.MapGradient(
             lambda grad: tf.clip_by_average_norm(grad, 0.3)),
         gradproc.SummaryGradient()
     ])
Example no. 4
 def optimizer(self):
     lr = tf.get_variable("learning_rate", initializer=0.0002, trainable=False)
     tf.summary.scalar("learning_rate", lr)
     opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.999)
     return optimizer.apply_grad_processors(
         opt, [gradproc.SummaryGradient(), gradproc.CheckGradient()]
     )
Example no. 5
 def _get_optimizer(self):
     lr = tf.get_variable('learning_rate',
                          initializer=1e-3,
                          trainable=False)
     opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     return optimizer.apply_grad_processors(
         opt, [gradproc.GlobalNormClip(10),
               gradproc.SummaryGradient()])
Example no. 6
 def optimizer(self):
     lr = tf.get_variable('learning_rate', initializer=self.learning_rate, trainable=False)
     # opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     opt = tf.train.AdamOptimizer(lr)
     return optimizer.apply_grad_processors(
         opt, [
             # gradproc.GlobalNormClip(2.0),
             gradproc.MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.5)),
             gradproc.SummaryGradient()])
Example no. 7
 def optimizer(self):
     lr = tf.train.exponential_decay(self.base_lr,
                                     global_step=get_global_step_var(),
                                     decay_steps=self.decay_steps,
                                     decay_rate=self.decay_rate,
                                     name='learning-rate')
     opt = tf.train.RMSPropOptimizer(learning_rate=lr)
     tf.summary.scalar('lr', lr)
     return optimizer.apply_grad_processors(opt,
                                            [gradproc.SummaryGradient()])
Example no. 8
 def optimizer(self):
     lr = tf.get_variable('learning_rate',
                          initializer=1e-3,
                          trainable=False)
     tf.summary.scalar("learning_rate", lr)
     opt = tf.train.RMSPropOptimizer(lr,
                                     decay=0.95,
                                     momentum=0.95,
                                     epsilon=1e-2)
     return optimizer.apply_grad_processors(opt,
                                            [gradproc.SummaryGradient()])
Example no. 9
    def optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=1e-3, trainable=False)
        # This will also put the summary in tensorboard, stat.json and print in terminal,
        # but this time without moving average
        tf.summary.scalar('lr', lr)
        # opt = tf.train.MomentumOptimizer(lr, 0.9)
        opt = tf.train.AdamOptimizer(lr)

        return optimizer.apply_grad_processors(
            opt, [gradproc.MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.5)),
                  gradproc.SummaryGradient()])
Example no. 10
 def _get_optimizer(self):
     # symbf.get_scalar_var is an older tensorpack helper: it creates a non-trainable
     # scalar variable and, with summary=True, also adds a scalar summary for it.
     lr = symbf.get_scalar_var('learning_rate', 1e-3, summary=True)
     opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     return optimizer.apply_grad_processors(
         opt, [gradproc.GlobalNormClip(10),
               gradproc.SummaryGradient()])
 def optimizer(self):
     lr = tf.get_variable('learning_rate', initializer=1e-3, trainable=False)
     # opt = tf.train.MomentumOptimizer(lr, 0.9)
     opt = tf.train.AdamOptimizer(learning_rate=lr)
     # return opt
     return optimizer.apply_grad_processors(
         opt, [gradproc.ScaleGradient(('5_Tr_Cv.*', 0.1)),
               gradproc.SummaryGradient()])
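For completeness, a hedged sketch of how a ModelDesc like the ones above is typically handed to a tensorpack trainer; ExampleModel refers to the sketch under example no. 1, and the dataflow shapes and max_epoch value are illustrative placeholders.
 from tensorpack import FakeData, SimpleTrainer, TrainConfig, launch_train_with_config

 # FakeData produces random batches; the shapes must match the model's inputs()
 # (the ones below are placeholders), and ExampleModel would need inputs() and
 # build_graph() filled in for this to actually train.
 dataflow = FakeData([[64, 28, 28, 3], [64]], size=1000)

 config = TrainConfig(
     model=ExampleModel(),   # its optimizer() is called once while building the training graph
     dataflow=dataflow,
     max_epoch=100,
 )
 launch_train_with_config(config, SimpleTrainer())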