Example #1
 def optimizer(self):
     lr = tf.get_variable('learning_rate',
                          initializer=args.init_lr,
                          trainable=False)
     opt = tf.train.GradientDescentOptimizer(lr)
     # wrap the optimizer so gradients are clipped to a global norm of 5 before being applied
     return optimizer.apply_grad_processors(opt,
                                            [gradproc.GlobalNormClip(5)])
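In all of these examples the learning rate is created as a non-trainable variable named 'learning_rate'. In tensorpack this is usually done so that a callback can overwrite the value while training runs. A minimal sketch of how that is typically wired up (the epoch/value pairs below are made up for illustration):

from tensorpack.callbacks import ScheduledHyperParamSetter

# changes the variable named 'learning_rate' at the given epochs;
# the (epoch, value) schedule here is purely illustrative
lr_schedule = ScheduledHyperParamSetter(
    'learning_rate',
    [(0, 1e-3), (30, 1e-4), (60, 1e-5)])
# pass lr_schedule in the callbacks list of the TrainConfig used for training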
Example #2
 def _get_optimizer(self):
     lr = tf.get_variable('learning_rate',
                          initializer=1e-3,
                          trainable=False)
     opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     # clip to a global norm of 10 and add per-gradient summaries for TensorBoard
     return optimizer.apply_grad_processors(
         opt, [gradproc.GlobalNormClip(10),
               gradproc.SummaryGradient()])
Example #3
 def _get_optimizer(self):
     conf = Config()
     lr = tf.get_variable('learning_rate',
                          initializer=conf.learning_rate,
                          trainable=False)
     opt = tf.train.AdamOptimizer(lr)
     tf.summary.scalar('learning_rate', lr)
     return optimizer.apply_grad_processors(
         opt, [gradproc.GlobalNormClip(conf.max_grad_norm)])
Example #4
 def _get_optimizer(self):
     gradprocs = [
         # project-specific processor; presumably restricts updates to variables matching '.*net2.*'
         FilterGradientVariables('.*net2.*', verbose=False),
         # clip each gradient element-wise to [clip_value_min, clip_value_max]
         gradproc.MapGradient(
             lambda grad: tf.clip_by_value(grad, hp.train2.clip_value_min, hp.train2.clip_value_max)),
         # then clip the global norm of all gradients
         gradproc.GlobalNormClip(hp.train2.clip_norm),
         # gradproc.PrintGradient(),
         # gradproc.CheckGradient(),
     ]
     lr = tf.get_variable('learning_rate', initializer=hp.train2.lr, trainable=False)
     opt = tf.train.AdamOptimizer(learning_rate=lr)
     return optimizer.apply_grad_processors(opt, gradprocs)
Example #5
 def _get_optimizer(self):
     gradprocs = [
         tensorpack_extension.FilterGradientVariables('.*net2.*', verbose=False),
         gradproc.MapGradient(
             lambda grad: tf.clip_by_value(grad, hp.train2.clip_value_min, hp.train2.clip_value_max)),
         gradproc.GlobalNormClip(hp.train2.clip_norm),
         # gradproc.PrintGradient(),
         # gradproc.CheckGradient(),
     ]
     # global_step is only needed by the commented-out learning-rate decay below
     global_step = tf.Variable(0, name='global_step', trainable=False)
     #self.lr = self.learning_rate_decay(global_step, hp.train2.lr)
     #lr = learning_rate_decay(initial_lr = hp.train2.lr, global_step)
     lr = tf.get_variable('learning_rate', initializer=hp.train2.lr, trainable=False)
     opt = tf.train.AdamOptimizer(learning_rate=lr)
     return optimizer.apply_grad_processors(opt, gradprocs)
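FilterGradientVariables is not part of tensorpack; it comes from this project's own tensorpack_extension module. Judging by its name and arguments, it keeps only the gradients of variables matching a regex so that the optimizer updates just that sub-network. A hedged sketch of such a processor built on tensorpack's GradientProcessor base class (an illustration, not the project's actual implementation):

import re
from tensorpack.tfutils.gradproc import GradientProcessor
from tensorpack.utils import logger

class FilterGradientVariables(GradientProcessor):
    """Keep only the (grad, var) pairs whose variable name matches a regex."""

    def __init__(self, var_regex='.*', verbose=True):
        super(FilterGradientVariables, self).__init__()
        self._regex = re.compile(var_regex)
        self._verbose = verbose

    def _process(self, grads):
        # grads is a list of (gradient, variable) pairs
        kept = [(g, v) for g, v in grads if self._regex.match(v.op.name)]
        if self._verbose:
            logger.info('FilterGradientVariables: kept {} of {} gradients'.format(
                len(kept), len(grads)))
        return kept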
Example #6
 def _get_optimizer(self):
     # get_scalar_var (older tensorpack helper): creates a non-trainable scalar
     # variable and also adds a scalar summary for it
     lr = symbf.get_scalar_var('learning_rate', 1e-3, summary=True)
     opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     return optimizer.apply_grad_processors(
         opt, [gradproc.GlobalNormClip(10),
               gradproc.SummaryGradient()])
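Every example above returns optimizer.apply_grad_processors(opt, gradprocs), which wraps the base optimizer so the gradient processors run inside apply_gradients(). A small self-contained sketch (assuming TF 1.x graph mode with tensorpack installed; the toy graph and loss are made up) showing that the wrapped object can be used like any other tf.train.Optimizer, even outside a tensorpack trainer:

import numpy as np
import tensorflow as tf
from tensorpack.tfutils import optimizer, gradproc

# toy graph: a single linear layer with a made-up loss
x = tf.placeholder(tf.float32, [None, 4], name='x')
w = tf.get_variable('w', [4, 1])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))

lr = tf.get_variable('learning_rate', initializer=1e-3, trainable=False)
base_opt = tf.train.GradientDescentOptimizer(lr)
# still a tf.train.Optimizer; GlobalNormClip runs when apply_gradients() is called
opt = optimizer.apply_grad_processors(base_opt, [gradproc.GlobalNormClip(5)])
train_op = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op, feed_dict={x: np.random.rand(8, 4).astype(np.float32)})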