Code example #1
    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=0.001,
                             trainable=False)
        opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)

        gradprocs = [
            MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1)),
            SummaryGradient()
        ]
        opt = optimizer.apply_grad_processors(opt, gradprocs)
        return opt
Code example #2
    def optimizer(self, scope_name="master"):
        lr = tf.get_variable('learning_rate',
                             initializer=0.001,
                             trainable=False)
        opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)

        gradprocs = [
            MapGradient(lambda grad: tf.clip_by_norm(
                grad, 0.1 * tf.cast(tf.size(grad), tf.float32))),
            SummaryGradient()
        ]
        opt = optimizer.apply_grad_processors(opt, gradprocs)
        return opt
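
The clipping in examples #1 and #2 amounts to the same operation: tf.clip_by_average_norm(grad, 0.1) rescales a gradient whose L2 norm divided by its element count exceeds 0.1, which example #2 spells out with tf.clip_by_norm. A minimal sketch of that equivalence (the helper name clip_by_average_norm_manual is made up for illustration):

    import tensorflow as tf

    def clip_by_average_norm_manual(grad, clip_norm=0.1):
        # Same effect as tf.clip_by_average_norm(grad, clip_norm):
        # limit the L2 norm to clip_norm times the number of elements.
        return tf.clip_by_norm(grad, clip_norm * tf.cast(tf.size(grad), tf.float32))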
Code example #3
File: main.py Project: waxz/ppo_torcs
 def _get_opt(name, init_lr):
     lr = symbf.get_scalar_var('learning_rate/' + name,
                               init_lr,
                               summary=True)
     opt = tf.train.AdamOptimizer(lr)
     logger.info("create opt {}".format(name))
     gradprocs = [
         # MapGradient(lambda grad: tf.Print(grad, [grad], 'grad {}='.format(grad.op.name), summarize=4)),
         MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1),
                     regex='^actor/.*'),
         MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.05),
                     regex='^critic/.*'),
         # GlobalNormClip(40.),
         SummaryGradient(),
     ]
     opt = optimizer.apply_grad_processors(opt, gradprocs)
     return opt
Code example #4
 def _get_optimizer(self, name):
     from tensorpack.tfutils import optimizer
     from tensorpack.tfutils.gradproc import SummaryGradient, GlobalNormClip, MapGradient
     init_lr = INIT_LEARNING_RATE_A if name == 'actor' else INIT_LEARNING_RATE_C
     import tensorpack.tfutils.symbolic_functions as symbf
     lr = symbf.get_scalar_var('learning_rate/' + name,
                               init_lr,
                               summary=True)
     opt = tf.train.AdamOptimizer(lr)
     logger.info("create opt {}".format(name))
     gradprocs = [
         # MapGradient(lambda grad: tf.Print(grad, [grad], 'grad {}='.format(grad.op.name), summarize=4)),
         MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1),
                     regex='^actor/.*'),
         MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.05),
                     regex='^critic/.*'),
         # GlobalNormClip(40.),
         SummaryGradient(),
     ]
     opt = optimizer.apply_grad_processors(opt, gradprocs)
     return opt
Code example #5
 def _get_optimizer(self):
     lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)
     opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     return optimizer.apply_grad_processors(
         opt, [GlobalNormClip(5), SummaryGradient()])
Code example #6
 def get_gradient_processor(self):
     return [MapGradient(lambda grad:
             tf.clip_by_global_norm([grad], 5)[0][0]),
             SummaryGradient()]
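
In example #6 the global norm is computed over a one-element list, so each gradient is effectively clipped to an L2 norm of 5 on its own, unlike GlobalNormClip(5) in examples #5 and #8, which clips the joint norm of all gradients. A shorter equivalent, as a sketch only and assuming the per-gradient behaviour is intended:

    import tensorflow as tf
    from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient

    # With a one-element list, the "global" norm is just this gradient's own
    # L2 norm, so a plain per-tensor clip gives the same result.
    gradprocs = [MapGradient(lambda grad: tf.clip_by_norm(grad, 5)),
                 SummaryGradient()]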
Code example #7
File: train-atari.py Project: deepsense-ai/BA3C-CPU
 def get_gradient_processor(self):
     return [MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1)),
             SummaryGradient()]
Code example #8
File: edr10.py Project: voidiak/MTRE
 def optimizer(self):
     lr = tf.get_variable("learning_rate", initializer=self.lr, trainable=False)
     opt = tf.train.AdamOptimizer(lr)
     return optimizer.apply_grad_processors(
         opt, [GlobalNormClip(5), SummaryGradient()]
     )
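
Every example returns the result of optimizer.apply_grad_processors, which wraps the original optimizer so that the processors run whenever gradients are applied. A minimal, hedged usage sketch outside any of the projects above (loss stands for a hypothetical scalar loss tensor):

    import tensorflow as tf
    from tensorpack.tfutils import optimizer
    from tensorpack.tfutils.gradproc import GlobalNormClip, SummaryGradient

    def build_train_op(loss):
        # The wrapped object is still a tf.train.Optimizer, so the gradient
        # processors run inside minimize() / apply_gradients().
        lr = tf.get_variable('learning_rate', initializer=1e-3, trainable=False)
        opt = tf.train.AdamOptimizer(lr)
        opt = optimizer.apply_grad_processors(
            opt, [GlobalNormClip(5), SummaryGradient()])
        return opt.minimize(loss)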