Example #1
 def optimizer(self):
     lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
     opt = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
     gradprocs = [
         gradproc.ScaleGradient([('conv0.*', 0.1), ('group[0-2].*', 0.1)])
     ]
     return optimizer.apply_grad_processors(opt, gradprocs)
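Here ScaleGradient multiplies the gradient of every variable matching conv0.* or group[0-2].* by 0.1, giving those early layers a 10x smaller effective learning rate under Nesterov momentum (a common choice when the early layers come from a pretrained model).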
Example #2
 def optimizer(self):
     lr = tf.get_variable('learning_rate', initializer=2e-4, dtype=tf.float32, trainable=False)
     # lr = tf.get_variable('learning_rate', initializer=0.0, dtype=tf.float32, trainable=False)
     opt = tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-6)
     # generator learns 5 times faster
     return optimizer.apply_grad_processors(
         opt, [gradproc.ScaleGradient(('gen/.*', 5))])
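A GAN-style configuration: Adam with beta1=0.5, plus a ScaleGradient that multiplies the gradients of all variables under the gen/ scope by 5, so the generator learns five times faster than the rest of the graph, as the inline comment notes.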
Example #3
 def optimizer(self):
     lr = tf.get_variable('learning_rate', initializer=5e-4, trainable=False)
     opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     return optimizer.apply_grad_processors(
         opt, [
             gradproc.ScaleGradient(('STN.*', 0.1)),
             gradproc.SummaryGradient()])
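Gradients of variables matching STN.* (presumably a spatial-transformer module) are scaled down to 10%, and SummaryGradient() additionally logs a summary for every gradient, which is useful for monitoring training in TensorBoard.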
Example #4
 def _get_optimizer(self):
     lr = tf.get_variable('learning_rate',
                          initializer=3e-5,
                          trainable=False)
     opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
     return optimizer.apply_grad_processors(opt, [
         gradproc.ScaleGradient([('convfcweight.*', 0.1), ('conv5_.*', 5)])
     ])
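A fine-tuning recipe: convfcweight.* gradients are scaled down to 0.1 while conv5_.* gradients are boosted 5x, presumably so a newly added head adapts faster than the pretrained trunk.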
Example #5
 def optimizer(self):
     lr = tf.get_variable('lr', initializer=3e-3, trainable=False)
     # opt = tf.train.GradientDescentOptimizer(lr)
     opt = tf.train.AdamOptimizer(lr)
     # freeze all variables in network
     opt = optimizer.apply_grad_processors(
         opt, [gradproc.ScaleGradient([('.*/b', 0.), ('.*/W', 0.)])])
     return opt
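Scaling both the weight (.*/W) and bias (.*/b) gradients by 0 freezes every variable in the network, as the comment says; the training loop still runs but no parameters are updated.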
Example #6
 def optimizer(self):
     lr = tf.get_variable('learning_rate',
                          initializer=args.base_lr,
                          trainable=False)
     opt = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
     add_moving_summary(lr)
     if args.load:
         gradprocs = [
             gradproc.ScaleGradient([('conv.*', 0.1), ('fc.*', 0.1)])
         ]
         return optimizer.apply_grad_processors(opt, gradprocs)
     else:
         return opt
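The 10x gradient reduction on conv.* and fc.* layers is applied only when a checkpoint is loaded (args.load), i.e., when fine-tuning; training from scratch returns the plain Nesterov-momentum optimizer unchanged.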
Example #7
    def _get_optimizer(self):
        lr = symbf.get_scalar_var('learning_rate', 0.003, summary=True)

        factor = get_batch_factor()
        if factor != 1:
            lr = lr / float(factor)
            opt = tf.train.MomentumOptimizer(lr, 0.9)
            opt = optimizer.AccumGradOptimizer(opt, factor)
        else:
            opt = tf.train.MomentumOptimizer(lr, 0.9)
        return opt
        # NOTE: the lines below are dead code, never reached because of the
        # return above; kept for reference:
        # return optimizer.apply_grad_processors(
        #     opt, [gradproc.ScaleGradient(('.*/b', 2))])
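When get_batch_factor() is not 1 (e.g., training on fewer GPUs than the reference configuration), the learning rate is divided by the factor and AccumGradOptimizer accumulates gradients over that many steps before applying them, emulating the larger effective batch size.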
Example #8
    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=0.003,
                             trainable=False)
        tf.summary.scalar('learning_rate', lr)

        factor = get_batch_factor()
        if factor != 1:
            lr = lr / float(factor)
            opt = tf.train.MomentumOptimizer(lr, 0.9)
            opt = optimizer.AccumGradOptimizer(opt, factor)
        else:
            opt = tf.train.MomentumOptimizer(lr, 0.9)
        return opt
        # NOTE: the lines below are dead code, never reached because of the
        # return above; kept for reference:
        # return optimizer.apply_grad_processors(
        #     opt, [gradproc.ScaleGradient(('.*/b', 2))])
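Same logic as Example #7, except the learning-rate variable and its summary are created directly with tf.get_variable and tf.summary.scalar instead of the older symbf.get_scalar_var helper.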
Example #9
 def _get_optimizer(self):
     lr = get_scalar_var('learning_rate', cfg.base_lr, summary=True)
     opt = tf.train.MomentumOptimizer(learning_rate=lr,
                                      momentum=cfg.momentum)
     gradprocs = [
         gradproc.ScaleGradient([('conv.*/W', 1),
                                 ('conv.*/b', cfg.bias_lr_mult),
                                 ('bottleneck.*/W', 1),
                                 ('bottleneck.*/b', cfg.bias_lr_mult),
                                 ('stage_1.*/W', 1),
                                 ('stage_1.*/b', cfg.bias_lr_mult),
                                 ('stage_[2-6].*/W', cfg.lr_mult),
                                 ('stage_[2-6].*/b',
                                  cfg.lr_mult * cfg.bias_lr_mult)])
     ]
     return optimizer.apply_grad_processors(opt, gradprocs)
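Caffe-style per-layer learning rates driven by config values: weights keep the base rate (multiplier 1), biases get cfg.bias_lr_mult, the later stages stage_[2-6] get cfg.lr_mult, and their biases receive the product of the two multipliers.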
Example #10
 def _get_optimizer(self):
     lr = symbf.get_scalar_var('learning_rate', 0.003, summary=True)
     opt = tf.train.MomentumOptimizer(lr, 0.9)
     return optimizer.apply_grad_processors(
         opt, [gradproc.ScaleGradient(('.*/b', 2))])
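Scaling all bias gradients ('.*/b', 2) by 2 mirrors the old Caffe convention of giving biases twice the base learning rate.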
Example #11
 def optimizer(self):
     lr = tf.get_variable('learning_rate', initializer=1e-3, trainable=False)
     # opt = tf.train.MomentumOptimizer(lr, 0.9)
     opt = tf.train.AdamOptimizer(learning_rate=lr)
     # return opt
     return optimizer.apply_grad_processors(
         opt, [gradproc.ScaleGradient(('5_Tr_Cv.*', 0.1)),
               gradproc.SummaryGradient()])
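Besides scaling the 5_Tr_Cv.* gradients to 10%, SummaryGradient() also logs every gradient.

For context, here is a minimal sketch of where such an optimizer() method lives: a tensorpack ModelDesc using the TF1-era API. The ToyModel class, its input shapes, and the 0.5 bias multiplier are illustrative assumptions, not taken from any of the examples above.

import tensorflow as tf
from tensorpack import ModelDesc, FullyConnected
from tensorpack.tfutils import optimizer, gradproc

class ToyModel(ModelDesc):  # hypothetical model, for illustration only
    def inputs(self):
        return [tf.TensorSpec([None, 28, 28, 1], tf.float32, 'image'),
                tf.TensorSpec([None], tf.int64, 'label')]

    def build_graph(self, image, label):
        # one fully-connected layer; tensorpack names its variables fc0/W and fc0/b
        logits = FullyConnected('fc0', tf.layers.flatten(image), 10)
        return tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=label, logits=logits), name='cost')

    def optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=1e-3, trainable=False)
        opt = tf.train.AdamOptimizer(lr)
        # halve every bias gradient (matches fc0/b), same mechanism as above
        return optimizer.apply_grad_processors(
            opt, [gradproc.ScaleGradient(('.*/b', 0.5))])

The trainer calls optimizer() once when building the training graph, so the returned optimizer (with its gradient processors attached) is what actually applies the scaled gradients.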