Example No. 1
    def _get_optimizer(self):
        lr = symbf.get_scalar_var('learning_rate', 0.003, summary=True)

        factor = get_batch_factor()
        if factor != 1:
            lr = lr / float(factor)
            opt = tf.train.MomentumOptimizer(lr, 0.9)
            opt = optimizer.AccumGradOptimizer(opt, factor)
        else:
            opt = tf.train.MomentumOptimizer(lr, 0.9)
        return opt
        # NOTE: the call below is unreachable because of the return above;
        # if enabled, it would double the gradients of '.*/b' (bias) variables.
        # return optimizer.apply_grad_processors(
        #     opt, [gradproc.ScaleGradient(('.*/b', 2))])
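
The apply_grad_processors call left unreachable above would double the gradients of variables whose names match '.*/b', i.e. bias terms. A minimal standalone sketch of that pattern, using the same tensorpack helpers (optimizer.apply_grad_processors and gradproc.ScaleGradient); the 0.003 / 0.9 values simply mirror the snippet:

import tensorflow as tf
from tensorpack.tfutils import gradproc, optimizer

# Base optimizer, as in the snippets above.
base_opt = tf.train.MomentumOptimizer(0.003, 0.9)

# Wrap it so gradients of variables matching '.*/b' are multiplied by 2,
# i.e. biases effectively train with twice the learning rate.
opt = optimizer.apply_grad_processors(
    base_opt, [gradproc.ScaleGradient(('.*/b', 2))])
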
Example No. 2
    def optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=0.003,
                             trainable=False)
        tf.summary.scalar('learning_rate-summary', lr)

        factor = cfg.TRAIN.NUM_GPUS / 8.
        if factor != 1:
            lr = lr * factor
        opt = tf.train.MomentumOptimizer(lr, 0.9)
        if cfg.TRAIN.NUM_GPUS < 8:
            opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
        return opt
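
Several of these examples share the same idea as this one: the 0.003 base learning rate is tuned for an 8-GPU batch, so with fewer GPUs the rate is rescaled and AccumGradOptimizer accumulates gradients over 8 // NUM_GPUS iterations before each weight update, keeping the effective batch size comparable. A minimal sketch of that pattern outside any class; NUM_GPUS below is a hypothetical stand-in for cfg.TRAIN.NUM_GPUS:

import tensorflow as tf
from tensorpack.tfutils import optimizer

NUM_GPUS = 2  # hypothetical value; the snippets read it from cfg.TRAIN.NUM_GPUS

def make_optimizer():
    lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
    tf.summary.scalar('learning_rate-summary', lr)

    lr = lr * (NUM_GPUS / 8.)  # the base schedule assumes 8 GPUs
    opt = tf.train.MomentumOptimizer(lr, 0.9)
    if NUM_GPUS < 8:
        # Run 8 // NUM_GPUS forward/backward passes per update so that one
        # update still sees roughly the same number of samples as 8 GPUs.
        opt = optimizer.AccumGradOptimizer(opt, 8 // NUM_GPUS)
    return opt
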
Example No. 3
    def optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=0.003,
                             trainable=False)
        tf.summary.scalar('learning_rate-summary', lr)

        # The learning rate is set for 8 GPUs, and we use trainers with average=False.
        lr = lr / 8.
        opt = tf.train.MomentumOptimizer(lr, 0.9)

        opt = optimizer.AccumGradOptimizer(
            opt, 8 // cfg.TRAIN.NUM_GPUS)  # assumes cfg.TRAIN.NUM_GPUS < 8
        return opt
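
The lr = lr / 8. here compensates for trainers run with average=False, where the gradients of the 8 towers are summed rather than averaged; dividing the learning rate by 8 keeps the weight update at its single-GPU scale. A small numeric check of that equivalence (the gradient values below are made up for illustration):

import numpy as np

per_gpu_grads = np.array([0.11, 0.09, 0.10, 0.12, 0.08, 0.10, 0.11, 0.09])
lr = 0.003

# average=False: the trainer applies the sum of the 8 tower gradients,
# so the snippet divides the learning rate by 8 ...
update_with_sum = (lr / 8.) * per_gpu_grads.sum()
# ... which equals the usual update computed from the averaged gradient.
update_with_mean = lr * per_gpu_grads.mean()
assert np.isclose(update_with_sum, update_with_mean)
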
Example No. 4
    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=0.003,
                             trainable=False)
        tf.summary.scalar('learning_rate', lr)

        factor = get_batch_factor()
        if factor != 1:
            lr = lr / float(factor)
            opt = tf.train.MomentumOptimizer(lr, 0.9)
            opt = optimizer.AccumGradOptimizer(opt, factor)
        else:
            opt = tf.train.MomentumOptimizer(lr, 0.9)
        return opt
Example No. 5
    def optimizer(self):
        lr = tf.get_variable("learning_rate",
                             initializer=0.003,
                             trainable=False)
        tf.summary.scalar("learning_rate-summary", lr)

        # The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.
        lr = lr / 8.0
        opt = tf.train.MomentumOptimizer(lr, 0.9)
        if cfg.TRAIN.NUM_GPUS < 8:
            opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
        if cfg.TRAIN.GRADIENT_CLIP != 0:
            opt = GradientClipOptimizer(opt, cfg.TRAIN.GRADIENT_CLIP)
        return opt
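
GradientClipOptimizer in this example comes from the surrounding project rather than from tensorpack, so its exact clipping rule is not shown here. A roughly equivalent effect can be sketched with tensorpack's built-in gradient processors, e.g. clipping by global norm; clip_norm below is a hypothetical stand-in for cfg.TRAIN.GRADIENT_CLIP:

import tensorflow as tf
from tensorpack.tfutils import gradproc, optimizer

def clipped_momentum_optimizer(lr=0.003, clip_norm=10.0):
    opt = tf.train.MomentumOptimizer(lr, 0.9)
    # Clip the global norm of all gradients before the update is applied.
    return optimizer.apply_grad_processors(
        opt, [gradproc.GlobalNormClip(clip_norm)])
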
Example No. 6
    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=0.003,
                             trainable=False)
        tf.summary.scalar('learning_rate', lr)

        factor = get_batch_factor()
        if factor != 1:
            lr = lr / float(factor)
            opt = tf.train.MomentumOptimizer(lr, 0.9)
            opt = optimizer.AccumGradOptimizer(opt, factor)
        else:
            opt = tf.train.MomentumOptimizer(lr, 0.9)
        return opt
        # NOTE: unreachable due to the return above, as in Example No. 1.
        # return optimizer.apply_grad_processors(
        #     opt, [gradproc.ScaleGradient(('.*/b', 2))])
Example No. 7
    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=0.01,
                             trainable=False)
        tf.summary.scalar('learning_rate', lr)
        print("get_nr_gpu", get_nr_gpu())
        if config.BIG:
            if config.ACC:
                factor = 4
                lr = lr / float(factor)
                opt = tf.train.AdamOptimizer(lr)
                opt = optimizer.AccumGradOptimizer(opt, factor)
            else:
                opt = tf.train.AdamOptimizer(lr, 0.9)  # 0.9 is beta1 (already the default); likely left over from a MomentumOptimizer call

        else:
            #opt = tf.train.MomentumOptimizer(lr, 0.9)
            opt = tf.train.AdamOptimizer(lr)
        return opt
Example No. 8
    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=0.01,
                             trainable=False)
        tf.summary.scalar('learning_rate', lr)
        """
        factor = get_batch_factor()  # accumulate size
        if factor != 1:
            lr = lr / float(factor)
            opt = tf.train.MomentumOptimizer(lr, 0.9)
            opt = optimizer.AccumGradOptimizer(opt, factor)
        else:
            opt = tf.train.MomentumOptimizer(lr, 0.9)
        """
        if config.ACCU:
            factor = 2
            # lr = lr / float(factor)
            opt = tf.train.AdamOptimizer(lr)
            opt = optimizer.AccumGradOptimizer(opt, factor)
        else:
            opt = tf.train.AdamOptimizer(lr)
        # opt = tf.train.MomentumOptimizer(lr, 0.9)
        return opt
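
All of these snippets are the optimizer() / _get_optimizer() method of a tensorpack ModelDesc; the trainer calls the method once while building the training graph. A minimal, self-contained sketch of where such a method lives, assuming a recent tensorpack (the inputs() / build_graph() / optimizer() interface) and TF1-style layers; the toy model itself is invented for illustration:

import tensorflow as tf
from tensorpack import ModelDesc

class ToyModel(ModelDesc):
    def inputs(self):
        # Placeholders that the dataflow feeds, matched by name.
        return [tf.TensorSpec((None, 4), tf.float32, 'feature'),
                tf.TensorSpec((None,), tf.int64, 'label')]

    def build_graph(self, feature, label):
        logits = tf.layers.dense(feature, 2)
        # Return the total cost to minimize.
        return tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=label, logits=logits))

    def optimizer(self):
        # Same pattern as the examples above: a non-trainable variable,
        # so callbacks can adjust the learning rate during training.
        lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
        tf.summary.scalar('learning_rate-summary', lr)
        return tf.train.MomentumOptimizer(lr, 0.9)

With a DataFlow that yields 'feature' and 'label', such a model would then be trained through TrainConfig(model=ToyModel(), dataflow=...) and launch_train_with_config.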