Example #1
def testDense(self):
    decay_steps = 10
    sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
    py_sign_decay_fn = py_linear_decay_fn(decay_steps)
    self._testDense()
    self._testDense(learning_rate=0.01, alpha=0.1, beta=0.8)
    self._testDense(sign_decay_fn=sign_decay_fn,
                    py_sign_decay_fn=py_sign_decay_fn)
Example #2
def testDense(self):
    decay_steps = 10
    sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
    py_sign_decay_fn = py_linear_decay_fn(decay_steps)
    self._testDense()
    self._testDense(learning_rate=0.1, base=10.0, beta=0.8)
    self._testDense(
        sign_decay_fn=sign_decay_fn, py_sign_decay_fn=py_sign_decay_fn)
Example #3
def testLinearDecay(self):
    num_training_steps = 1000
    linear_decay_fn = sign_decay.get_linear_decay_fn(num_training_steps)

    for step in range(0, 1000, 100):
        with self.test_session():
            tf_decayed = linear_decay_fn(step).eval()
            py_decayed = py_linear_decay_fn(num_training_steps)(step)
            self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
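The test compares the graph-built decay against a plain-Python reference. The py_linear_decay_fn helper is not shown in these snippets; a minimal sketch of what it plausibly looks like, assuming a linear ramp from 1.0 at step 0 down to 0.0 at decay_steps, is:

# Hypothetical reference helper (not part of the snippets above): returns a
# pure-Python function that decays linearly from 1.0 to 0.0 over decay_steps.
def py_linear_decay_fn(decay_steps):
    def linear_decay(step):
        step = min(step, decay_steps)  # clamp past the decay horizon
        return float(decay_steps - step) / decay_steps
    return linear_decay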
Example #4
import tensorflow as tf

# TF 1.x contrib layout is assumed for these imports.
from tensorflow.contrib.opt import (AddSignOptimizer, LazyAdamOptimizer,
                                    PowerSignOptimizer)
from tensorflow.contrib.opt.python.training.sign_decay import (
    get_cosine_decay_fn, get_linear_decay_fn, get_restart_decay_fn)


def get_opt(name, learning_rate, decay_steps=None):
    if name == 'momentum':
        optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
    elif name == 'adam':
        optimizer = tf.train.AdamOptimizer(learning_rate,
                                           beta2=0.98,
                                           epsilon=1e-9)
    elif name == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    elif name == 'rms':
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
    elif name == 'adagrad':
        optimizer = tf.train.AdagradOptimizer(learning_rate)
    elif name == 'lazyadam':
        optimizer = LazyAdamOptimizer(learning_rate)
    elif name == 'powersign':
        optimizer = PowerSignOptimizer(learning_rate)
    elif name == 'powersign-ld':
        optimizer = PowerSignOptimizer(
            learning_rate, sign_decay_fn=get_linear_decay_fn(decay_steps))
    elif name == 'powersign-cd':
        optimizer = PowerSignOptimizer(
            learning_rate, sign_decay_fn=get_cosine_decay_fn(decay_steps))
    elif name == 'powersign-rd':
        optimizer = PowerSignOptimizer(
            learning_rate, sign_decay_fn=get_restart_decay_fn(decay_steps))
    elif name == 'addsign':
        optimizer = AddSignOptimizer(learning_rate)
    elif name == 'addsign-ld':
        optimizer = AddSignOptimizer(
            learning_rate, sign_decay_fn=get_linear_decay_fn(decay_steps))
    elif name == 'addsign-cd':
        optimizer = AddSignOptimizer(
            learning_rate, sign_decay_fn=get_cosine_decay_fn(decay_steps))
    elif name == 'addsign-rd':
        optimizer = AddSignOptimizer(
            learning_rate, sign_decay_fn=get_restart_decay_fn(decay_steps))
    else:
        optimizer = None

    return optimizer
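As a usage sketch (the 'powersign-ld' name comes from the branches above; the step count and learning rate below are illustrative assumptions):

# Hypothetical call: PowerSign with a linear sign decay over 10000 steps.
opt = get_opt('powersign-ld', learning_rate=0.1, decay_steps=10000)
train_op = opt.minimize(loss)  # assumes a `loss` tensor defined elsewhere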
Example #5
def testSparse(self):
    decay_steps = 10
    sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
    py_sign_decay_fn = py_linear_decay_fn(decay_steps)
    self._testSparse(use_resource=False)
    self._testSparse(use_resource=False,
                     learning_rate=0.01,
                     base=2.0,
                     beta=0.8)
    self._testSparse(use_resource=False,
                     sign_decay_fn=sign_decay_fn,
                     py_sign_decay_fn=py_sign_decay_fn)
Example #6
def testDense(self):
    decay_steps = 10
    sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
    py_sign_decay_fn = py_linear_decay_fn(decay_steps)
    self._testDense(use_resource=False)
    self._testDense(use_resource=False, learning_rate=0.01, alpha=0.1,
                    beta=0.8)
    self._testDense(use_resource=False,
                    sign_decay_fn=sign_decay_fn,
                    py_sign_decay_fn=py_sign_decay_fn)

    self._testDense(use_resource=True)
    self._testDense(use_resource=True, learning_rate=0.01, alpha=0.1,
                    beta=0.8)
    self._testDense(use_resource=True,
                    sign_decay_fn=sign_decay_fn,
                    py_sign_decay_fn=py_sign_decay_fn)
Example #7
from keras.optimizers import SGD, Adam, Nadam, RMSprop, TFOptimizer
from tensorflow.contrib.opt import PowerSignOptimizer


def get_optimizer(opt_params, lr):
    """Helper to get an optimizer from text params.

    Parameters
    ----------
    opt_params: dict
        Dictionary containing the optimization function name and learning
        rate decay settings
    lr: float
        Initial learning rate

    Returns
    -------
    opt_function: Keras optimizer
    """

    if opt_params['opt_func'] == 'sgd':
        return SGD(lr=lr, momentum=opt_params['momentum'])
    elif opt_params['opt_func'] == 'adam':
        return Adam(lr=lr)
    elif opt_params['opt_func'] == 'rmsprop':
        return RMSprop(lr=lr)
    elif opt_params['opt_func'] == 'nadam':
        return Nadam(lr=lr)
    elif opt_params['opt_func'] == 'powersign':
        from tensorflow.contrib.opt.python.training import sign_decay as sd
        d_steps = opt_params['pwr_sign_decay_steps']
        # Define the decay function (if specified)
        if opt_params['pwr_sign_decay_func'] == 'lin':
            decay_func = sd.get_linear_decay_fn(d_steps)
        elif opt_params['pwr_sign_decay_func'] == 'cos':
            decay_func = sd.get_cosine_decay_fn(d_steps)
        elif opt_params['pwr_sign_decay_func'] == 'res':
            decay_func = sd.get_restart_decay_fn(d_steps,
                                                 num_periods=opt_params['pwr_sign_decay_periods'])
        elif opt_params['pwr_sign_decay_func'] is None:
            decay_func = None
        else:
            raise ValueError('decay function not specified correctly')

        # Use decay function in TF optimizer
        return TFOptimizer(PowerSignOptimizer(learning_rate=lr,
                                              sign_decay_fn=decay_func))
    else:
        raise ValueError('unknown opt_func: %s' % opt_params['opt_func'])
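A minimal usage sketch, with an illustrative opt_params dict (the values are assumptions consistent with the branches above, not taken from the source):

# Hypothetical parameters exercising the 'powersign' branch with cosine decay.
opt_params = {
    'opt_func': 'powersign',
    'pwr_sign_decay_func': 'cos',
    'pwr_sign_decay_steps': 10000,
}
optimizer = get_optimizer(opt_params, lr=0.001)
model.compile(optimizer=optimizer, loss='mse')  # assumes a Keras `model`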