# Assumes TF 1.x-style optimizers (e.g. `training = tf.compat.v1.train`) and an
# absl/TF FLAGS object that defines the optimizer hyperparameter flags used below.
def _configure_optimizer(learning_rate, opt_type='adam'):
    """Returns a TF 1.x optimizer instance selected by `opt_type`."""
    if opt_type == 'adadelta':
        optimizer = training.AdadeltaOptimizer(learning_rate,
                                               rho=FLAGS.adadelta_rho,
                                               epsilon=FLAGS.opt_epsilon)
    elif opt_type == 'adagrad':
        optimizer = training.AdagradOptimizer(
            learning_rate,
            initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
    elif opt_type == 'adam':
        optimizer = training.AdamOptimizer(learning_rate)
    elif opt_type == 'ftrl':
        optimizer = training.FtrlOptimizer(
            learning_rate,
            learning_rate_power=FLAGS.ftrl_learning_rate_power,
            initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
            l1_regularization_strength=FLAGS.ftrl_l1,
            l2_regularization_strength=FLAGS.ftrl_l2)
    elif opt_type == 'momentum':
        optimizer = training.MomentumOptimizer(learning_rate,
                                               momentum=FLAGS.momentum,
                                               name='Momentum')
    elif opt_type == 'rmsprop':
        optimizer = training.RMSPropOptimizer(learning_rate,
                                              decay=FLAGS.rmsprop_decay,
                                              momentum=FLAGS.rmsprop_momentum,
                                              epsilon=FLAGS.opt_epsilon)
    elif opt_type == 'sgd':
        optimizer = training.GradientDescentOptimizer(learning_rate)
    else:
        raise ValueError('Optimizer [%s] was not recognized' % opt_type)
    return optimizer
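
A minimal usage sketch (not from the original source), assuming the function above lives in the same script, that `training` is `tf.compat.v1.train`, and that only the `momentum` flag is needed for the chosen optimizer; the toy variable and loss are illustrative.

import tensorflow.compat.v1 as tf
from absl import flags

tf.disable_v2_behavior()   # run in TF 1.x graph mode so minimize(loss) works
training = tf.train        # alias that _configure_optimizer expects

FLAGS = flags.FLAGS
flags.DEFINE_float('momentum', 0.9, 'Momentum for the MomentumOptimizer.')
FLAGS(['example'])         # parse flags so FLAGS.momentum can be read

w = tf.Variable(1.0)
loss = tf.square(w - 3.0)  # toy scalar loss

optimizer = _configure_optimizer(learning_rate=0.01, opt_type='momentum')
train_op = optimizer.minimize(loss)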
Example #2
import math

# Assumes `train` = tf.compat.v1.train and a module-level `_LEARNING_RATE` cap.
def _get_default_optimizer(feature_columns):
  learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
  return train.FtrlOptimizer(learning_rate=learning_rate)
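
A similar sketch for the default FTRL helper, again assuming one script where `train` is `tf.compat.v1.train` and `_LEARNING_RATE` is a module constant (the 0.2 value and the feature columns below are illustrative, not from the snippet).

import tensorflow.compat.v1 as tf

train = tf.train        # alias assumed by _get_default_optimizer
_LEARNING_RATE = 0.2    # illustrative cap, not taken from the snippet

feature_columns = [
    tf.feature_column.numeric_column('age'),
    tf.feature_column.numeric_column('income'),
]

# learning_rate = min(0.2, 1 / sqrt(2)) = 0.2 for two feature columns
opt = _get_default_optimizer(feature_columns)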