from tensorflow.python.training import training


# `FLAGS` holds the optimizer hyperparameter flags (FLAGS.adadelta_rho,
# FLAGS.opt_epsilon, ...) and is defined elsewhere in the original module.
def _configure_optimizer(learning_rate, opt_type='adam'):
    if opt_type == 'adadelta':
        optimizer = training.AdadeltaOptimizer(learning_rate,
                                               rho=FLAGS.adadelta_rho,
                                               epsilon=FLAGS.opt_epsilon)
    elif opt_type == 'adagrad':
        optimizer = training.AdagradOptimizer(
            learning_rate,
            initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
    elif opt_type == 'adam':
        optimizer = training.AdamOptimizer(learning_rate)
    elif opt_type == 'ftrl':
        optimizer = training.FtrlOptimizer(
            learning_rate,
            learning_rate_power=FLAGS.ftrl_learning_rate_power,
            initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
            l1_regularization_strength=FLAGS.ftrl_l1,
            l2_regularization_strength=FLAGS.ftrl_l2)
    elif opt_type == 'momentum':
        optimizer = training.MomentumOptimizer(learning_rate,
                                               momentum=FLAGS.momentum,
                                               name='Momentum')
    elif opt_type == 'rmsprop':
        optimizer = training.RMSPropOptimizer(learning_rate,
                                              decay=FLAGS.rmsprop_decay,
                                              momentum=FLAGS.rmsprop_momentum,
                                              epsilon=FLAGS.opt_epsilon)
    elif opt_type == 'sgd':
        optimizer = training.GradientDescentOptimizer(learning_rate)
    else:
        raise ValueError('Optimizer [%s] was not recognized' % opt_type)
    return optimizer
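
A minimal usage sketch for the helper above, assuming TensorFlow 1.x graph mode. The toy variable, loss, and training loop are illustrative and not part of the original module; 'adam' is used because that branch only needs the learning rate, while most other branches read their hyperparameters from the module's FLAGS object.

import tensorflow as tf  # assumes TensorFlow 1.x

# Toy graph: a single variable and a quadratic loss.
w = tf.Variable(5.0, name='w')
loss = tf.square(w - 3.0)
global_step = tf.train.get_or_create_global_step()

# 'adam' only needs the learning rate; other opt_type values read their
# hyperparameters from the FLAGS object defined in the original module.
optimizer = _configure_optimizer(learning_rate=0.01, opt_type='adam')
train_op = optimizer.minimize(loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(train_op)
    print(sess.run(loss))  # approaches 0 as w converges toward 3.0
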
Example #2
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train

OPTIMIZER_CLS_NAMES = {
    "Adagrad": train.AdagradOptimizer,
    "Adam": train.AdamOptimizer,
    "Ftrl": train.FtrlOptimizer,
    "Momentum":
        lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9),  # pylint: disable=line-too-long
    "RMSProp": train.RMSPropOptimizer,
    "SGD": train.GradientDescentOptimizer,
}

OPTIMIZER_SUMMARIES = [
    "learning_rate",
    "loss",
    "gradients",
    "gradient_norm",
    "global_gradient_norm",
]
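
The OPTIMIZER_CLS_NAMES mapping above is typically consumed by looking up an entry by name and calling it with a learning rate, since every value (including the "Momentum" lambda) is callable with a single learning-rate argument. A minimal sketch of that pattern follows; make_optimizer is a hypothetical helper, not part of the original module.

def make_optimizer(name, learning_rate):
    # Hypothetical lookup helper, for illustration only.
    if name not in OPTIMIZER_CLS_NAMES:
        raise ValueError(
            'Optimizer name should be one of [%s], got %s.'
            % (', '.join(sorted(OPTIMIZER_CLS_NAMES)), name))
    # Each dict value accepts a single learning-rate argument.
    return OPTIMIZER_CLS_NAMES[name](learning_rate)

opt = make_optimizer('RMSProp', learning_rate=0.001)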

Example #3
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train

OPTIMIZER_CLS_NAMES = {
    "Adagrad": train.AdagradOptimizer,
    "Adam": train.AdamOptimizer,
    "Ftrl": train.FtrlOptimizer,
    "Momentum": lambda lr: train.MomentumOptimizer(lr, momentum=0.9),
    "RMSProp": train.RMSPropOptimizer,
    "SGD": train.GradientDescentOptimizer,
}

OPTIMIZER_SUMMARIES = [
    "learning_rate",
    "loss",
    "gradients",
    "gradient_norm",
    "global_gradient_norm",
]


def optimize_loss(loss,
                  global_step,