Example #1
import tensorflow as tf  # TF 1.x API; loss_base is assumed to be a project-local module

def test(loss, globalStep, **kwargs):
    # Generate moving averages of all losses and associated summaries.
    lossAveragesOp = loss_base.add_loss_summaries(loss, kwargs.get('activeBatchSize', None))

    # Depend on the averaging op so the loss summaries are actually updated when opTest runs.
    with tf.control_dependencies([lossAveragesOp]):
        opTest = tf.no_op(name='test')

    return opTest
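
Both examples delegate to loss_base.add_loss_summaries, which is not shown in this listing. As a rough sketch only: a TF 1.x helper of this kind typically follows the CIFAR-10 tutorial pattern below. The 'losses' collection key, the decay value of 0.9, and the unused activeBatchSize parameter are assumptions, not the project's actual implementation.

import tensorflow as tf  # TF 1.x API

def add_loss_summaries(totalLoss, activeBatchSize=None):
    # Sketch: track exponential moving averages of the individual losses and
    # the total loss, and attach scalar summaries for raw and averaged values.
    # activeBatchSize is accepted for signature compatibility but unused here.
    lossAverages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    lossAveragesOp = lossAverages.apply(losses + [totalLoss])
    for l in losses + [totalLoss]:
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, lossAverages.average(l))
    # The caller must run (or depend on) this op for the averages to update.
    return lossAveragesOp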
Example #2
import tensorflow as tf  # TF 1.x API; loss_base and optimizer_params are assumed project-local modules

def train(loss, globalStep, **kwargs):
    # Look up the hyperparameters for the requested optimizer.
    if kwargs.get('optimizer') == 'MomentumOptimizer':
        optimizerParams = optimizer_params.get_momentum_optimizer_params(
            globalStep, **kwargs)
    elif kwargs.get('optimizer') == 'AdamOptimizer':
        optimizerParams = optimizer_params.get_adam_optimizer_params(
            globalStep, **kwargs)
    elif kwargs.get('optimizer') == 'GradientDescentOptimizer':
        optimizerParams = optimizer_params.get_gradient_descent_optimizer_params(
            globalStep, **kwargs)

    # Generate moving averages of all losses and associated summaries.
    lossAveragesOp = loss_base.add_loss_summaries(
        loss, kwargs.get('activeBatchSize', None))

    # Compute gradients.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # for batchnorm
    tvars = tf.trainable_variables()
    # Run batch-norm update ops and the loss-averaging op before computing gradients.
    with tf.control_dependencies(update_ops + [lossAveragesOp]):
        if kwargs.get('optimizer') == 'AdamOptimizer':
            optim = tf.train.AdamOptimizer(
                learning_rate=optimizerParams['learningRate'],
                epsilon=optimizerParams['epsilon'])
        elif kwargs.get('optimizer') == 'MomentumOptimizer':
            optim = tf.train.MomentumOptimizer(
                learning_rate=optimizerParams['learningRate'],
                momentum=optimizerParams['momentum'])
        elif kwargs.get('optimizer') == 'GradientDescentOptimizer':
            optim = tf.train.GradientDescentOptimizer(
                learning_rate=optimizerParams['learningRate'])
        elif kwargs.get('optimizer') == 'AdaGrad':
            # Note: the parameter lookup above does not provide params for 'AdaGrad'.
            optim = tf.train.AdagradOptimizer(
                learning_rate=optimizerParams['learningRate'])

        grads, norm = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                             kwargs.get('clipNorm'))

    # Apply the clipped gradients to the trainable variables.
    opApplyGradients = optim.apply_gradients(zip(grads, tvars),
                                             global_step=globalStep)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)

    # Add histograms for gradients.
    for grad, var in zip(grads, tvars):
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)

    with tf.control_dependencies([opApplyGradients]):
        opTrain = tf.no_op(name='train')

    return opTrain
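
A hedged usage sketch for the train op above: the scalar loss tensor, the keyword-argument values, the log directory, and the step count are illustrative assumptions, and the exact kwargs actually required depend on the project's optimizer_params module.

import tensorflow as tf  # TF 1.x API

# Hypothetical wiring: 'loss' is assumed to be a scalar loss tensor built elsewhere.
globalStep = tf.train.get_or_create_global_step()
opTrain = train(loss, globalStep,
                optimizer='MomentumOptimizer',
                activeBatchSize=32,
                clipNorm=1.0)
summaryOp = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('/tmp/train_logs', sess.graph)
    for step in range(1000):
        # Each run updates the weights, the batch-norm statistics,
        # and the loss moving averages, and collects the summaries.
        _, summary = sess.run([opTrain, summaryOp])
        writer.add_summary(summary, step)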