Example #1
def train(loss_val, var_list):
    """Build the Adam train op for the given loss over the listed variables."""
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # Record a gradient summary per variable when debugging.
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
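All of the examples on this page rely on a project-specific helper `add_gradient_summary` that is not shown here. A minimal sketch of what such a helper might look like, assuming it simply wraps `tf.summary.histogram` (the name suffix and the `collections` argument are assumptions):

import tensorflow as tf

def add_gradient_summary(grad, var, collections=None):
    # Skip variables with no gradient (e.g. frozen or unused variables).
    if grad is not None:
        # Tag the histogram with the variable name so gradients group together in TensorBoard.
        tf.summary.histogram(var.op.name + '/gradient', grad, collections=collections)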
Example #2
def _training(self, global_step):
    """
    Set up the training phase with Adam.
    :param global_step: global step of training
    :return: the Adam train op
    """
    optimizer = tf.train.AdamOptimizer(self.lr)
    grads = optimizer.compute_gradients(self.loss_op)
    # Record gradient histograms in the 'train' summary collection.
    for grad, var in grads:
        tf_utils.add_gradient_summary(grad, var, collections=['train'])
    return optimizer.apply_gradients(grads, global_step=global_step)
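For context, a rough sketch of how this method could be driven, assuming a `model` instance exposing `_training` and `loss_op` as above (the `model` object and the session loop are illustrative, not part of the original code):

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
train_op = model._training(global_step)   # hypothetical model instance

# The gradient summaries above went into the 'train' collection, so merge that key.
train_summaries = tf.summary.merge_all(key='train')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, summaries, step = sess.run([train_op, train_summaries, global_step])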
Example #3
    tf.summary.scalar('mean_iou', mean_IOU)
    tf.summary.scalar('reg_loss', reg_loss)
    # tf.summary.scalar('learning_rate', decay_learning_rate)

with tf.name_scope('summary_ious'):
    add_iou_summary(IOUs, pascal_voc_classes)

with tf.name_scope('summary_vars'):
    for weight in weight_vars:
        add_var_summary(weight)
    for bias in bias_vars:
        add_var_summary(bias)

with tf.name_scope('summary_grads'):
    for grad, var in weight_grads:
        add_gradient_summary(grad, var)
    for grad, var in bias_grads:
        add_gradient_summary(grad, var)

# with tf.name_scope('summary_activations'):
#     for activations in endpoints.keys():
#         add_activation_summary(endpoints[activations])

merge_summary = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir, sess.graph)
saver = tf.train.Saver(max_to_keep=3)

# initialize
ckpt = None
if FLAGS.last_ckpt is not None:
    ckpt = tf.train.latest_checkpoint(FLAGS.last_ckpt)
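A hedged sketch of how this fragment is typically continued: restore from the checkpoint if one was found, then run the training loop while writing merged summaries (the `train_op` and `FLAGS.max_steps` names are assumptions and do not appear in the original snippet):

sess.run(tf.global_variables_initializer())
if ckpt is not None:
    saver.restore(sess, ckpt)   # resume from the latest checkpoint

for step in range(FLAGS.max_steps):   # FLAGS.max_steps is an assumption
    _, summary = sess.run([train_op, merge_summary])
    if step % 100 == 0:
        train_writer.add_summary(summary, step)
        saver.save(sess, FLAGS.summaries_dir + '/model.ckpt', global_step=step)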