def solve(global_step, num_samples=82783):
    """Build the training op: loss summaries plus gradient updates.

    NOTE(review): this file contains later redefinitions of ``solve``;
    in Python only the last one bound at import time is used. Consider
    removing the dead duplicates.

    Args:
        global_step: global-step variable, incremented by apply_gradients.
        num_samples: number of training examples used to configure the
            learning-rate schedule. Default 82783 — presumably the COCO
            train2014 image count; confirm against the dataset in use.
            (Parameterized here instead of hard-coded; default preserves
            the original behavior.)

    Returns:
        A grouped op that applies gradients and, when FLAGS.update_bn is
        set, the batch-norm moving mean/variance updates.
    """
    # Learning rate schedule and optimizer.
    lr = _configure_learning_rate(num_samples, global_step)
    optimizer = _configure_optimizer(lr)
    tf.summary.scalar('learning_rate', lr)

    # Collect model losses and regularization losses once, and reuse the
    # two partial sums instead of re-summing the concatenated list as the
    # original did with tf.add_n(losses + regular_losses).
    losses = tf.get_collection(tf.GraphKeys.LOSSES)
    regular_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    out_loss = tf.add_n(losses)
    regular_loss = tf.add_n(regular_losses)
    total_loss = out_loss + regular_loss
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('out_loss', out_loss)
    tf.summary.scalar('regular_loss', regular_loss)

    update_ops = []
    variables_to_train = _get_variables_to_train()
    # Explicit compute/apply split (rather than optimizer.minimize) so the
    # trainable-variable subset can be restricted.
    gradients = optimizer.compute_gradients(total_loss,
                                            var_list=variables_to_train)
    grad_updates = optimizer.apply_gradients(gradients,
                                             global_step=global_step)
    update_ops.append(grad_updates)

    # Batch-norm moving mean/variance updates are opt-in via the flag.
    if FLAGS.update_bn:
        update_bns = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_ops.append(tf.group(*update_bns))
    return tf.group(*update_ops)
def solve(global_step):
    """Attach an optimizer to the collected losses and return a train op."""
    # Configure the LR schedule (dataset-size constant) and the optimizer.
    learning_rate = _configure_learning_rate(82783, global_step)
    opt = _configure_optimizer(learning_rate)
    tf.summary.scalar('learning_rate', learning_rate)

    # Gather model losses and weight-decay terms from the graph collections.
    model_losses = tf.get_collection(tf.GraphKeys.LOSSES)
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    reg_loss_sum = tf.add_n(reg_losses)
    model_loss_sum = tf.add_n(model_losses)
    combined_loss = tf.add_n(model_losses + reg_losses)
    for tag, tensor in (('total_loss', combined_loss),
                        ('out_loss', model_loss_sum),
                        ('regular_loss', reg_loss_sum)):
        tf.summary.scalar(tag, tensor)

    # Compute/apply gradients explicitly (not optimizer.minimize) so the
    # set of trainable variables can be restricted.
    trainable = _get_variables_to_train()
    grads = opt.compute_gradients(combined_loss, var_list=trainable)
    apply_op = opt.apply_gradients(grads, global_step=global_step)
    train_ops = [apply_op]

    # Optionally fold in the batch-norm moving mean/variance updates.
    if FLAGS.update_bn:
        bn_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        train_ops.append(tf.group(*bn_ops))
    return tf.group(*train_ops)
def solve(global_step):
    """Build a slim train_op for the summed model + regularization loss.

    Args:
        global_step: global-step variable passed to create_train_op so it
            is incremented on each training step.

    Returns:
        A slim train_op tensor that, when run, computes the total loss and
        applies one optimization step (plus batch-norm updates when
        FLAGS.update_bn is set).
    """
    # Learning rate schedule and optimizer.
    lr = _configure_learning_rate(82783, global_step)
    optimizer = _configure_optimizer(lr)
    tf.summary.scalar('learning_rate', lr)

    # Sum model losses and regularization losses separately for summaries.
    losses = tf.get_collection(tf.GraphKeys.LOSSES)
    loss = tf.add_n(losses)
    regular_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    regular_loss = tf.add_n(regular_losses)
    total_loss = loss + regular_loss
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('regular_loss', regular_loss)

    # BUG FIX: slim.learning.create_train_op applies the ops in
    # GraphKeys.UPDATE_OPS by default (update_ops=None means "use the
    # collection"), so the original `else` branch still ran batch-norm
    # updates even when FLAGS.update_bn was False, and the `if` branch's
    # tf.control_dependencies wrapper was redundant. Passing update_ops
    # explicitly makes the flag actually control the BN updates.
    update_ops = (tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                  if FLAGS.update_bn else [])
    train_op = slim.learning.create_train_op(total_loss, optimizer,
                                             global_step=global_step,
                                             update_ops=update_ops)
    return train_op
loss, losses, batch_info = pyramid_network.build_losses( pyramid, outputs, gt_boxes, gt_masks, num_classes=81, base_anchors=9, rpn_box_lw=0.1, rpn_cls_lw=0.2, refined_box_lw=2.0, refined_cls_lw=0.1, mask_lw=0.2) ## optimization learning_rate = _configure_learning_rate(82783, global_step) optimizer = _configure_optimizer(learning_rate) summaries.add(tf.summary.scalar('learning_rate', learning_rate)) for loss in tf.get_collection(tf.GraphKeys.LOSSES): summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss)) loss = tf.get_collection(tf.GraphKeys.LOSSES) regular_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) total_loss = tf.add_n(loss + regular_loss) reg_loss = tf.add_n(regular_loss) summaries.add(tf.summary.scalar('total_loss', total_loss)) summaries.add(tf.summary.scalar('regular_loss', reg_loss)) variables_to_train = _get_variables_to_train() update_op = optimizer.minimize(total_loss) # gradients = optimizer.compute_gradients(total_loss, var_list=variables_to_train) # grad_updates = optimizer.apply_gradients(gradients,