Example #1
def get_learning_rate(global_step):
    """
    学习率
    :param global_step:
    :return:
    """
    if train_args.use_warm_up:
        learning_rate = tf.cond(
            tf.less(global_step,
                    train_args.train_batch_num * train_args.warm_up_epoch),
            lambda: train_args.learning_rate_init * global_step /
            (train_args.train_batch_num * train_args.warm_up_epoch),
            lambda: config_learning_rate(
                train_args, global_step - train_args.train_batch_num *
                train_args.warm_up_epoch))
    else:
        learning_rate = config_learning_rate(train_args, global_step)
    return learning_rate
            ################
            # register the gpu nms operation here for the following evaluation scheme
            pred_boxes_flag = tf.placeholder(tf.float32, [1, None, None])
            pred_scores_flag = tf.placeholder(tf.float32, [1, None, None])
            gpu_nms_op = gpu_nms(pred_boxes_flag, pred_scores_flag, class_num)
            ################
            global_step = tf.Variable(
                0, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
            if config_common.model_set["use_warm_up"]:
                learning_rate = tf.cond(
                    tf.less(global_step,
                            train_batch_num * config_common.model_set["warm_up_epoch"]),
                    lambda: config_common.model_set["warm_up_lr"],
                    lambda: config_learning_rate(
                        config_common,
                        global_step - train_batch_num * config_common.model_set["warm_up_epoch"],
                        lr_decay_freq))
            else:
                learning_rate = config_learning_rate(config_common,
                                                     global_step,
                                                     lr_decay_freq)

            tf.summary.scalar('learning_rate', learning_rate)

            optimizer = config_optimizer(
                config_common.model_set["optimizer_name"], learning_rate)
            # Clip gradients by global norm (max 3.0) to stabilize training
            optimizer = tf.contrib.estimator.clip_gradients_by_norm(
                optimizer, 3.0)
            with tf.device(gpu_device[0]):
                tower_grads = []
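
None of the snippets show config_learning_rate itself, and its signature even varies between them (two arguments in most examples, three in the config_common variant above, where an extra lr_decay_freq is passed). The sketch below is only a plausible reconstruction of such a helper using the two-argument form; the field names lr_type, lr_decay_freq, lr_decay_factor, pw_boundaries and pw_values are assumptions, not confirmed by the snippets.

import tensorflow as tf

def config_learning_rate(args, global_step):
    # Hypothetical reconstruction -- the real helper is defined elsewhere in the repo.
    if args.lr_type == 'fixed':
        return tf.convert_to_tensor(args.learning_rate_init, name='fixed_learning_rate')
    elif args.lr_type == 'exponential':
        return tf.train.exponential_decay(args.learning_rate_init, global_step,
                                          args.lr_decay_freq, args.lr_decay_factor,
                                          staircase=True, name='exponential_learning_rate')
    elif args.lr_type == 'piecewise':
        return tf.train.piecewise_constant(global_step,
                                           boundaries=args.pw_boundaries,
                                           values=args.pw_values,
                                           name='piecewise_learning_rate')
    else:
        raise ValueError('Unsupported learning rate type: {}'.format(args.lr_type))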
Example #3
update_vars = tf.contrib.framework.get_variables_to_restore(
    include=args.update_part)

tf.summary.scalar('train_batch_statistics/total_loss', loss[0])
tf.summary.scalar('train_batch_statistics/loss_xy', loss[1])
tf.summary.scalar('train_batch_statistics/loss_wh', loss[2])
tf.summary.scalar('train_batch_statistics/loss_conf', loss[3])
tf.summary.scalar('train_batch_statistics/loss_class', loss[4])

global_step = tf.Variable(0,
                          trainable=False,
                          collections=[tf.GraphKeys.LOCAL_VARIABLES])
if args.use_warm_up:
    learning_rate = tf.cond(
        tf.less(global_step, args.train_batch_num * args.warm_up_epoch),
        lambda: args.warm_up_lr, lambda: config_learning_rate(
            args, global_step - args.train_batch_num * args.warm_up_epoch))
else:
    learning_rate = config_learning_rate(args, global_step)
tf.summary.scalar('learning_rate', learning_rate)

if not args.save_optimizer:
    saver_to_save = tf.train.Saver()

optimizer = config_optimizer(args.optimizer_name, learning_rate)

if args.save_optimizer:
    saver_to_save = tf.train.Saver()

# set dependencies for BN ops
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss[0], var_list=update_vars, global_step=global_step)
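
The "# set dependencies for BN ops" comment matters because, in TF1, batch-normalization moving averages are updated through ops collected in tf.GraphKeys.UPDATE_OPS, and those ops are not run unless something depends on them. Here is a self-contained toy illustration of the same pattern; the small network and loss are made up for the example and are not taken from the snippets.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])
y = tf.placeholder(tf.float32, [None, 1])
h = tf.layers.batch_normalization(tf.layers.dense(x, 8), training=True)
pred = tf.layers.dense(h, 1)
loss = tf.reduce_mean(tf.square(pred - y))

# Without this control dependency the BN moving mean/variance would never be updated.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)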
Example #4
pred_boxes_flag = tf.placeholder(tf.float32, [1, None, None])
pred_scores_flag = tf.placeholder(tf.float32, [1, None, None])
gpu_nms_op = gpu_nms(pred_boxes_flag, pred_scores_flag, args.class_num)
################

saver_to_restore = tf.train.Saver(
    var_list=tf.contrib.framework.get_variables_to_restore(include=args.restore_part))
update_vars = tf.contrib.framework.get_variables_to_restore(include=args.update_part)

tf.summary.scalar('train_batch_statistics/total_loss', loss[0])
tf.summary.scalar('train_batch_statistics/loss_xy', loss[1])
tf.summary.scalar('train_batch_statistics/loss_wh', loss[2])
tf.summary.scalar('train_batch_statistics/loss_conf', loss[3])
tf.summary.scalar('train_batch_statistics/loss_class', loss[4])

global_step = tf.Variable(0, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
learning_rate = config_learning_rate(args, global_step)
tf.summary.scalar('learning_rate', learning_rate)

if not args.save_optimizer:
    saver_to_save = tf.train.Saver()

optimizer = config_optimizer(args.optimizer_name, learning_rate)

if args.save_optimizer:
    saver_to_save = tf.train.Saver()

# set dependencies for BN ops
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss[0], var_list=update_vars, global_step=global_step)
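
config_optimizer is also left undefined in these snippets. A minimal sketch of what it might do is below, assuming optimizer_name takes values like 'momentum', 'rmsprop', 'adam' or 'sgd' and that the momentum coefficient defaults to 0.9; none of these names or values are confirmed by the snippets.

import tensorflow as tf

def config_optimizer(optimizer_name, learning_rate):
    # Hypothetical helper: map an optimizer name to a TF1 optimizer instance.
    if optimizer_name == 'momentum':
        return tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    elif optimizer_name == 'rmsprop':
        return tf.train.RMSPropOptimizer(learning_rate)
    elif optimizer_name == 'adam':
        return tf.train.AdamOptimizer(learning_rate)
    elif optimizer_name == 'sgd':
        return tf.train.GradientDescentOptimizer(learning_rate)
    else:
        raise ValueError('Unsupported optimizer type: {}'.format(optimizer_name))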
Example #5
    tf.summary.scalar('train_batch_statistics/loss_l2', l2_loss_fintune)
    tf.summary.scalar('train_batch_statistics/loss_ratio',
                      l2_loss_fintune / loss_fintune[0])

    global_step_finetune = tf.Variable(
        float(args.global_step),
        trainable=False,
        collections=[tf.GraphKeys.LOCAL_VARIABLES])
    if args.use_warm_up:
        learning_rate_finetune = tf.cond(
            tf.less(global_step_finetune,
                    args.train_batch_num * args.warm_up_epoch),
            lambda: args.learning_rate_init * global_step_finetune /
                    (args.train_batch_num * args.warm_up_epoch),
            lambda: config_learning_rate(
                args,
                global_step_finetune - args.train_batch_num * args.warm_up_epoch))
    else:
        learning_rate_finetune = config_learning_rate(args,
                                                      global_step_finetune)
    tf.summary.scalar('learning_rate', learning_rate_finetune)

    if not args.save_optimizer:
        saver_best_finetune = tf.train.Saver()
        print("[INFO] save_best_finetune construct _not ")

    optimizer_finetune = config_optimizer(args.optimizer_name,
                                          learning_rate_finetune)
    update_vars_finetune = tf.contrib.framework.get_variables_to_restore(
        include=prune_model._update_part)
    if args.save_optimizer:
        saver_best_finetune = tf.train.Saver()
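
For reference, the warm-up branch above scales the initial learning rate linearly with the step counter, so the rate starts near zero and reaches learning_rate_init at the end of the warm-up window, after which config_learning_rate takes over with a shifted step. A plain-Python check with made-up numbers (the learning_rate_init, train_batch_num and warm_up_epoch values are assumptions):

learning_rate_init = 1e-4   # assumed value
train_batch_num = 1000      # assumed: batches per epoch
warm_up_epoch = 3           # assumed
warm_up_steps = train_batch_num * warm_up_epoch

for step in (0, 1500, 3000):
    if step < warm_up_steps:
        lr = learning_rate_init * step / warm_up_steps   # linear ramp
    else:
        lr = learning_rate_init                          # decay schedule takes over here
    print(step, lr)   # 0 -> 0.0, 1500 -> 5e-05, 3000 -> 0.0001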
Example #6
update_vars = tf.contrib.framework.get_variables_to_restore(
    include=flag.update_part)

tf.summary.scalar('train_batch_statistics/total_loss', loss[0])
tf.summary.scalar('train_batch_statistics/loss_xy', loss[1])
tf.summary.scalar('train_batch_statistics/loss_wh', loss[2])
tf.summary.scalar('train_batch_statistics/loss_conf', loss[3])
tf.summary.scalar('train_batch_statistics/loss_class', loss[4])

global_step = tf.Variable(0,
                          trainable=False,
                          collections=[tf.GraphKeys.LOCAL_VARIABLES])
if flag.use_warm_up:
    learning_rate = tf.cond(
        tf.less(global_step, flag.train_batch_num * flag.warm_up_epoch),
        lambda: flag.warm_up_lr, lambda: config_learning_rate(
            flag, global_step - flag.train_batch_num * flag.warm_up_epoch))
else:
    learning_rate = config_learning_rate(flag, global_step)
tf.summary.scalar('learning_rate', learning_rate)

if not flag.save_optimizer:
    saver_to_save = tf.train.Saver()

optimizer = config_optimizer(flag.optimizer_name, learning_rate)

if flag.save_optimizer:
    saver_to_save = tf.train.Saver()

# set dependencies for BN ops
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss[0], var_list=update_vars, global_step=global_step)
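
To actually drive any of these graphs, a session loop has to run train_op together with the merged summaries. The sketch below assumes the ops from the last example (train_op, loss, the scalar summaries) are already built in the default graph and that the input pipeline is wired in via tf.data, so no feed_dict is needed; total_steps and the log directory are made-up values.

import tensorflow as tf

summary_op = tf.summary.merge_all()
total_steps = 10000  # assumed

with tf.Session() as sess:
    # global_step lives in LOCAL_VARIABLES in these snippets, so both initializers are needed.
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    writer = tf.summary.FileWriter('./logs', sess.graph)
    for step in range(total_steps):
        _, summary, total_loss = sess.run([train_op, summary_op, loss[0]])
        writer.add_summary(summary, global_step=step)
        if step % 100 == 0:
            print('step {}: total_loss = {:.4f}'.format(step, total_loss))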