Example #1
def build_optimizer(learning_rate, loss, l2_loss, update_vars, global_step):
    """
    Build the optimizer and the training op.
    :return: train_op
    """
    optimizer = config_optimizer(train_args.optimizer_name, learning_rate)
    # make training depend on the BN moving-average update ops
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        # compute gradients, optimizing only the parameters in update_vars
        gvs = optimizer.compute_gradients(
            loss[0] + l2_loss, var_list=update_vars)
        # clip each gradient by norm to prevent exploding gradients
        clip_grad_var = [
            gv if gv[0] is None else [tf.clip_by_norm(gv[0], 100.), gv[1]]
            for gv in gvs
        ]
        train_op = optimizer.apply_gradients(clip_grad_var,
                                             global_step=global_step)
    return train_op
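
Example #1 clips each gradient tensor independently with tf.clip_by_norm. A common alternative is to rescale all gradients together by their global norm, which preserves the relative direction of the combined update. Below is a minimal sketch of that variant, reusing config_optimizer and train_args from the example above; the clip_norm value is an assumption, not part of the original code.

def build_optimizer_global_clip(learning_rate, loss, l2_loss, update_vars,
                                global_step, clip_norm=100.0):
    """Sketch: variant of build_optimizer that clips by the global norm of
    all gradients instead of clipping each gradient tensor on its own."""
    optimizer = config_optimizer(train_args.optimizer_name, learning_rate)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        grads_and_vars = optimizer.compute_gradients(
            loss[0] + l2_loss, var_list=update_vars)
        grads, tvars = zip(*grads_and_vars)
        # tf.clip_by_global_norm ignores None gradients and rescales the
        # rest so that their joint norm does not exceed clip_norm
        clipped, _ = tf.clip_by_global_norm(grads, clip_norm)
        train_op = optimizer.apply_gradients(zip(clipped, tvars),
                                             global_step=global_step)
    return train_op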
Example #2
            if config_common.model_set["use_warm_up"]:
                learning_rate = tf.cond(
                    tf.less(
                        global_step, train_batch_num *
                        config_common.model_set["warm_up_epoch"]),
                    lambda: config_common.model_set["warm_up_lr"],
                    lambda: config_learning_rate(
                        config_common, global_step - train_batch_num *
                        config_common.model_set["warm_up_epoch"],
                        lr_decay_freq))
            else:
                learning_rate = config_learning_rate(config_common,
                                                     global_step,
                                                     lr_decay_freq)

            tf.summary.scalar('learning_rate', learning_rate)

            optimizer = config_optimizer(
                config_common.model_set["optimizer_name"], learning_rate)
            # clip gradients by norm via the optimizer wrapper
            optimizer = tf.contrib.estimator.clip_gradients_by_norm(
                optimizer, 3.0)
            with tf.device(gpu_device[0]):
                tower_grads = []
                with tf.variable_scope(tf.get_variable_scope()):
                    for gpu_id in range(len(gpu_device)):
                        with tf.device(gpu_device[gpu_id]):
                            with tf.name_scope('%s_%d' %
                                               ('tower', gpu_id)) as scope:
                                # get an element from the chosen dataset iterator
                                image, y_true_13, y_true_26, y_true_52, img_path = dataset_iterator.get_next(
                                )
                                # the anchor whose cell contains the target center is
                                # responsible for detecting that object; its x, y, w, h
                                # hold the ground-truth values, all other anchors are 0
                                y_true = [y_true_13, y_true_26, y_true_52]
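
Example #2 is cut off before the per-tower gradients collected in tower_grads are combined. In the classic TF1 multi-GPU pattern this is done by averaging the gradients across towers before a single apply_gradients call; the helper below is a sketch of that step, and its name and wiring here are assumptions rather than part of the original snippet (it also assumes no gradient in tower_grads is None).

def average_gradients(tower_grads):
    """Sketch: average gradients over all towers.

    tower_grads: a list with one entry per GPU, each a list of
    (gradient, variable) tuples from optimizer.compute_gradients.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars pairs the same variable across towers:
        # ((grad_gpu0, var), (grad_gpu1, var), ...)
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        # the variables are shared across towers, so take the first one
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads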
Example #3
global_step = tf.Variable(0,
                          trainable=False,
                          collections=[tf.GraphKeys.LOCAL_VARIABLES])
if args.use_warm_up:
    learning_rate = tf.cond(
        tf.less(global_step, args.train_batch_num * args.warm_up_epoch),
        lambda: args.warm_up_lr, lambda: config_learning_rate(
            args, global_step - args.train_batch_num * args.warm_up_epoch))
else:
    learning_rate = config_learning_rate(args, global_step)
tf.summary.scalar('learning_rate', learning_rate)

if not args.save_optimizer:
    saver_to_save = tf.train.Saver()

optimizer = config_optimizer(args.optimizer_name, learning_rate)

if args.save_optimizer:
    saver_to_save = tf.train.Saver()

# set dependencies for BN ops
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss[0],
                                  var_list=update_vars,
                                  global_step=global_step)

with tf.Session() as sess:
    sess.run([
        tf.global_variables_initializer(),
        tf.local_variables_initializer(), train_iterator.initializer
    ])
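
Examples #3 and #5 delegate the decay schedule to config_learning_rate(args, global_step). The project's actual helper is not shown in any snippet; as a rough sketch, such a function commonly dispatches on a decay-type flag to tf.train's built-in schedules. The attribute names args.lr_type, args.lr_decay_freq, and args.lr_decay_rate below are assumptions.

def config_learning_rate(args, global_step):
    """Sketch of a learning-rate helper; the supported types are assumed."""
    if args.lr_type == 'fixed':
        return tf.convert_to_tensor(args.learning_rate_init,
                                    name='fixed_learning_rate')
    elif args.lr_type == 'exponential':
        # decay the rate by lr_decay_rate every lr_decay_freq steps
        return tf.train.exponential_decay(args.learning_rate_init,
                                          global_step,
                                          args.lr_decay_freq,
                                          args.lr_decay_rate,
                                          staircase=True,
                                          name='exponential_learning_rate')
    else:
        raise ValueError('Unsupported learning rate type: ' + args.lr_type)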
Example #4
    if args.use_warm_up:
        learning_rate_finetune = tf.cond(
            tf.less(global_step_finetune,
                    args.train_batch_num * args.warm_up_epoch),
            lambda: args.learning_rate_init * global_step_finetune /
            (args.train_batch_num * args.warm_up_epoch),
            lambda: config_learning_rate(
                args, global_step_finetune -
                args.train_batch_num * args.warm_up_epoch))
    else:
        learning_rate_finetune = config_learning_rate(args,
                                                      global_step_finetune)
    tf.summary.scalar('learning_rate', learning_rate_finetune)

    if not args.save_optimizer:
        saver_best_finetune = tf.train.Saver()
        print("[INFO] save_best_finetune construct _not ")

    optimizer_finetune = config_optimizer(args.optimizer_name,
                                          learning_rate_finetune)
    update_vars_finetune = tf.contrib.framework.get_variables_to_restore(
        include=prune_model._update_part)
    if args.save_optimizer:
        saver_best_finetune = tf.train.Saver()
        print("[INFO] save_best_finetune construct _true")

    # set dependencies for BN ops
    update_ops_finetune = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops_finetune):
        train_op_finetune = optimizer_finetune.minimize(
            loss_fintune[0] + l2_loss_fintune,
            var_list=update_vars_finetune,
            global_step=global_step_finetune)

#################################### beginning finetune ###########################################
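
All of the examples obtain their optimizer from config_optimizer(optimizer_name, learning_rate), whose body is not part of any snippet. A minimal sketch of such a dispatcher over the tf.train optimizers is shown below; the accepted name strings and the momentum value are assumptions.

def config_optimizer(optimizer_name, learning_rate):
    """Sketch of an optimizer factory; accepted names are assumed."""
    if optimizer_name == 'momentum':
        return tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    elif optimizer_name == 'rmsprop':
        return tf.train.RMSPropOptimizer(learning_rate)
    elif optimizer_name == 'adam':
        return tf.train.AdamOptimizer(learning_rate)
    elif optimizer_name == 'sgd':
        return tf.train.GradientDescentOptimizer(learning_rate)
    else:
        raise ValueError('Unsupported optimizer: ' + optimizer_name)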
Example #5
global_step = tf.Variable(0,
                          trainable=False,
                          collections=[tf.GraphKeys.LOCAL_VARIABLES])
if flag.use_warm_up:
    learning_rate = tf.cond(
        tf.less(global_step, flag.train_batch_num * flag.warm_up_epoch),
        lambda: flag.warm_up_lr, lambda: config_learning_rate(
            flag, global_step - flag.train_batch_num * flag.warm_up_epoch))
else:
    learning_rate = config_learning_rate(flag, global_step)
tf.summary.scalar('learning_rate', learning_rate)

if not flag.save_optimizer:
    saver_to_save = tf.train.Saver()

optimizer = config_optimizer(flag.optimizer_name, learning_rate)

if flag.save_optimizer:
    saver_to_save = tf.train.Saver()

# set dependencies for BN ops
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss[0],
                                  var_list=update_vars,
                                  global_step=global_step)

with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run([
        tf.global_variables_initializer(),
        tf.local_variables_initializer(), train_iterator.initializer
    ])
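    # --- sketch only: a typical training loop that would follow the
    # initialization above; total_train_steps, the './logs' directory, and
    # the summary wiring are assumptions, not part of the original snippet ---
    writer = tf.summary.FileWriter('./logs', sess.graph)
    merged = tf.summary.merge_all()  # collects the learning_rate scalar above
    for step in range(total_train_steps):
        # run one optimization step together with the merged summaries
        _, summary, loss_val, g_step = sess.run(
            [train_op, merged, loss[0], global_step])
        writer.add_summary(summary, global_step=g_step)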