def adjust_learning_rate(optimizer,
                         epoch,
                         i_iter,
                         iters_per_epoch,
                         method='poly'):
    """Set the learning rate for the current step on the optimizer's first
    param group and return it. Relies on a module-level ``args`` namespace
    providing ``epochs`` and ``learning_rate``."""
    if method == 'poly':
        # Polynomial decay with power 0.9: lr shrinks from the base rate
        # towards zero as training progresses.
        current_step = epoch * iters_per_epoch + i_iter
        max_step = args.epochs * iters_per_epoch
        lr = args.learning_rate * ((1 - current_step / max_step)**0.9)
    elif method == 'cosine':
        lr = cosine_decay(base_learning_rate=args.learning_rate,
                          global_step=epoch * iters_per_epoch + i_iter,
                          warm_step=10 * iters_per_epoch,
                          decay_steps=args.epochs * iters_per_epoch,
                          alpha=0.0001)
    elif method == 'restart_cosine':
        lr = restart_cosine_decay(base_learning_rate=args.learning_rate,
                                  global_step=epoch * iters_per_epoch + i_iter,
                                  warm_step=10 * iters_per_epoch,
                                  decay_steps=args.epochs * iters_per_epoch,
                                  alpha=0.0001)
    else:
        lr = args.learning_rate
    optimizer.param_groups[0]['lr'] = lr
    return lr
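For context, a minimal usage sketch (not part of the original snippet): the `args` namespace, model, and iteration counts below are hypothetical placeholders showing how `adjust_learning_rate` would typically be driven once per training iteration.

# Hypothetical usage sketch (not from the original source): call
# adjust_learning_rate every iteration so the poly schedule sees a
# monotonically increasing global step.
import argparse
import torch

args = argparse.Namespace(epochs=100, learning_rate=0.01)  # assumed globals
model = torch.nn.Linear(10, 2)                             # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)

iters_per_epoch = 500                                      # e.g. len(train_loader)
for epoch in range(args.epochs):
    for i_iter in range(iters_per_epoch):
        lr = adjust_learning_rate(optimizer, epoch, i_iter,
                                  iters_per_epoch, method='poly')
        # ... forward / backward / optimizer.step() would go here ...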
Example 2
def adjust_learning_rate(optimizer, epoch, i_iter, iters_per_epoch, method='poly'):
    if method == 'poly':
        current_step = epoch * iters_per_epoch + i_iter
        max_step = args.epochs * iters_per_epoch
        lr = args.learning_rate * ((1 - current_step / max_step) ** 0.9)
    elif method == 'cosine':
        # 5 warm-up epochs starting from 0.001x the base rate, then cosine
        # decay over a 10-epoch horizon.
        warm_step = 5 * iters_per_epoch
        warm_lr = 0.001 * args.learning_rate
        decay_steps = 10 * iters_per_epoch
        lr = cosine_decay(learning_rate=args.learning_rate,
                          global_step=epoch * iters_per_epoch + i_iter,
                          warm_step=warm_step, warm_lr=warm_lr,
                          decay_steps=decay_steps, alpha=0.0001)
    elif method == 'restart_cosine':
        # Same warm-up settings; cosine schedule with restarts.
        warm_step = 5 * iters_per_epoch
        warm_lr = 0.001 * args.learning_rate
        decay_steps = 10 * iters_per_epoch
        lr = restart_cosine_decay(learning_rate=args.learning_rate,
                                  global_step=epoch * iters_per_epoch + i_iter,
                                  warm_step=warm_step, warm_lr=warm_lr,
                                  decay_steps=decay_steps, alpha=0.0001)
    else:
        lr = args.learning_rate
    optimizer.param_groups[0]['lr'] = lr
    return lr
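Neither `cosine_decay` nor `restart_cosine_decay` is shown on this page; the sketch below is one plausible helper matching the keyword arguments used in the second example (assuming a warm-up from `warm_lr` to the base rate, then a cosine curve with a floor of `alpha * learning_rate`), not the authors' actual implementation.

# Hypothetical sketch of the cosine_decay helper assumed by the second
# example; the real implementation may differ.
import math

def cosine_decay(learning_rate, global_step, warm_step, warm_lr,
                 decay_steps, alpha=0.0001):
    if global_step < warm_step:
        # Warm-up phase: ramp linearly from warm_lr to the base rate.
        frac = global_step / max(warm_step, 1)
        return warm_lr + (learning_rate - warm_lr) * frac
    # Cosine-decay phase, clipped so the step never exceeds decay_steps;
    # the schedule bottoms out at alpha * learning_rate.
    step = min(global_step - warm_step, decay_steps)
    cosine = 0.5 * (1 + math.cos(math.pi * step / decay_steps))
    return learning_rate * ((1 - alpha) * cosine + alpha)

A `restart_cosine_decay` variant would typically differ only in the decay phase, e.g. by taking the post-warm-up step modulo `decay_steps` so the cosine curve restarts periodically instead of being clipped.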