Example #1
def adjust_learning_rate(optimizer, epoch, args):
    # Compute the target learning rate from a cosine schedule between
    # args.init_lr and args.final_lr over args.epochs epochs.
    # cosine_decay and LOGGER are module-level helpers defined elsewhere.
    lr = cosine_decay(args.init_lr, args.final_lr, epoch, args.epochs)

    # Log only when the learning rate actually changes.
    if optimizer.param_groups[0]['lr'] != lr:
        LOGGER.log_event("learning_rate changed",
                         value=str(optimizer.param_groups[0]['lr']) + " -> " + str(lr))

    # Apply the new learning rate to every parameter group.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
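
The helper cosine_decay is not shown in this snippet. As a point of reference only, here is a minimal sketch of what such a helper typically computes (the standard cosine annealing formula; the name and signature are assumed from the call site, not taken from the original code):

import math

def cosine_decay(init_lr, final_lr, epoch, total_epochs):
    # Hypothetical sketch, not the original implementation: standard cosine
    # annealing from init_lr down to final_lr over total_epochs epochs.
    progress = min(epoch / max(total_epochs, 1), 1.0)
    return final_lr + 0.5 * (init_lr - final_lr) * (1.0 + math.cos(math.pi * progress))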
Example #2
def adjust_learning_rate(epoch, optimizer, learning_rate,
                         anneal_steps, anneal_factor):
    # Count how many annealing milestones this epoch has already passed.
    p = 0
    if anneal_steps is not None:
        for a_step in anneal_steps:
            if epoch >= int(a_step):
                p += 1

    if anneal_factor == 0.3:
        # Special case: alternate multipliers 0.3 and 0.1 so that every two
        # annealing steps reduce the learning rate by a factor of 10.
        lr = learning_rate * ((0.1 ** (p // 2)) * (1.0 if p % 2 == 0 else 0.3))
    else:
        # General case: geometric decay by anneal_factor per passed step.
        lr = learning_rate * (anneal_factor ** p)

    # Log only when the learning rate actually changes; LOGGER is a
    # module-level helper defined elsewhere.
    if optimizer.param_groups[0]['lr'] != lr:
        LOGGER.log_event("learning_rate changed",
                         value=str(optimizer.param_groups[0]['lr']) + " -> " + str(lr))

    # Apply the new learning rate to every parameter group.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
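
To make the anneal_factor == 0.3 branch concrete, the standalone sketch below (loop bound chosen for illustration, not from the original code) prints the multiplier applied to learning_rate after p passed annealing steps. The sequence alternates 1.0, 0.3, 0.1, 0.03, 0.01, so the learning rate drops by a full factor of 10 for every two annealing steps.

# Standalone illustration of the 0.3 special case (illustrative range only).
for p in range(6):
    multiplier = (0.1 ** (p // 2)) * (1.0 if p % 2 == 0 else 0.3)
    print(p, multiplier)   # 1.0, 0.3, 0.1, 0.03, 0.01, 0.003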