Example #1
def adjust_learning_rate(optimizer, epoch, step_in_epoch, total_steps_in_epoch,
                         args):
    """Set the LR: cosine rampdown by default, or a multi-step schedule that
    decays by 10x at each milestone epoch in args.schedule."""
    lr = args.lr
    # Advance the epoch fractionally so the schedule is smooth within an epoch.
    epoch = epoch + step_in_epoch / total_steps_in_epoch

    if not args.step_lr:
        # Cosine LR rampdown from https://arxiv.org/abs/1608.03983 (but one cycle only)
        lr *= ramps.cosine_rampdown(epoch, args.epochs)
    else:
        # Multi-step LR: decay by 10x at each milestone epoch
        for milestone in args.schedule:
            lr *= 0.1 if epoch >= milestone else 1.

    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
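
Every example on this page relies on a small ramps helper module that is not shown here. Below is a minimal sketch of what ramps.linear_rampup and ramps.cosine_rampdown typically look like in these codebases; treat it as an illustration of the assumed behavior (a 0-to-1 linear warm-up factor and a 1-to-0 cosine decay factor), not the exact implementation behind each example.

import numpy as np

def linear_rampup(current, rampup_length):
    """Ramp a factor linearly from 0 to 1 over the first `rampup_length` epochs."""
    assert current >= 0 and rampup_length >= 0
    if rampup_length == 0 or current >= rampup_length:
        return 1.0
    return current / rampup_length

def cosine_rampdown(current, rampdown_length):
    """Cosine rampdown (https://arxiv.org/abs/1608.03983): 1 at epoch 0, 0 at rampdown_length."""
    assert 0 <= current <= rampdown_length
    return float(0.5 * (np.cos(np.pi * current / rampdown_length) + 1))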
Example #2
    def adjust_learning_rate(self, optimizer, lr, ini_lr, epoch):

        # LR warm-up to handle large minibatch sizes from https://arxiv.org/abs/1706.02677
        lr = ramps.linear_rampup(
            epoch, self.config.lr_rampup) * (lr - ini_lr) + ini_lr

        # Cosine LR rampdown from https://arxiv.org/abs/1608.03983 (but one cycle only)
        if self.config.lr_rampdn:
            assert self.config.lr_rampdn >= self.config.max_epochs
            lr *= ramps.cosine_rampdown(epoch, self.config.lr_rampdn)

        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
Example #3
def adjust_learning_rate(optimizer, epoch, step_in_epoch,
                         total_steps_in_epoch):
    lr = args.lr
    # Advance the epoch fractionally so the schedule is smooth within an epoch.
    epoch = epoch + step_in_epoch / total_steps_in_epoch

    # Linear warm-up from args.initial_lr to args.lr over args.lr_rampup epochs
    lr = ramps.linear_rampup(
        epoch, args.lr_rampup) * (args.lr - args.initial_lr) + args.initial_lr

    # Cosine rampdown over args.lr_rampdown_epochs (must cover all training epochs)
    if args.lr_rampdown_epochs:
        assert args.lr_rampdown_epochs >= args.epochs
        lr *= ramps.cosine_rampdown(epoch, args.lr_rampdown_epochs)

    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
Example #4
def adjust_learning_rate(optimizer, epoch, step_in_epoch,
                         total_steps_in_epoch):
    lr = args.lr
    epoch = epoch + step_in_epoch / total_steps_in_epoch

    # LR warm-up to handle large minibatch sizes from https://arxiv.org/abs/1706.02677
    lr = ramps.linear_rampup(
        epoch, args.lr_rampup) * (args.lr - args.initial_lr) + args.initial_lr

    # Cosine LR rampdown from https://arxiv.org/abs/1608.03983 (but one cycle only)
    if args.lr_rampdown_epochs:
        assert args.lr_rampdown_epochs >= args.epochs
        lr *= ramps.cosine_rampdown(epoch, args.lr_rampdown_epochs)

    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
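
For context, these schedulers are meant to be called once per training step, so the fractional epoch + step_in_epoch / total_steps_in_epoch argument advances smoothly (e.g. epoch 2, step 50 of 200 gives 2.25). Below is a rough usage sketch with the Example #4 variant, which returns the value it set; model, train_loader, criterion and args are hypothetical placeholders assumed to follow the usual PyTorch conventions.

import torch

# Hypothetical setup: model, train_loader, criterion and args are assumed to exist.
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

for epoch in range(args.epochs):
    for step, (inputs, targets) in enumerate(train_loader):
        # Update the LR before each optimizer step using the fractional epoch.
        lr = adjust_learning_rate(optimizer, epoch, step, len(train_loader))

        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()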