Example #1: cosine annealing from base_lr down to a fixed min_lr, with the schedule offset by the warmup iterations
    def get_lr(self, runner, base_lr):
        # Measure progress in epochs or iterations, per configuration.
        if self.by_epoch:
            progress = runner.epoch
            max_progress = runner.max_epochs
        else:
            progress = runner.iter
            max_progress = runner.max_iters

        target_lr = self.min_lr
        # Shift the window so the cosine curve starts where warmup ends.
        if self.warmup is not None:
            progress = progress - self.warmup_iters
            max_progress = max_progress - self.warmup_iters
        factor = progress / max_progress
        return annealing_cos(base_lr, target_lr, factor)
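
All three examples delegate the curve itself to annealing_cos. A minimal sketch of such a helper, assuming the mmcv-style signature implied by the calls above (a start value, an end value, and a factor that runs from 0 to 1):

    from math import cos, pi

    def annealing_cos(start, end, factor, weight=1):
        # Half-cosine interpolation: returns `start` at factor=0 and
        # `end` at factor=1. The `weight` parameter is an assumption
        # kept for compatibility with the mmcv-style helper.
        cos_out = cos(pi * factor) + 1  # 2 at factor=0, 0 at factor=1
        return end + 0.5 * weight * (start - end) * cos_out

With this in place, annealing_cos(0.1, 0.001, 0.0) returns 0.1 and annealing_cos(0.1, 0.001, 1.0) returns 0.001, with a smooth cosine descent in between.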
Example #2: cosine annealing up to a configurable last_epoch, after which the learning rate is held at the floor
    def get_lr(self, runner, base_lr):
        # last_epoch == -1 means "anneal over the entire run".
        if self.last_epoch == -1:
            self.last_epoch = runner.max_epochs

        if self.by_epoch:
            progress = runner.epoch
            max_progress = self.last_epoch
        else:
            progress = runner.iter
            max_progress = runner.max_iters

        # The floor is either a ratio of the base LR or an absolute value.
        if self.min_lr_ratio is not None:
            target_lr = base_lr * self.min_lr_ratio
        else:
            target_lr = self.min_lr

        # Once past last_epoch, hold the learning rate at the floor.
        if self.by_epoch and self.last_epoch < progress:
            return target_lr

        return annealing_cos(base_lr, target_lr, progress / max_progress)
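
To make the "anneal, then hold" shape of Example #2 concrete, here is a standalone sketch of its by-epoch branch (the function name and constants are hypothetical; it reuses the annealing_cos sketch above):

    def cosine_then_hold(base_lr, min_lr, epoch, last_epoch):
        # Hypothetical standalone version of Example #2's by-epoch branch.
        if epoch > last_epoch:
            return min_lr  # hold the floor once last_epoch has passed
        return annealing_cos(base_lr, min_lr, epoch / last_epoch)

    # Anneal over the first 8 epochs of a 10-epoch run, then hold.
    print([round(cosine_then_hold(0.1, 0.001, e, 8), 4) for e in range(10)])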
Example #3: cosine annealing between the end of warmup and a flat tail that fixes the learning rate for the final num_last_epochs
    def get_lr(self, runner, base_lr):
        # Iterations covered by the final fixed-LR epochs.
        last_iter = len(runner.data_loader) * self.num_last_epochs

        if self.by_epoch:
            progress = runner.epoch
            max_progress = runner.max_epochs
        else:
            progress = runner.iter
            max_progress = runner.max_iters

        progress += 1

        # The floor is either a ratio of the base LR or an absolute value.
        if self.min_lr_ratio is not None:
            target_lr = base_lr * self.min_lr_ratio
        else:
            target_lr = self.min_lr

        if progress >= max_progress - last_iter:
            # Fixed learning rate for the last num_last_epochs.
            return target_lr
        else:
            # Cosine decay over the span between the end of warmup and
            # the start of the fixed-LR tail.
            return annealing_cos(
                base_lr, target_lr,
                (progress - self.warmup_iters) /
                (max_progress - self.warmup_iters - last_iter))
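
Example #3 thus produces a three-phase schedule: warmup (handled by a separate warmup rule, hence the subtraction of warmup_iters), cosine decay, and a flat tail at target_lr. A standalone sketch of the by-iteration branch with hypothetical numbers, again reusing the annealing_cos sketch above:

    def cosine_with_flat_tail(it, base_lr, min_lr, max_iters,
                              warmup_iters, last_iters):
        # Hypothetical standalone version of Example #3's by-iteration
        # branch; warmup iterations themselves would be handled by a
        # separate warmup rule, as in the original hook.
        it += 1
        if it >= max_iters - last_iters:
            return min_lr  # flat tail for the last `last_iters` iterations
        return annealing_cos(
            base_lr, min_lr,
            (it - warmup_iters) / (max_iters - warmup_iters - last_iters))

    # 100 iterations: 10 warmup, cosine decay, then 20 flat at the end.
    schedule = [cosine_with_flat_tail(i, 0.01, 0.0005, 100, 10, 20)
                for i in range(100)]
    print(round(schedule[10], 5), round(schedule[50], 5), round(schedule[89], 5))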