Example #1
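A smoke test that LRScheduler validates its input: both the constructor and simulate_values should raise a TypeError when given something that is not a torch.optim.lr_scheduler._LRScheduler. The snippet assumes the usual test imports, roughly as below (the LRScheduler import path is an assumption; in older ignite releases the class lives under ignite.contrib.handlers.param_scheduler):

import pytest

from ignite.handlers.param_scheduler import LRScheduler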
def test_lr_scheduler_asserts():

    with pytest.raises(
        TypeError, match=r"Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler"
    ):
        LRScheduler(123)

    with pytest.raises(
        TypeError, match=r"Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler"
    ):
        LRScheduler.simulate_values(1, None)
Example #2
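This appears to be the private _run method of a Fastai-style LR finder (ignite's FastaiLRFinder has a method of this shape): it wires an exponential or piecewise-linear LR sweep, plus loss/LR logging, into the trainer. _ExponentialLR is a private helper defined alongside the finder. Assumed imports, roughly:

from math import ceil
from typing import Callable

from torch.optim import Optimizer

from ignite.engine import Engine, Events
from ignite.handlers.param_scheduler import LRScheduler, PiecewiseLinear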
    def _run(
        self,
        trainer: Engine,
        optimizer: Optimizer,
        output_transform: Callable,
        num_iter: int,
        start_lr: float,
        end_lr: float,
        step_mode: str,
        smooth_f: float,
        diverge_th: float,
    ) -> None:

        self._history = {"lr": [], "loss": []}
        self._best_loss = None
        self._diverge_flag = False

        # attach LRScheduler to trainer.
        if num_iter is None:
            num_iter = trainer.state.epoch_length * trainer.state.max_epochs
        else:
            max_iter = trainer.state.epoch_length * trainer.state.max_epochs  # type: ignore[operator]
            if max_iter < num_iter:
                max_iter = num_iter
                trainer.state.max_iters = num_iter
                trainer.state.max_epochs = ceil(num_iter / trainer.state.epoch_length)  # type: ignore[operator]

        if not trainer.has_event_handler(self._reached_num_iterations):
            trainer.add_event_handler(Events.ITERATION_COMPLETED, self._reached_num_iterations, num_iter)

        # attach loss and lr logging
        if not trainer.has_event_handler(self._log_lr_and_loss):
            trainer.add_event_handler(
                Events.ITERATION_COMPLETED, self._log_lr_and_loss, output_transform, smooth_f, diverge_th
            )

        self.logger.debug(f"Running LR finder for {num_iter} iterations")
        if start_lr is None:
            start_lr = optimizer.param_groups[0]["lr"]
        # Initialize the proper learning rate policy
        if step_mode.lower() == "exp":
            start_lr = [start_lr] * len(optimizer.param_groups)  # type: ignore
            self._lr_schedule = LRScheduler(_ExponentialLR(optimizer, start_lr, end_lr, num_iter))
        else:
            self._lr_schedule = PiecewiseLinear(
                optimizer, param_name="lr", milestones_values=[(0, start_lr), (num_iter, end_lr)]
            )
        if not trainer.has_event_handler(self._lr_schedule):
            trainer.add_event_handler(Events.ITERATION_COMPLETED, self._lr_schedule, num_iter)
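For context, _run is not called directly. A minimal sketch of the public entry point, assuming ignite's FastaiLRFinder API as documented for 0.4.x (attach() is a context manager and lr_suggestion() exists there; in older releases the class lives under ignite.contrib.handlers, and exact defaults should be treated as assumptions):

import torch

from ignite.engine import Engine
from ignite.handlers import FastaiLRFinder

model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)

def train_step(engine, batch):
    optimizer.zero_grad()
    loss = model(batch).pow(2).mean()
    loss.backward()
    optimizer.step()
    return loss.item()

trainer = Engine(train_step)
data = [torch.randn(4, 2) for _ in range(10)]

lr_finder = FastaiLRFinder()
# attach() runs the LR sweep through a patched trainer and restores the
# model/optimizer state from to_save when the context exits
with lr_finder.attach(trainer, to_save={"model": model, "optimizer": optimizer}) as finder_trainer:
    finder_trainer.run(data, max_epochs=3)
print(lr_finder.lr_suggestion())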
Example #3
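This test sweeps several param schedulers over an optimizer with two parameter groups, asserting that both groups receive identical lr values and that a saved state_dict restores the schedule exactly across repeated runs. Beyond the imports already listed, it assumes roughly:

from torch.optim.lr_scheduler import ExponentialLR, StepLR

from ignite.handlers.param_scheduler import (
    CosineAnnealingScheduler,
    LinearCyclicalScheduler,
    PiecewiseLinear,
)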
def test_scheduler_with_param_groups():
    def _test(lr_scheduler, optimizer):
        num_iterations = 10
        max_epochs = 20

        state_dict = lr_scheduler.state_dict()

        trainer = Engine(lambda engine, batch: None)

        @trainer.on(Events.ITERATION_COMPLETED)
        def save_lr():
            lrs.append((optimizer.param_groups[0]["lr"], optimizer.param_groups[1]["lr"]))

        trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)

        data = [0] * num_iterations

        for _ in range(2):
            lrs = []
            trainer.run(data, max_epochs=max_epochs)
            assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in lrs])
            lr_scheduler.load_state_dict(state_dict)

    t1 = torch.zeros([1], requires_grad=True)
    t2 = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])

    lr_scheduler = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
    _test(lr_scheduler, optimizer)

    lr_scheduler = PiecewiseLinear(
        optimizer, "lr", milestones_values=[(5, 0.5), (15, 1.0), (25, 0.0), (35, 1.0), (40, 0.5)]
    )
    _test(lr_scheduler, optimizer)

    lr_scheduler = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
    _test(lr_scheduler, optimizer)

    torch_lr_scheduler = ExponentialLR(optimizer, gamma=0.98)
    _test(LRScheduler(torch_lr_scheduler), optimizer)

    torch_lr_scheduler = StepLR(optimizer, step_size=50, gamma=0.5)
    _test(LRScheduler(torch_lr_scheduler), optimizer)
Example #4
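A parametrized comparison of ignite's LRScheduler wrapper (attached at ITERATION_COMPLETED) against manually stepping a second, identically configured torch scheduler: the lr recorded at each ITERATION_STARTED must match. The test presumably takes its parameters from a decorator along these lines, applied to the function below (hypothetical values, for illustration only):

@pytest.mark.parametrize(
    "torch_lr_scheduler_cls, kwargs",
    [
        (torch.optim.lr_scheduler.StepLR, {"step_size": 5, "gamma": 0.5}),
        (torch.optim.lr_scheduler.ExponentialLR, {"gamma": 0.78}),
    ],
)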
def test_lr_scheduler(torch_lr_scheduler_cls, kwargs):

    if torch_lr_scheduler_cls is None:
        return

    tensor = torch.zeros([1], requires_grad=True)
    optimizer1 = torch.optim.SGD([tensor], lr=0.01)
    optimizer2 = torch.optim.SGD([tensor], lr=0.01)
    opt_state_dict1 = optimizer1.state_dict()
    opt_state_dict2 = optimizer2.state_dict()

    torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)
    scheduler = LRScheduler(torch_lr_scheduler1)
    state_dict1 = scheduler.state_dict()

    torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)
    state_dict2 = torch_lr_scheduler2.state_dict()

    def dummy_update(engine, batch):
        optimizer1.step()
        optimizer2.step()

    trainer = Engine(dummy_update)

    @trainer.on(Events.ITERATION_STARTED)
    def save_lr(engine):
        lrs.append(optimizer1.param_groups[0]["lr"])

    @trainer.on(Events.ITERATION_STARTED)
    def save_true_lr(engine):
        lrs_true.append(optimizer2.param_groups[0]["lr"])

    @trainer.on(Events.ITERATION_COMPLETED)
    def torch_lr_scheduler_step(engine):
        torch_lr_scheduler2.step()

    trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)

    for _ in range(2):
        lrs = []
        lrs_true = []
        data = [0] * 10
        max_epochs = 2
        trainer.run(data, max_epochs=max_epochs)
        assert lrs_true == pytest.approx(lrs), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs} ({len(lrs)})"
        optimizer1.load_state_dict(opt_state_dict1)
        scheduler.load_state_dict(state_dict1)
        optimizer2.load_state_dict(opt_state_dict2)
        torch_lr_scheduler2.load_state_dict(state_dict2)

    optimizer3 = torch.optim.SGD([tensor], lr=0.01)
    torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)

    simulated_values = LRScheduler.simulate_values(num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler3)
    assert lrs == pytest.approx([v for i, v in simulated_values])
Example #5
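This test exercises simulate_values/plot_values for each scheduler class under a headless matplotlib backend ("Agg"), and finally checks that plot_values raises a clear RuntimeError when matplotlib.pyplot is made unavailable. Beyond the earlier imports it assumes, roughly:

from unittest.mock import patch

from ignite.handlers.param_scheduler import ConcatScheduler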
def test_simulate_and_plot_values():

    import matplotlib

    matplotlib.use("Agg")

    def _test(scheduler_cls, **scheduler_kwargs):

        optimizer = None
        event = Events.ITERATION_STARTED
        if scheduler_cls == LRScheduler:
            optimizer = scheduler_kwargs["lr_scheduler"].optimizer
            event = Events.ITERATION_COMPLETED
        elif scheduler_cls == ConcatScheduler:
            optimizer = scheduler_kwargs["optimizer"]
            del scheduler_kwargs["optimizer"]
        else:
            tensor = torch.zeros([1], requires_grad=True)
            scheduler_kwargs["optimizer"] = torch.optim.SGD([tensor], lr=0.1)
            optimizer = scheduler_kwargs["optimizer"]

        max_epochs = 2
        data = [0] * 10
        # simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)

        scheduler = scheduler_cls(**scheduler_kwargs)

        lrs = []

        def save_lr(engine):
            lrs.append(optimizer.param_groups[0]["lr"])

        trainer = Engine(lambda engine, batch: None)
        trainer.add_event_handler(event, scheduler)
        trainer.add_event_handler(Events.ITERATION_STARTED, save_lr)
        trainer.run(data, max_epochs=max_epochs)

        # assert lrs == pytest.approx([v for i, v in simulated_values])

        if scheduler_cls == LRScheduler or scheduler_cls == ConcatScheduler:
            # As the internal state of the torch lr scheduler has already been changed, the following checks would fail
            return

        # re-execute to check that the simulation made no internal changes
        # simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs,
        #                                                  save_history=True,  # this will be removed
        #                                                  **scheduler_kwargs)
        # assert lrs == pytest.approx([v for i, v in simulated_values])

        # launch plot values
        scheduler_cls.plot_values(num_events=len(data) * max_epochs, **scheduler_kwargs)

    # LinearCyclicalScheduler
    _test(LinearCyclicalScheduler, param_name="lr", start_value=1.0, end_value=0.0, cycle_size=10)

    # CosineAnnealingScheduler
    _test(CosineAnnealingScheduler, param_name="lr", start_value=1.0, end_value=0.0, cycle_size=10)

    # LRScheduler
    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0.1)
    torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.5)

    _test(LRScheduler, lr_scheduler=torch_lr_scheduler)

    # ConcatScheduler = [LinearCyclicalScheduler, CosineAnnealingScheduler]
    scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=20)
    scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
    durations = [10]
    _test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)

    # ConcatScheduler = [LinearCyclicalScheduler, LRScheduler]
    tensor = torch.ones([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0.001)
    torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=1.5)
    scheduler_1 = LRScheduler(torch_lr_scheduler)
    scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.1, end_value=0.0, cycle_size=10)
    durations = [10]
    _test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)

    # PiecewiseLinear
    tensor = torch.ones([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0.001)
    _test(
        PiecewiseLinear,
        optimizer=optimizer,
        param_name="lr",
        milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)],
    )

    with pytest.raises(RuntimeError, match=r"This method requires matplotlib to be installed."):
        with patch.dict("sys.modules", {"matplotlib.pyplot": None}):
            _test(
                PiecewiseLinear,
                optimizer=optimizer,
                param_name="lr",
                milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)],
            )
Example #6
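An extended variant of Example #4: alongside the default LRScheduler wrapper (scheduler1, attached at ITERATION_STARTED), it also exercises the use_legacy=True compatibility path (scheduler2, attached at ITERATION_COMPLETED, which emits a UserWarning on construction), and checks both against a manually stepped torch scheduler and against simulate_values.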
def test_lr_scheduler(torch_lr_scheduler_cls, kwargs):

    if torch_lr_scheduler_cls is None:
        return

    tensor = torch.zeros([1], requires_grad=True)
    optimizer1 = torch.optim.SGD([tensor], lr=0.01)
    optimizer2 = torch.optim.SGD([tensor], lr=0.01)
    optimizer3 = torch.optim.SGD([tensor], lr=0.01)
    opt_state_dict1 = optimizer1.state_dict()
    opt_state_dict2 = optimizer2.state_dict()
    opt_state_dict3 = optimizer3.state_dict()

    torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)
    scheduler1 = LRScheduler(torch_lr_scheduler1)
    state_dict1 = scheduler1.state_dict()

    torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)
    with pytest.warns(UserWarning, match=r"the first lr value from the optimizer, otherwise it is will be skipped"):
        scheduler2 = LRScheduler(torch_lr_scheduler2, use_legacy=True)
    state_dict2 = scheduler2.state_dict()

    torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)
    state_dict3 = torch_lr_scheduler3.state_dict()

    def dummy_update(engine, batch):
        optimizer1.step()
        optimizer2.step()
        optimizer3.step()

    trainer = Engine(dummy_update)
    trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)

    @trainer.on(Events.ITERATION_STARTED)
    def save_lr1(engine):
        lrs1.append(optimizer1.param_groups[0]["lr"])

    @trainer.on(Events.ITERATION_STARTED)
    def save_lr2(engine):
        lrs2.append(optimizer2.param_groups[0]["lr"])

    @trainer.on(Events.ITERATION_STARTED)
    def save_true_lr(engine):
        lrs_true.append(optimizer3.param_groups[0]["lr"])

    @trainer.on(Events.ITERATION_COMPLETED)
    def torch_lr_scheduler_step(engine):
        torch_lr_scheduler3.step()

    trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler2)

    for _ in range(2):
        lrs1 = []
        lrs2 = []
        lrs_true = []
        data = [0] * 10
        max_epochs = 2
        trainer.run(data, max_epochs=max_epochs)
        assert lrs_true == pytest.approx(lrs1), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs1} ({len(lrs1)})"
        assert lrs_true == pytest.approx(lrs2), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs2} ({len(lrs2)})"
        optimizer1.load_state_dict(opt_state_dict1)
        scheduler1.load_state_dict(state_dict1)
        optimizer2.load_state_dict(opt_state_dict2)
        scheduler2.load_state_dict(state_dict2)
        optimizer3.load_state_dict(opt_state_dict3)
        torch_lr_scheduler3.load_state_dict(state_dict3)

    optimizer4 = torch.optim.SGD([tensor], lr=0.01)
    torch_lr_scheduler4 = torch_lr_scheduler_cls(optimizer=optimizer4, **kwargs)

    simulated_values = LRScheduler.simulate_values(num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler4)
    assert lrs1 == pytest.approx([v for i, v in simulated_values])
    assert lrs2 == pytest.approx([v for i, v in simulated_values])