Example #1
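These snippets are pytest tests exercising pytorch-ignite's LRScheduler, a handler that wraps a torch.optim.lr_scheduler and drives it from an ignite Engine. Running any of them standalone needs roughly the imports below (a sketch; the exact import path for LRScheduler has moved between ignite versions, e.g. it previously lived under ignite.contrib.handlers):

import pytest
import torch

from ignite.engine import Engine, Events
from ignite.handlers import LRScheduler

Example #1 exercises the wrapper's argument validation: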
def test_lr_scheduler_asserts():

    t1 = torch.zeros([1], requires_grad=True)
    t2 = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([
        {"params": t1, "lr": 0.1},
        {"params": t2, "lr": 0.1},
    ])
    lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer,
                                                          gamma=0.98)

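    # Two param groups: the wrapper rejects this scheduler (ValueError)
    # both at construction and in simulate_values.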
    with pytest.raises(ValueError):
        LRScheduler(lr_scheduler)

    with pytest.raises(ValueError):
        LRScheduler.simulate_values(num_events=100, lr_scheduler=lr_scheduler)

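    # A non-scheduler argument is rejected with a TypeError.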
    with pytest.raises(TypeError):
        LRScheduler(123)
Example #2
def test_lr_scheduler_asserts():

    with pytest.raises(TypeError):
        LRScheduler(123)

    with pytest.raises(TypeError):
        LRScheduler.simulate_values(1, None)
Example #3
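Example #3 is the same check, additionally pinning the error message with pytest's match regex:
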
def test_lr_scheduler_asserts():

    with pytest.raises(
        TypeError, match=r"Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler"
    ):
        LRScheduler(123)

    with pytest.raises(
        TypeError, match=r"Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler"
    ):
        LRScheduler.simulate_values(1, None)
Example #4
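Example #4 compares the wrapped scheduler against a manually stepped torch scheduler, round-trips optimizer and scheduler state_dicts between two identical runs, and checks simulate_values against the recorded learning rates:
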
    def _test(torch_lr_scheduler_cls, **kwargs):

        tensor = torch.zeros([1], requires_grad=True)
        optimizer1 = torch.optim.SGD([tensor], lr=0.01)
        optimizer2 = torch.optim.SGD([tensor], lr=0.01)
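        # Snapshot the initial optimizer states so both runs below start identically.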
        opt_state_dict1 = optimizer1.state_dict()
        opt_state_dict2 = optimizer2.state_dict()

        torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)
        scheduler = LRScheduler(torch_lr_scheduler1)
        state_dict1 = scheduler.state_dict()

        torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)
        state_dict2 = torch_lr_scheduler2.state_dict()

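        # The update function just steps both optimizers; the handlers below
        # record the learning rates to compare.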
        def dummy_update(engine, batch):
            optimizer1.step()
            optimizer2.step()

        trainer = Engine(dummy_update)

        @trainer.on(Events.ITERATION_STARTED)
        def save_lr(engine):
            lrs.append(optimizer1.param_groups[0]["lr"])

        @trainer.on(Events.ITERATION_STARTED)
        def save_true_lr(engine):
            lrs_true.append(optimizer2.param_groups[0]["lr"])

        @trainer.on(Events.ITERATION_COMPLETED)
        def torch_lr_scheduler_step(engine):
            torch_lr_scheduler2.step()

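        # The wrapped scheduler is attached as an engine handler, mirroring
        # the manual torch_lr_scheduler2.step() above.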
        trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)

        data = [0] * 10
        max_epochs = 2
        for i in range(2):
            lrs = []
            lrs_true = []
            trainer.run(data, max_epochs=max_epochs)
            assert lrs_true == pytest.approx(lrs), (
                f"run {i}: {lrs_true} ({len(lrs_true)}) vs {lrs} ({len(lrs)})"
            )
            # Restore the initial states so the second run replays the first exactly.
            optimizer1.load_state_dict(opt_state_dict1)
            scheduler.load_state_dict(state_dict1)
            optimizer2.load_state_dict(opt_state_dict2)
            torch_lr_scheduler2.load_state_dict(state_dict2)

        optimizer3 = torch.optim.SGD([tensor], lr=0.01)
        torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)

        simulated_values = LRScheduler.simulate_values(
            num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler3
        )
        assert lrs == pytest.approx([v for _, v in simulated_values])
Example #5
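Example #5 runs the same comparison without the state_dict round-trip, attaching the wrapped scheduler at ITERATION_STARTED and recording learning rates at ITERATION_COMPLETED:
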
    def _test(torch_lr_scheduler_cls, **kwargs):

        tensor = torch.zeros([1], requires_grad=True)
        optimizer1 = torch.optim.SGD([tensor], lr=0.01)
        optimizer2 = torch.optim.SGD([tensor], lr=0.01)

        torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)
        torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)
        scheduler = LRScheduler(torch_lr_scheduler1)

        lrs = []
        lrs_true = []

        def dummy_update(engine, batch):
            optimizer1.step()
            optimizer2.step()

        trainer = Engine(dummy_update)

        @trainer.on(Events.ITERATION_COMPLETED)
        def torch_lr_scheduler_step(engine):
            torch_lr_scheduler2.step()

        @trainer.on(Events.ITERATION_COMPLETED)
        def save_lr(engine):
            lrs.append(optimizer1.param_groups[0]["lr"])

        @trainer.on(Events.ITERATION_COMPLETED)
        def save_true_lr(engine):
            lrs_true.append(optimizer2.param_groups[0]["lr"])

        trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)

        data = [0] * 10
        max_epochs = 2
        trainer.run(data, max_epochs=max_epochs)

        assert lrs_true == pytest.approx(lrs)

        optimizer3 = torch.optim.SGD([tensor], lr=0.01)
        torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)

        simulated_values = LRScheduler.simulate_values(
            num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler3
        )
        assert lrs == pytest.approx([v for _, v in simulated_values])
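
Outside a test, the same wiring takes only a few lines. A minimal sketch, assuming the imports above (StepLR is an arbitrary torch scheduler picked for illustration, and the one-line update function stands in for a real training step):

tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.01)
torch_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)
scheduler = LRScheduler(torch_scheduler)  # ignite wrapper around the torch scheduler

# A real update function would compute a loss and call backward() before stepping.
trainer = Engine(lambda engine, batch: optimizer.step())
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
trainer.run([0] * 10, max_epochs=2)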