Example #1: ConcatScheduler state_dict round-trip and load_state_dict error handling
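These snippets come from a larger test module and omit their imports. A preamble along these lines should cover all eight examples (assuming pytorch-ignite's contrib handlers; newer releases also expose the schedulers under ignite.handlers):

import numpy as np
import pytest
import torch

from ignite.engine import Engine, Events
from ignite.contrib.handlers import (
    ConcatScheduler,
    CosineAnnealingScheduler,
    LinearCyclicalScheduler,
    create_lr_scheduler_with_warmup,
)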
def test_concat_scheduler_state_dict():
    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0)
    scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
    scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
    durations = [10]
    concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=False)
    state_dict = concat_scheduler.state_dict()

    assert state_dict["durations"] == durations
    assert state_dict["_current_duration"] == durations[0]
    assert state_dict["_scheduler_index"] == 0

    # Advance the scheduler by 20 events, past the first duration
    for _ in range(20):
        concat_scheduler(None, None)

    # Restoring the saved state must bring us back to the first scheduler
    concat_scheduler.load_state_dict(state_dict)
    assert concat_scheduler.durations == durations
    assert concat_scheduler._current_duration == durations[0]
    assert id(concat_scheduler._current_scheduler) == id(scheduler_1)

    with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
        concat_scheduler.load_state_dict({"a": 1})

    with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of concatenated schedulers"):
        concat_scheduler.load_state_dict({"schedulers": []})

    with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary, but given"):
        concat_scheduler.load_state_dict(None)
Example #2: ConcatScheduler wrapped with create_lr_scheduler_with_warmup, checking recorded lrs against simulated values (inner helper of a larger test)
    def _test(save_history):
        tensor = torch.ones([1], requires_grad=True)
        optimizer = torch.optim.SGD([tensor], lr=0.001)

        max_epochs = 25
        lr_max_value = 0.4
        num_iterations_per_epoch = 128
        num_iterations = max_epochs * num_iterations_per_epoch
        warmup_duration = 5 * num_iterations_per_epoch
        cooldown_duration = 5 * num_iterations_per_epoch

        scheduler_1 = LinearCyclicalScheduler(
            optimizer,
            "lr",
            start_value=lr_max_value,
            end_value=lr_max_value * 0.9,
            cycle_size=(num_iterations - warmup_duration - cooldown_duration) * 2,
        )

        scheduler_2 = LinearCyclicalScheduler(
            optimizer, "lr", start_value=lr_max_value, end_value=0.0, cycle_size=cooldown_duration * 2
        )

        lr_scheduler = ConcatScheduler(
            schedulers=[scheduler_1, scheduler_2],
            durations=[num_iterations - warmup_duration - cooldown_duration],
            save_history=False,
        )
        # Placeholder list, filled in-place via output_simulated_values
        lr_values = [None] * num_iterations
        scheduler = create_lr_scheduler_with_warmup(
            lr_scheduler,
            warmup_start_value=0.0,
            warmup_end_value=lr_max_value,
            warmup_duration=warmup_duration,
            save_history=save_history,
            output_simulated_values=lr_values,
        )
        state_dict = scheduler.state_dict()

        trainer = Engine(lambda engine, batch: None)

        @trainer.on(Events.ITERATION_COMPLETED)
        def save_lr(engine):
            lrs.append(optimizer.param_groups[0]["lr"])

        trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)

        data = [0] * num_iterations_per_epoch

        # Two runs: restoring state_dict after each pass makes the second run reproduce the first
        for _ in range(2):
            lrs = []
            trainer.run(data, max_epochs=max_epochs)

            assert lrs == pytest.approx([v for i, v in lr_values])

            if save_history:
                param_history = trainer.state.param_history["lr"]
                assert lrs == pytest.approx([v[0] for v in param_history])

            scheduler.load_state_dict(state_dict)
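The snippet is the inner `_test` helper only; the enclosing test presumably calls it once per history mode, e.g.:

    _test(save_history=False)
    _test(save_history=True)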
Example #3: constructor and simulate_values argument validation (exception types only)
def test_concat_scheduler_asserts():

    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0)

    scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
    scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)

    with pytest.raises(ValueError):
        ConcatScheduler(schedulers=[], durations=[])

    with pytest.raises(ValueError):
        ConcatScheduler(schedulers=[scheduler_1], durations=[10])

    with pytest.raises(TypeError):
        ConcatScheduler(schedulers=[scheduler_1, 12], durations=[10])

    with pytest.raises(ValueError):
        ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10, 5])

    with pytest.raises(ValueError):
        ConcatScheduler(schedulers=[scheduler_1, scheduler_2, scheduler_2], durations=[15, 12.0])

    with pytest.raises(TypeError):
        ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations="abc")

    with pytest.raises(TypeError):
        ConcatScheduler.simulate_values(
            num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names="abc"
        )
Example #4: the older tuple-based ConcatScheduler constructor from early ignite releases
def test_concat_scheduler():
    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0)

    concat_scheduler = ConcatScheduler(
        optimizer=optimizer,
        param_name='lr',
        schedulers_list=[
            (
                LinearCyclicalScheduler,
                dict(
                    start_value=1,
                    end_value=0,
                    cycle_size=10
                ),
                10
            ),
            (
                CosineAnnealingScheduler,
                dict(
                    start_value=0,
                    end_value=1,
                    cycle_size=10
                ),
                None
            )
        ],
        save_history=True
    )

    lrs = []

    def save_lr(engine):
        lrs.append(optimizer.param_groups[0]['lr'])

    trainer = Engine(lambda engine, batch: None)
    trainer.add_event_handler(Events.ITERATION_COMPLETED, concat_scheduler)
    trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
    trainer.run([0] * 10, max_epochs=2)

    assert lrs == list(map(pytest.approx, [
        # Cycle 1 of the LinearCyclicalScheduler
        1.0, 0.8, 0.6, 0.4, 0.2,
        0.0, 0.2, 0.4, 0.6, 0.8,
        # Cycle 1 of the CosineAnnealingScheduler
        0.0, 0.02447174185242318, 0.09549150281252627, 0.20610737385376332, 0.3454915028125263,
        0.5, 0.6545084971874737, 0.7938926261462365, 0.9045084971874737, 0.9755282581475768,
    ]))
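This tuple-based signature comes from an early ignite release; in the versions the other examples target, ConcatScheduler takes already-built schedulers plus durations. A rough equivalent under that API (a sketch, reusing the optimizer and values above):

scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10], save_history=True)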
Example #5: the same validation checks with the exact error messages, plus the same-optimizer requirement
def test_concat_scheduler_asserts():

    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0)

    scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
    scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)

    with pytest.raises(TypeError, match=r"Argument schedulers should be a sequence"):
        ConcatScheduler(schedulers=None, durations=[])

    with pytest.raises(ValueError, match=r"Argument schedulers should be of more than one parameter schedulers"):
        ConcatScheduler(schedulers=[], durations=[])

    with pytest.raises(ValueError, match=r"Argument schedulers should be of more than one parameter schedulers"):
        ConcatScheduler(schedulers=[scheduler_1], durations=[10])

    with pytest.raises(TypeError, match=r"Value at index 1 of schedulers should be a parameter scheduler"):
        ConcatScheduler(schedulers=[scheduler_1, 12], durations=[10])

    with pytest.raises(ValueError, match=r"Incorrect number schedulers or duration values"):
        ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10, 5])

    with pytest.raises(ValueError, match=r"Argument durations should be list/tuple of integers"):
        ConcatScheduler(schedulers=[scheduler_1, scheduler_2, scheduler_2], durations=[15, 12.0])

    with pytest.raises(TypeError, match=r"Argument durations should be list/tuple"):
        ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations="abc")

    with pytest.raises(TypeError, match=r"Argument param_names should be list or tuple"):
        ConcatScheduler.simulate_values(
            num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names="abc"
        )

    with pytest.raises(ValueError, match=r"Argument param_names should be list or tuple of strings"):
        ConcatScheduler.simulate_values(
            num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names=[1]
        )

    optimizer_2 = torch.optim.SGD([tensor], lr=0)
    scheduler_3 = CosineAnnealingScheduler(optimizer_2, "lr", start_value=0.0, end_value=1.0, cycle_size=10)

    with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"):
        ConcatScheduler([scheduler_1, scheduler_3], durations=[30,])
Example #6: two concatenated LinearCyclicalScheduler instances, compared against simulate_values
def test_concat_scheduler_two_linear():
    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0)

    scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.0, end_value=0.1, cycle_size=2)
    scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.2, end_value=1.0, cycle_size=2)

    durations = [5, ]
    concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2],
                                       durations=durations, save_history=True)

    assert concat_scheduler.get_param() == 0.0

    data = [0] * 10
    max_epochs = 2
    simulated_values = ConcatScheduler.simulate_values(num_events=len(data) * max_epochs,
                                                       schedulers=[scheduler_1, scheduler_2],
                                                       durations=durations)

    lrs = []

    def save_lr(engine):
        lrs.append(optimizer.param_groups[0]['lr'])

    trainer = Engine(lambda engine, batch: None)
    trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
    trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
    trainer.run(data, max_epochs=max_epochs)

    assert lrs == list(map(pytest.approx, [
        # first LinearCyclicalScheduler
        0.0, 0.1, 0.0, 0.1, 0.0,
        # second LinearCyclicalScheduler
        0.2, 1.0, 0.2, 1.0, 0.2, 1.0, 0.2, 1.0,
        0.2, 1.0, 0.2, 1.0, 0.2, 1.0, 0.2,
    ]))

    state_lrs = trainer.state.param_history['lr']
    assert len(state_lrs) == len(lrs)
    # Unpack singleton lists
    assert [group[0] for group in state_lrs] == lrs

    assert lrs == pytest.approx([v for i, v in simulated_values])
Example #7: three concatenated schedulers with two duration values
def test_concat_scheduler_3_schedulers():
    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0)

    scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.5, cycle_size=20)
    scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.45, cycle_size=10)
    scheduler_3 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.0, cycle_size=20)
    durations = [10, 5]

    concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2, scheduler_3],
                                       durations=durations, save_history=True)

    data = [0] * 10
    max_epochs = 2
    simulated_values = ConcatScheduler.simulate_values(num_events=len(data) * max_epochs,
                                                       schedulers=[scheduler_1, scheduler_2, scheduler_3],
                                                       durations=durations)
    lrs = []

    def save_lr(engine):
        lrs.append(optimizer.param_groups[0]['lr'])

    trainer = Engine(lambda engine, batch: None)
    trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
    trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
    trainer.run(data, max_epochs=max_epochs)

    assert lrs == list(map(pytest.approx, [
        # Cycle 1 of the first LinearCyclicalScheduler
        1.0, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65, 0.6, 0.55,
        # Cycle 1 of the second LinearCyclicalScheduler
        0.5, 0.49, 0.48, 0.47, 0.46,
        # Cycle 1 of the third LinearCyclicalScheduler
        0.5, 0.45, 0.4, 0.35, 0.3,
    ]))

    state_lrs = trainer.state.param_history['lr']
    assert len(state_lrs) == len(lrs)
    # Unpack singleton lists
    assert [group[0] for group in state_lrs] == lrs

    assert lrs == pytest.approx([v for i, v in simulated_values])
Example #8: durations given as numpy integers (inner helper; optimizer comes from the enclosing scope)
    def _test(duration_vals_as_np_int):
        scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
        scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)

        durations = [10, ]
        if duration_vals_as_np_int:
            durations = [np.int64(t) for t in durations]

        concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2],
                                           durations=durations, save_history=True)

        data = [0] * 10
        max_epochs = 2
        simulated_values = ConcatScheduler.simulate_values(num_events=len(data) * max_epochs,
                                                           schedulers=[scheduler_1, scheduler_2],
                                                           durations=durations)

        lrs = []

        def save_lr(engine):
            lrs.append(optimizer.param_groups[0]['lr'])

        trainer = Engine(lambda engine, batch: None)
        trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
        trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
        trainer.run(data, max_epochs=max_epochs)

        assert lrs == list(map(pytest.approx, [
            # Cycle 1 of the LinearCyclicalScheduler
            1.0, 0.8, 0.6, 0.4, 0.2,
            0.0, 0.2, 0.4, 0.6, 0.8,
            # Cycle 1 of the CosineAnnealingScheduler
            0.0, 0.02447174185242318, 0.09549150281252627, 0.20610737385376332, 0.3454915028125263,
            0.5, 0.6545084971874737, 0.7938926261462365, 0.9045084971874737, 0.9755282581475768,
        ]))

        state_lrs = trainer.state.param_history['lr']
        assert len(state_lrs) == len(lrs)
        # Unpack singleton lists
        assert [group[0] for group in state_lrs] == lrs

        assert lrs == pytest.approx([v for i, v in simulated_values])
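Example #8 is likewise an inner helper. A hypothetical enclosing test (the name and setup are assumptions, mirroring Examples #1 and #3) would look like:

def test_concat_scheduler_np_int_durations():  # hypothetical name
    tensor = torch.zeros([1], requires_grad=True)
    optimizer = torch.optim.SGD([tensor], lr=0)

    def _test(duration_vals_as_np_int):
        ...  # body as in Example #8

    _test(duration_vals_as_np_int=False)
    _test(duration_vals_as_np_int=True)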