import matplotlib.pyplot as plt
import pytest
from apex import amp  # NVIDIA apex; required by the mixed-precision tests below
from torch_lr_finder import LRFinder, TrainDataLoaderIter, ValDataLoaderIter

import task as mod_task  # this suite's helper module providing XORTask (module name assumed)


def test_suggest_lr():
    task = mod_task.XORTask()
    lr_finder = prepare_lr_finder(task)

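    # Loss with minimal gradient is in the middle of history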
    lr_finder.history["loss"] = [10, 8, 4, 1, 4, 16]
    lr_finder.history["lr"] = range(len(lr_finder.history["loss"]))

    fig, ax = plt.subplots()
    ax, lr = lr_finder.plot(skip_start=0, skip_end=0, suggest_lr=True, ax=ax)

    assert lr == 2

    # Loss with minimal gradient is the first element in history
    lr_finder.history["loss"] = [1, 0, 1, 2, 3, 4]
    lr_finder.history["lr"] = range(len(lr_finder.history["loss"]))

    fig, ax = plt.subplots()
    ax, lr = lr_finder.plot(skip_start=0, skip_end=0, suggest_lr=True, ax=ax)

    assert lr == 0

    # Loss with minimal gradient is the last element in history
    lr_finder.history["loss"] = [0, 1, 2, 3, 4, 3]
    lr_finder.history["lr"] = range(len(lr_finder.history["loss"]))

    fig, ax = plt.subplots()
    ax, lr = lr_finder.plot(skip_start=0, skip_end=0, suggest_lr=True, ax=ax)

    assert lr == len(lr_finder.history["loss"]) - 1
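

# `prepare_lr_finder` is a helper defined elsewhere in this suite; a minimal
# sketch, assuming `XORTask` exposes `model`, `optimizer` (configured with a
# starting lr of 1e-5), `criterion`, and `device`:
def prepare_lr_finder(task, **kwargs):
    return LRFinder(
        task.model,
        task.optimizer,
        task.criterion,
        device=kwargs.get("device", task.device),
    )


# The lr suggested by `plot(suggest_lr=True)` is the point of steepest
# descent, i.e. the most negative numerical gradient of the loss curve. A
# sketch of the equivalent computation (helper name hypothetical, not the
# library's own code):
def steepest_lr(lrs, losses):
    import numpy as np

    # e.g. steepest_lr(list(range(6)), [10, 8, 4, 1, 4, 16]) -> 2,
    # matching the first assert in test_suggest_lr above.
    return lrs[np.gradient(np.array(losses, dtype=float)).argmin()]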


# NOTE: The parametrize values below are assumed; any combination that
# exercises both branches of the assertion block works.
@pytest.mark.parametrize("suggest_lr", [False, True])
@pytest.mark.parametrize("skip_start", [0, 5])
@pytest.mark.parametrize("skip_end", [0, 5])
def test_plot_with_skip_and_suggest_lr(suggest_lr, skip_start, skip_end):
    task = mod_task.XORTask()
    num_iter = 11
    # prepare_lr_finder sets the starting lr to 1e-5
    lr_finder = prepare_lr_finder(task)
    lr_finder.range_test(task.train_loader,
                         num_iter=num_iter,
                         step_mode="exp",
                         end_lr=0.1)

    fig, ax = plt.subplots()
    results = lr_finder.plot(skip_start=skip_start,
                             skip_end=skip_end,
                             suggest_lr=suggest_lr,
                             ax=ax)

    if num_iter - skip_start - skip_end <= 1:
        # With at most one point left after skipping, no lr is suggested and
        # `plot` returns only the axes object
        assert len(ax.lines) == 1
        assert results is ax
    else:
        # With suggest_lr=True ('steepest' strategy), a scatter point marks
        # the suggested lr: the point with steepest (most negative) gradient
        assert len(ax.lines) == 1
        assert len(ax.collections) == int(suggest_lr)
        if results is not ax:
            assert len(results) == 2


# NOTE: The parametrize values below are assumed; any non-positive num_iter
# should trigger the ValueError checked in the body.
@pytest.mark.parametrize("num_iter", [0, -1])
@pytest.mark.parametrize("scheduler", ["exp", "linear"])
def test_scheduler_and_num_iter(num_iter, scheduler):
    task = mod_task.XORTask()
    # prepare_lr_finder sets the starting lr to 1e-5
    lr_finder = prepare_lr_finder(task)
    with pytest.raises(ValueError, match="num_iter"):
        lr_finder.range_test(task.train_loader,
                             num_iter=num_iter,
                             step_mode=scheduler,
                             end_lr=5e-5)


def test_run_range_test_with_traindataloaderiter(mocker):
    task = mod_task.XORTask()
    lr_finder = prepare_lr_finder(task)
    num_iter = 5

    loader_iter = TrainDataLoaderIter(task.train_loader)
    spy = mocker.spy(loader_iter, "inputs_labels_from_batch")

    lr_finder.range_test(loader_iter, num_iter=num_iter)
    assert spy.call_count == num_iter
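

# Subclasses of `TrainDataLoaderIter`/`ValDataLoaderIter` override
# `inputs_labels_from_batch` to unpack non-standard batch layouts. A
# hypothetical sketch for loaders yielding dict batches (names assumed):
class DictBatchTrainIter(TrainDataLoaderIter):
    def inputs_labels_from_batch(self, batch_data):
        # Must return an (inputs, labels) pair for the range test to consume.
        return batch_data["inputs"], batch_data["labels"]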


def test_run_range_test_with_trainloaderiter_without_subclassing():
    task = mod_task.XORTask()
    lr_finder = prepare_lr_finder(task)
    num_iter = 5

    loader_iter = CustomLoaderIter(task.train_loader)

    with pytest.raises(ValueError,
                       match="`train_loader` has unsupported type"):
        lr_finder.range_test(loader_iter, num_iter=num_iter)
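

# `CustomLoaderIter` is a helper defined elsewhere in this suite; a minimal
# sketch, assuming it wraps a loader *without* subclassing the supported
# `TrainDataLoaderIter`/`ValDataLoaderIter` types, so `range_test` rejects it:
class CustomLoaderIter:
    def __init__(self, loader):
        self.loader = loader

    def __iter__(self):
        return iter(self.loader)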


def test_exponential_lr_history():
    task = mod_task.XORTask()
    # prepare_lr_finder sets the starting lr to 1e-5
    lr_finder = prepare_lr_finder(task)
    lr_finder.range_test(task.train_loader,
                         num_iter=5,
                         step_mode="exp",
                         end_lr=0.1)

    assert len(lr_finder.history["lr"]) == 5
    assert lr_finder.history["lr"] == pytest.approx(
        [1e-5, 1e-4, 1e-3, 1e-2, 0.1])


def test_linear_lr_history():
    task = mod_task.XORTask()
    # prepare_lr_finder sets the starting lr to 1e-5
    lr_finder = prepare_lr_finder(task)
    lr_finder.range_test(task.train_loader,
                         num_iter=5,
                         step_mode="linear",
                         end_lr=5e-5)

    assert len(lr_finder.history["lr"]) == 5
    assert lr_finder.history["lr"] == pytest.approx(
        [1e-5, 2e-5, 3e-5, 4e-5, 5e-5])
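

# The histories asserted above follow closed-form schedules; a sketch of the
# formulas (assumed to match the library's "exp" and "linear" step modes):
def exp_schedule(start_lr, end_lr, num_iter):
    # lr_i = start_lr * (end_lr / start_lr) ** (i / (num_iter - 1))
    return [start_lr * (end_lr / start_lr) ** (i / (num_iter - 1))
            for i in range(num_iter)]


def linear_schedule(start_lr, end_lr, num_iter):
    # lr_i = start_lr + i * (end_lr - start_lr) / (num_iter - 1)
    return [start_lr + i * (end_lr - start_lr) / (num_iter - 1)
            for i in range(num_iter)]


# exp_schedule(1e-5, 0.1, 5)     -> [1e-5, 1e-4, 1e-3, 1e-2, 0.1]
# linear_schedule(1e-5, 5e-5, 5) -> [1e-5, 2e-5, 3e-5, 4e-5, 5e-5]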


def test_run_range_test_with_valloaderiter_without_subclassing():
    task = mod_task.XORTask(validate=True)
    lr_finder = prepare_lr_finder(task)
    num_iter = 5

    train_loader_iter = TrainDataLoaderIter(task.train_loader)
    val_loader_iter = CustomLoaderIter(task.val_loader)

    with pytest.raises(ValueError,
                       match="`val_loader` has unsupported type"):
        lr_finder.range_test(train_loader_iter,
                             val_loader=val_loader_iter,
                             num_iter=num_iter)


def test_run_range_test_with_valdataloaderiter(mocker):
    task = mod_task.XORTask(validate=True)
    lr_finder = prepare_lr_finder(task)
    num_iter = 5

    train_loader_iter = TrainDataLoaderIter(task.train_loader)
    val_loader_iter = ValDataLoaderIter(task.val_loader)
    spy_train = mocker.spy(train_loader_iter, "inputs_labels_from_batch")
    spy_val = mocker.spy(val_loader_iter, "inputs_labels_from_batch")

    lr_finder.range_test(train_loader_iter,
                         val_loader=val_loader_iter,
                         num_iter=num_iter)
    assert spy_train.call_count == num_iter
    assert spy_val.call_count == num_iter * len(task.val_loader)


def test_gradient_accumulation(mocker):
    desired_bs, accum_steps = 32, 4
    real_bs = desired_bs // accum_steps
    num_iter = 10
    task = mod_task.XORTask(batch_size=real_bs)

    lr_finder = prepare_lr_finder(task)
    spy = mocker.spy(lr_finder, "criterion")

    lr_finder.range_test(task.train_loader,
                         num_iter=num_iter,
                         accumulation_steps=accum_steps)
    # NOTE: A smaller batch size is used to simulate a large batch, so the
    # model/criterion should be called `(desired_bs / real_bs) * num_iter`
    # == `accum_steps * num_iter` times.
    assert spy.call_count == accum_steps * num_iter
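

# A sketch of the accumulation pattern that the spy above counts (assumed;
# the actual loop lives inside `range_test`): each loss is scaled down so the
# summed gradients match a single large-batch step, and the optimizer only
# steps once per `accumulation_steps` mini-batches.
def accumulated_steps(model, criterion, optimizer, batches, accumulation_steps):
    for i, (inputs, labels) in enumerate(batches):
        loss = criterion(model(inputs), labels) / accumulation_steps
        loss.backward()
        if (i + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()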


def test_mixed_precision(mocker):
    batch_size = 32
    num_iter = 10
    task = mod_task.XORTask(batch_size=batch_size)

    # Wrap the model and optimizer with `amp.initialize`. Besides, `amp`
    # requires a CUDA GPU, so the model has to be moved to the GPU first.
    model, optimizer, device = task.model, task.optimizer, task.device
    model = model.to(device)
    task.model, task.optimizer = amp.initialize(model, optimizer)
    assert hasattr(task.optimizer, "_amp_stash")

    lr_finder = prepare_lr_finder(task)
    spy = mocker.spy(amp, "scale_loss")

    lr_finder.range_test(task.train_loader, num_iter=num_iter)
    # NOTE: No gradient accumulation is performed here, so the call count of
    # `amp.scale_loss` should equal `num_iter`.
    assert spy.call_count == num_iter
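

# `amp.scale_loss` is apex's loss-scaling context manager; the call pattern
# counted by the spy above is the standard apex recipe (a sketch, not the
# library's exact code):
def amp_backward(loss, optimizer):
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()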


def test_gradient_accumulation_with_apex_amp(mocker):
    desired_bs, accum_steps = 32, 4
    real_bs = desired_bs // accum_steps
    num_iter = 10
    task = mod_task.XORTask(batch_size=real_bs)

    # Wrap the model and optimizer with `amp.initialize`. Besides, `amp`
    # requires a CUDA GPU, so the model has to be moved to the GPU first.
    model, optimizer, device = task.model, task.optimizer, task.device
    model = model.to(device)
    task.model, task.optimizer = amp.initialize(model, optimizer)

    lr_finder = prepare_lr_finder(task)
    spy = mocker.spy(amp, "scale_loss")

    lr_finder.range_test(task.train_loader,
                         num_iter=num_iter,
                         accumulation_steps=accum_steps)
    assert spy.call_count == accum_steps * num_iter