Example #1
def test_get_total_number_of_training_epochs() -> None:
    c = DeepLearningConfig(num_epochs=2, should_validate=False)
    assert c.get_total_number_of_training_epochs() == 2
    c = DeepLearningConfig(num_epochs=10, should_validate=False)
    # Simulate recovering training from a checkpoint at epoch 2
    c.recovery_start_epoch = 2
    assert c.get_total_number_of_training_epochs() == 8
def test_get_test_epochs() -> None:
    """
    Test that the list of epochs created for model testing always contains at least the last training epoch.
    """
    c = DeepLearningConfig(num_epochs=2,
                           test_start_epoch=100,
                           test_diff_epochs=2,
                           test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [2]
    c = DeepLearningConfig(num_epochs=100,
                           test_start_epoch=100,
                           test_diff_epochs=2,
                           test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [100]
    c = DeepLearningConfig(num_epochs=150,
                           test_start_epoch=100,
                           test_diff_epochs=2,
                           test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [100, 110, 150]
    c = DeepLearningConfig(num_epochs=100,
                           test_start_epoch=100,
                           test_diff_epochs=0,
                           test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [100]
    c = DeepLearningConfig(num_epochs=100,
                           test_start_epoch=200,
                           test_diff_epochs=None,
                           test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [100]
Example #3
def test_get_test_epochs() -> None:
    """
    Test that the list of epochs created for model testing always contains at least the last training epoch.
    """
    c = DeepLearningConfig(num_epochs=2, test_start_epoch=100, test_diff_epochs=2, test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [2]
    c = DeepLearningConfig(num_epochs=100, test_start_epoch=100, test_diff_epochs=2, test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [100]
    c = DeepLearningConfig(num_epochs=150, test_start_epoch=100, test_diff_epochs=2, test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [100, 110, 150]
    c = DeepLearningConfig(num_epochs=100, test_start_epoch=100, test_diff_epochs=0, test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [100]
    c = DeepLearningConfig(num_epochs=100, test_start_epoch=200, test_diff_epochs=None, test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [100]
    c = DeepLearningConfig(num_epochs=100, epochs_to_test=[1, 3, 5],
                           should_validate=False)
    assert c.get_test_epochs() == [1, 3, 5, 100]

    # epochs_to_test should have precedence over (test_start_epoch, test_diff_epochs and test_step_epochs)
    c = DeepLearningConfig(num_epochs=150, epochs_to_test=[1, 3, 5],
                           test_start_epoch=100, test_diff_epochs=2, test_step_epochs=10,
                           should_validate=False)
    assert c.get_test_epochs() == [1, 3, 5, 150]
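
The assertions above fully determine the behaviour under test. The following is a minimal sketch of the epoch-selection logic they imply; it is inferred from the test expectations, not InnerEye's actual get_test_epochs implementation, and the standalone function signature is hypothetical.

from typing import List, Optional

def get_test_epochs(num_epochs: int,
                    epochs_to_test: Optional[List[int]] = None,
                    test_start_epoch: Optional[int] = None,
                    test_diff_epochs: Optional[int] = None,
                    test_step_epochs: Optional[int] = None) -> List[int]:
    # Explicitly listed epochs take precedence over the (start, diff, step) triple.
    epochs = set(epochs_to_test or [])
    if not epochs and test_start_epoch is not None and test_diff_epochs and test_step_epochs:
        # Generate test_diff_epochs candidates starting at test_start_epoch, spaced
        # test_step_epochs apart, keeping only those that fall inside the training run.
        candidates = (test_start_epoch + i * test_step_epochs for i in range(test_diff_epochs))
        epochs.update(e for e in candidates if e <= num_epochs)
    # The last training epoch is always tested.
    epochs.add(num_epochs)
    return sorted(epochs)

assert get_test_epochs(150, test_start_epoch=100, test_diff_epochs=2, test_step_epochs=10) == [100, 110, 150]
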
Example #4
def test_config_str() -> None:
    """
    Check if dataframe fields are omitted from the string conversion of a config object.
    """
    config = DeepLearningConfig()
    df = DataFrame(columns=["foobar"], data=[1.0, 2.0])
    config.dataset_data_frame = df
    s = str(config)
    assert "foobar" not in s, f"Incorrect output: {s}"
def test_should_save_epoch(save_start_epoch: int, save_step_epochs: int,
                           num_epochs: int, expected_true: List[int],
                           verify_up_to_epoch: int) -> None:
    train_config = DeepLearningConfig(save_start_epoch=save_start_epoch,
                                      save_step_epochs=save_step_epochs,
                                      num_epochs=num_epochs,
                                      should_validate=False)
    for epoch in expected_true:
        assert train_config.should_save_epoch(epoch), f"Epoch {epoch} should be saved"
    expected_false = set(range(1, verify_up_to_epoch + 1)) - set(expected_true)
    for epoch in expected_false:
        assert not train_config.should_save_epoch(epoch), f"Epoch {epoch} should not be saved"
def test_lr_scheduler_with_warmup(warmup_epochs: int,
                                  expected_values: List[float]) -> None:
    """
    Check that warmup is applied correctly to a multistep scheduler
    """
    initial_lr = 1
    optimizer = torch.optim.Adam([torch.ones(2, 2, requires_grad=True)],
                                 lr=initial_lr)
    config = DeepLearningConfig(l_rate=initial_lr,
                                l_rate_scheduler=LRSchedulerType.MultiStep,
                                l_rate_multi_step_milestones=[2, 4],
                                l_rate_multi_step_gamma=0.5,
                                l_rate_warmup_epochs=warmup_epochs,
                                l_rate_warmup=LRWarmUpType.Linear,
                                should_validate=False)
    scheduler = SchedulerWithWarmUp(config, optimizer)
    lrs = enumerate_scheduler(scheduler, 4)
    assert lrs == expected_values
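
For reference, the scheme being tested is linear warmup over the first warmup_epochs epochs followed by MultiStep decay at the configured milestones. The helper below computes per-epoch learning rates for that general scheme; the exact warmup scaling factor is an assumption, so the values may differ from what SchedulerWithWarmUp actually produces.

from typing import List

def warmup_multistep_lrs(initial_lr: float, warmup_epochs: int,
                         milestones: List[int], gamma: float, num_epochs: int) -> List[float]:
    """Per-epoch learning rates for linear warmup followed by MultiStep decay (illustrative only)."""
    lrs = []
    for epoch in range(num_epochs):
        # MultiStep part: multiply by gamma for every milestone that has been reached.
        lr = initial_lr * gamma ** sum(1 for m in milestones if epoch >= m)
        if epoch < warmup_epochs:
            # Linear warmup: ramp the rate up towards its scheduled value (scaling factor assumed).
            lr *= (epoch + 1) / (warmup_epochs + 1)
        lrs.append(lr)
    return lrs

print(warmup_multistep_lrs(initial_lr=1.0, warmup_epochs=2, milestones=[2, 4], gamma=0.5, num_epochs=4))
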
def test_use_gpu_flag(use_gpu_override: bool) -> None:
    config = DeepLearningConfig(should_validate=False)
    # When use_gpu has not been explicitly overridden, the flag should return True exactly when a GPU is
    # actually present.
    assert config.use_gpu == machine_has_gpu
    if machine_has_gpu:
        # If a GPU is present, the use_gpu flag should exactly return whatever the override says
        # (we can run in CPU mode even on a GPU)
        config.use_gpu = use_gpu_override
        assert config.use_gpu == use_gpu_override
    else:
        if use_gpu_override:
            # We are on a machine without a GPU, but the override says we should use the GPU: fail.
            with pytest.raises(ValueError) as ex:
                config.use_gpu = use_gpu_override
            assert "use_gpu to True if there is not CUDA capable GPU present" in str(
                ex)
        else:
            config.use_gpu = use_gpu_override
            assert config.use_gpu == use_gpu_override
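
The behaviour exercised here, defaulting use_gpu to GPU availability and rejecting an override to True on a CPU-only machine, can be illustrated with a minimal property sketch. This is not InnerEye's code; the class name is made up, and the error text simply mirrors the substring asserted in the test.

import torch

class GpuFlagExample:
    """Illustrative stand-in for the use_gpu behaviour exercised above."""

    def __init__(self) -> None:
        # Default to whatever hardware is actually available.
        self._use_gpu: bool = torch.cuda.is_available()

    @property
    def use_gpu(self) -> bool:
        return self._use_gpu

    @use_gpu.setter
    def use_gpu(self, value: bool) -> None:
        # Allow forcing CPU mode on a GPU machine, but refuse to force GPU mode without a GPU.
        if value and not torch.cuda.is_available():
            raise ValueError("Cannot set use_gpu to True if there is not CUDA capable GPU present.")
        self._use_gpu = value
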
def test_validate() -> None:
    # DeepLearningConfig cannot be initialized if neither of these parameters is set
    with pytest.raises(ValueError) as ex:
        DeepLearningConfig(local_dataset=None, azure_dataset_id=None)
    assert ex.value.args[0] == "Either of local_dataset or azure_dataset_id must be set."

    # The following should be okay
    DeepLearningConfig(local_dataset=Path("foo"))
    DeepLearningConfig(azure_dataset_id="bar")

    # DeepLearningConfig cannot be initialized with both local_weights_path and weights_url set
    with pytest.raises(ValueError) as ex:
        DeepLearningConfig(local_dataset=Path("foo"),
                           local_weights_path=Path("foo"),
                           weights_url="bar")
    assert ex.value.args[0] == "Cannot specify both local_weights_path and weights_url."

    # The following should be okay
    DeepLearningConfig(local_dataset=Path("foo"),
                       local_weights_path=Path("foo"))
    DeepLearningConfig(local_dataset=Path("foo"), weights_url="bar")
def test_get_total_number_of_training_epochs() -> None:
    c = DeepLearningConfig(num_epochs=2, should_validate=False)
    assert c.get_total_number_of_training_epochs() == 2
    c = DeepLearningConfig(num_epochs=10, start_epoch=5, should_validate=False)
    assert c.get_total_number_of_training_epochs() == 5
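
Together with Example #1, these assertions imply that the total number of training epochs is simply num_epochs minus the epoch training resumes from (start_epoch or recovery_start_epoch). A one-line sketch of that inferred behaviour, with a hypothetical standalone signature:

def total_training_epochs(num_epochs: int, start_epoch: int = 0) -> int:
    # When training resumes from a checkpoint, only the remaining epochs are counted.
    return num_epochs - start_epoch

assert total_training_epochs(10, start_epoch=5) == 5
assert total_training_epochs(10, start_epoch=2) == 8
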
Example #10
#  ------------------------------------------------------------------------------------------
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
#  ------------------------------------------------------------------------------------------

import pytest

from InnerEye.ML.deep_learning_config import DeepLearningConfig, TrainerParams
from InnerEye.ML.lightning_container import LightningContainer
from Tests.ML.util import machine_has_gpu


@pytest.mark.cpu_and_gpu
@pytest.mark.parametrize(
    "config",
    [DeepLearningConfig(should_validate=False),
     LightningContainer()])
def test_use_gpu_flag(config: TrainerParams) -> None:
    """
    Test that the use_gpu flag is set correctly on both InnerEye configs and containers.
    This checks for a bug in an earlier version where it was off for containers only.
    """
    # With the default settings, the use_gpu flag should return True exactly when a GPU is
    # actually present.
    assert config.use_gpu == machine_has_gpu
    if machine_has_gpu:
        # If there is a GPU present, only setting max_num_gpus to 0 should make the use_gpu flag False.
        config.max_num_gpus = -1
        assert config.use_gpu
        config.max_num_gpus = 1
        assert config.use_gpu
Example #11

import pytest

from InnerEye.Common import common_util
from InnerEye.ML.deep_learning_config import DeepLearningConfig, TrainerParams
from InnerEye.ML.lightning_container import LightningContainer
from Tests.ML.util import machine_has_gpu


@pytest.mark.skipif(common_util.is_windows(),
                    reason="Has issues on windows build")
@pytest.mark.cpu_and_gpu
@pytest.mark.parametrize(
    "config",
    [DeepLearningConfig(), LightningContainer()])
def test_use_gpu_flag(config: TrainerParams) -> None:
    """
    Test that the use_gpu flag is set correctly on both InnerEye configs and containers.
    This checks for a bug in an earlier version where it was off for containers only.
    """
    # With the default settings, the use_gpu flag should return True exactly when a GPU is
    # actually present.
    assert config.use_gpu == machine_has_gpu
    if machine_has_gpu:
        # If there is a GPU present, only setting max_num_gpus to 0 should make the use_gpu flag False.
        config.max_num_gpus = -1
        assert config.use_gpu
        config.max_num_gpus = 1
        assert config.use_gpu
        config.max_num_gpus = 0
        assert not config.use_gpu